
Upgrade V8 to 3.9.24.6

v0.9.1-release
isaacs 13 years ago
commit 4b64542fe0
  1. deps/v8/AUTHORS (1)
  2. deps/v8/ChangeLog (81)
  3. deps/v8/SConstruct (35)
  4. deps/v8/benchmarks/README.txt (7)
  5. deps/v8/benchmarks/revisions.html (4)
  6. deps/v8/benchmarks/run.html (20)
  7. deps/v8/build/common.gypi (12)
  8. deps/v8/build/mipsu.gypi (3)
  9. deps/v8/include/v8.h (8)
  10. deps/v8/src/SConscript (1)
  11. deps/v8/src/api.cc (191)
  12. deps/v8/src/arm/assembler-arm-inl.h (12)
  13. deps/v8/src/arm/assembler-arm.cc (21)
  14. deps/v8/src/arm/assembler-arm.h (70)
  15. deps/v8/src/arm/code-stubs-arm.cc (64)
  16. deps/v8/src/arm/codegen-arm.cc (7)
  17. deps/v8/src/arm/deoptimizer-arm.cc (30)
  18. deps/v8/src/arm/full-codegen-arm.cc (209)
  19. deps/v8/src/arm/lithium-arm.cc (8)
  20. deps/v8/src/arm/lithium-arm.h (17)
  21. deps/v8/src/arm/lithium-codegen-arm.cc (77)
  22. deps/v8/src/arm/macro-assembler-arm.cc (4)
  23. deps/v8/src/arm/regexp-macro-assembler-arm.cc (2)
  24. deps/v8/src/arm/stub-cache-arm.cc (16)
  25. deps/v8/src/assembler.cc (54)
  26. deps/v8/src/assembler.h (15)
  27. deps/v8/src/ast.cc (12)
  28. deps/v8/src/ast.h (15)
  29. deps/v8/src/builtins.cc (45)
  30. deps/v8/src/codegen.cc (13)
  31. deps/v8/src/codegen.h (6)
  32. deps/v8/src/compiler.cc (20)
  33. deps/v8/src/d8.cc (8)
  34. deps/v8/src/debug-agent.cc (29)
  35. deps/v8/src/debug.cc (25)
  36. deps/v8/src/debug.h (1)
  37. deps/v8/src/deoptimizer.cc (3)
  38. deps/v8/src/deoptimizer.h (9)
  39. deps/v8/src/elements.cc (259)
  40. deps/v8/src/elements.h (15)
  41. deps/v8/src/execution.cc (3)
  42. deps/v8/src/factory.cc (5)
  43. deps/v8/src/flag-definitions.h (55)
  44. deps/v8/src/flags.cc (15)
  45. deps/v8/src/frames.cc (9)
  46. deps/v8/src/full-codegen.cc (3)
  47. deps/v8/src/full-codegen.h (24)
  48. deps/v8/src/gdb-jit.cc (9)
  49. deps/v8/src/gdb-jit.h (3)
  50. deps/v8/src/globals.h (3)
  51. deps/v8/src/handles.cc (158)
  52. deps/v8/src/handles.h (2)
  53. deps/v8/src/hashmap.h (73)
  54. deps/v8/src/heap.cc (199)
  55. deps/v8/src/heap.h (44)
  56. deps/v8/src/hydrogen-instructions.cc (49)
  57. deps/v8/src/hydrogen-instructions.h (57)
  58. deps/v8/src/hydrogen.cc (214)
  59. deps/v8/src/hydrogen.h (33)
  60. deps/v8/src/ia32/assembler-ia32-inl.h (2)
  61. deps/v8/src/ia32/assembler-ia32.h (36)
  62. deps/v8/src/ia32/code-stubs-ia32.cc (41)
  63. deps/v8/src/ia32/codegen-ia32.cc (38)
  64. deps/v8/src/ia32/deoptimizer-ia32.cc (43)
  65. deps/v8/src/ia32/full-codegen-ia32.cc (59)
  66. deps/v8/src/ia32/lithium-codegen-ia32.cc (108)
  67. deps/v8/src/ia32/lithium-codegen-ia32.h (6)
  68. deps/v8/src/ia32/lithium-ia32.cc (20)
  69. deps/v8/src/ia32/lithium-ia32.h (30)
  70. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (2)
  71. deps/v8/src/ia32/stub-cache-ia32.cc (15)
  72. deps/v8/src/ic.cc (11)
  73. deps/v8/src/incremental-marking.cc (15)
  74. deps/v8/src/isolate-inl.h (10)
  75. deps/v8/src/isolate.cc (137)
  76. deps/v8/src/isolate.h (40)
  77. deps/v8/src/jsregexp.cc (28)
  78. deps/v8/src/lazy-instance.h (216)
  79. deps/v8/src/lithium-allocator.cc (23)
  80. deps/v8/src/lithium.cc (25)
  81. deps/v8/src/lithium.h (14)
  82. deps/v8/src/log.cc (26)
  83. deps/v8/src/log.h (1)
  84. deps/v8/src/mark-compact-inl.h (9)
  85. deps/v8/src/mark-compact.cc (6)
  86. deps/v8/src/mark-compact.h (3)
  87. deps/v8/src/mips/assembler-mips-inl.h (28)
  88. deps/v8/src/mips/assembler-mips.cc (23)
  89. deps/v8/src/mips/assembler-mips.h (14)
  90. deps/v8/src/mips/builtins-mips.cc (8)
  91. deps/v8/src/mips/code-stubs-mips.cc (323)
  92. deps/v8/src/mips/codegen-mips.cc (7)
  93. deps/v8/src/mips/constants-mips.h (15)
  94. deps/v8/src/mips/debug-mips.cc (4)
  95. deps/v8/src/mips/deoptimizer-mips.cc (8)
  96. deps/v8/src/mips/disasm-mips.cc (18)
  97. deps/v8/src/mips/full-codegen-mips.cc (264)
  98. deps/v8/src/mips/ic-mips.cc (47)
  99. deps/v8/src/mips/lithium-codegen-mips.cc (81)
  100. deps/v8/src/mips/lithium-mips.cc (15)

deps/v8/AUTHORS (1)

@@ -51,3 +51,4 @@ Tobias Burnus <burnus@net-b.de>
Vlad Burlik <vladbph@gmail.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
Zhongping Wang <kewpie.w.zp@gmail.com>

deps/v8/ChangeLog (81)

@@ -1,3 +1,84 @@
2012-03-23: Version 3.9.24

        Activated count-based profiler for ARM.
        Fixed use of proxies as f.prototype properties. (issue 2021)
        Enabled snapshots on MIPS.
        Performance and stability improvements on all platforms.

2012-03-21: Version 3.9.23

        Use correct arguments adaptation environment when inlining function
        containing arguments. (Issue 2014)
        Performance and stability improvements on all platforms.

2012-03-20: Version 3.9.22

        Enabled count-based profiler by default.
        Implemented a hash based look-up to speed up address checks
        in large object space (issue 853).
        Performance and stability improvements on all platforms.

2012-03-19: Version 3.9.21

        Fixed push-to-trunk script (and re-push).
        Added API call that identifies strings that are guaranteed only to
        contain ASCII characters.

2012-03-19: Version 3.9.20

        Fixed declarations escaping global strict eval. (Issue 1624)
        Fixed wrapping of receiver for non-strict callbacks. (Issue 1973)
        Fixed function declarations overwriting read-only global properties.
        (Chromium issue 115452)
        Fixed --use-strict flag in combination with --harmony[-scoping].
        Debugger: naive implementation of "step into Function.prototype.bind".
        Debugger: added ability to set script source from within OnBeforeCompile
        Added flag to always call DebugBreak on abort.
        Re-enabled constructor inlining and inline === comparison with boolean
        constants. (Issue 2009)
        Don't use an explicit s0 in ClampDoubleToUint8. (Issue 2004)
        Performance and stability improvements on all platforms.

2012-03-14: Version 3.9.19

        Ensure there is a smi check of the receiver for global load and call
        ICs (Chromium issue 117794).
        Performance and stability improvements on all platforms.

2012-03-13: Version 3.9.18

        Ensure consistency of Math.sqrt on Intel platforms.
        Remove static initializers in v8. (issue 1859)
        Add explicit dependency on v8_base in the GYP-based build.
        Performance and stability improvements on all platforms.

2012-03-12: Version 3.9.17

        Fixed VFP detection through compiler defines. (issue 1996)

deps/v8/SConstruct (35)

@@ -185,6 +185,9 @@ LIBRARY_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'mips_arch_variant:loongson': {
'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
},
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -194,6 +197,9 @@ LIBRARY_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'mips_arch_variant:loongson': {
'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -212,9 +218,12 @@ LIBRARY_FLAGS = {
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
'fpu:on': {
'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
}
},
'mipsabi:hardfloat': {
'CPPDEFINES': ['__mips_hard_float=1'],
'CPPDEFINES': ['__mips_hard_float=1', 'CAN_USE_FPU_INSTRUCTIONS'],
}
},
'arch:x64': {
@@ -545,6 +554,9 @@ SAMPLE_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'mips_arch_variant:loongson': {
'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
},
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -554,6 +566,9 @@ SAMPLE_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'mips_arch_variant:loongson': {
'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -563,7 +578,10 @@ SAMPLE_FLAGS = {
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
'LINKFLAGS': ['-mhard-float'],
'fpu:on': {
'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
}
}
}
},
@@ -697,6 +715,9 @@ PREPARSER_FLAGS = {
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'mips_arch_variant:loongson': {
'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
},
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
@@ -706,6 +727,9 @@ PREPARSER_FLAGS = {
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'mips_arch_variant:loongson': {
'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
@@ -1114,7 +1138,7 @@ SIMPLE_OPTIONS = {
'help': 'generate calling conventiont according to selected mips ABI'
},
'mips_arch_variant': {
'values': ['mips32r2', 'mips32r1'],
'values': ['mips32r2', 'mips32r1', 'loongson'],
'default': 'mips32r2',
'help': 'mips variant'
},
@@ -1128,6 +1152,11 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'use vfp3 instructions when building the snapshot [Arm only]'
},
'fpu': {
'values': ['on', 'off'],
'default': 'on',
'help': 'use fpu instructions when building the snapshot [MIPS only]'
},
}

deps/v8/benchmarks/README.txt (7)

@@ -77,3 +77,10 @@ input strings.
Furthermore, the benchmark runner was changed to run the benchmarks
for at least a few times to stabilize the reported numbers on slower
machines.
Changes from Version 6 to Version 7
===================================
Added the Navier-Stokes benchmark, a 2D differential equation solver
that stresses arithmetic computations on double arrays.

deps/v8/benchmarks/revisions.html (4)

@@ -19,6 +19,10 @@ not comparable unless both results are run with the same revision of
the benchmark suite.
</p>
<div class="subtitle"><h3>Version 7 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v7/run.html">link</a>)</h3></div>
<p>This version includes the new Navier-Stokes benchmark, a 2D differential
equation solver that stresses arithmetic computations on double arrays.</p>
<div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>

deps/v8/benchmarks/run.html (20)

@@ -53,16 +53,16 @@ function Run() {
BenchmarkSuite.RunSuites({ NotifyStep: ShowProgress,
NotifyError: AddError,
NotifyResult: AddResult,
NotifyScore: AddScore });
NotifyScore: AddScore });
}
function ShowWarningIfObsolete() {
// If anything goes wrong we will just catch the exception and no
// If anything goes wrong we will just catch the exception and no
// warning is shown, i.e., no harm is done.
try {
var xmlhttp;
var next_version = parseInt(BenchmarkSuite.version) + 1;
var next_version_url = "../v" + next_version + "/run.html";
var next_version = parseInt(BenchmarkSuite.version) + 1;
var next_version_url = "../v" + next_version + "/run.html";
if (window.XMLHttpRequest) {
xmlhttp = new window.XMLHttpRequest();
} else if (window.ActiveXObject) {
@@ -76,7 +76,7 @@ function ShowWarningIfObsolete() {
};
xmlhttp.send(null);
} catch(e) {
// Ignore exception if check for next version fails.
// Ignore exception if check for next version fails.
// Hence no warning is displayed.
}
}
@@ -84,7 +84,7 @@ function ShowWarningIfObsolete() {
function Load() {
var version = BenchmarkSuite.version;
document.getElementById("version").innerHTML = version;
ShowWarningIfObsolete();
ShowWarningIfObsolete();
setTimeout(Run, 200);
}
</script>
@@ -92,11 +92,11 @@ function Load() {
<body onload="Load()">
<div>
<div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
<div class="warning" id="obsolete">
<div class="warning" id="obsolete">
Warning! This is not the latest version of the V8 benchmark
suite. Consider running the
suite. Consider running the
<a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html">
latest version</a>.
latest version</a>.
</div>
<table>
<tr>
@@ -118,7 +118,7 @@ higher scores means better performance: <em>Bigger is better!</em>
(<i>1761 lines</i>).
</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
<li><b>NavierStokes (beta)</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays. Based on Oliver Hunt's code (<i>396 lines</i>).</li>
<li><b>NavierStokes</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays. Based on Oliver Hunt's code (<i>387 lines</i>).</li>
</ul>
<p>

deps/v8/build/common.gypi (12)

@@ -62,6 +62,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
'v8_enable_debugger_support%': 1,
'v8_enable_disassembler%': 0,
@@ -184,6 +187,9 @@
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
@@ -209,6 +215,9 @@
['mips_arch_variant=="mips32r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
# The MIPS assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64"', {
@@ -305,7 +314,7 @@
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@@ -352,6 +361,7 @@
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/build/mipsu.gypi (3)

@@ -1,4 +1,4 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -29,6 +29,5 @@
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'mips',
'mips_arch_variant': 'mips32r2',
},
}

deps/v8/include/v8.h (8)

@@ -1020,6 +1020,14 @@ class String : public Primitive {
*/
V8EXPORT int Utf8Length() const;
/**
* A fast conservative check for non-ASCII characters. May
* return true even for ASCII strings, but if it returns
* false you can be sure that all characters are in the range
* 0-127.
*/
V8EXPORT bool MayContainNonAscii() const;
/**
* Write the contents of the string to an external buffer.
* If no arguments are given, expects the buffer to be large
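
The MayContainNonAscii() call documented above is deliberately conservative: a false result guarantees the string is pure ASCII, while true is inconclusive. A minimal embedder sketch of how a caller might exploit that, assuming a V8 3.9-era build and the classic handle API (the helper name is hypothetical, not part of this commit):

#include <v8.h>

// Hypothetical helper: take a cheap copy path when the string is provably
// ASCII, fall back to the general UTF-8 writer otherwise.
int CopyStringUtf8(v8::Handle<v8::String> str, char* buf, int capacity) {
  if (!str->MayContainNonAscii()) {
    // Guaranteed ASCII: one byte per character, no surrogate handling.
    return str->WriteAscii(buf, 0, capacity);
  }
  return str->WriteUtf8(buf, capacity);
}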

deps/v8/src/SConscript (1)

@@ -101,6 +101,7 @@ SOURCES = {
objects.cc
objects-printer.cc
objects-visiting.cc
once.cc
parser.cc
preparser.cc
preparse-data.cc

deps/v8/src/api.cc (191)

@@ -1430,7 +1430,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUC16CharacterStream stream(
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
@@ -1439,11 +1439,11 @@ ScriptData* ScriptData::PreCompile(const char* input, int length) {
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUC16CharacterStream stream(
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
} else {
i::GenericStringUC16CharacterStream stream(str, 0, str->length());
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
}
@@ -3064,8 +3064,11 @@ bool Object::SetAccessor(Handle<String> name,
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
return !result.is_null() && !result->IsUndefined();
if (result.is_null() || result->IsUndefined()) return false;
if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
return true;
}
@@ -3690,7 +3693,104 @@ int String::Length() const {
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
return str->Utf8Length();
return i::Utf8Length(str);
}
// Will fail with a negative answer if the recursion depth is too high.
static int RecursivelySerializeToUtf8(i::String* string,
char* buffer,
int start,
int end,
int recursion_budget,
int32_t previous_character,
int32_t* last_character) {
int utf8_bytes = 0;
while (true) {
if (string->IsAsciiRepresentation()) {
i::String::WriteToFlat(string, buffer, start, end);
*last_character = unibrow::Utf16::kNoPreviousCharacter;
return utf8_bytes + end - start;
}
switch (i::StringShape(string).representation_tag()) {
case i::kExternalStringTag: {
const uint16_t* data = i::ExternalTwoByteString::cast(string)->
ExternalTwoByteStringGetData(0);
char* current = buffer;
for (int i = start; i < end; i++) {
uint16_t character = data[i];
current +=
unibrow::Utf8::Encode(current, character, previous_character);
previous_character = character;
}
*last_character = previous_character;
return static_cast<int>(utf8_bytes + current - buffer);
}
case i::kSeqStringTag: {
const uint16_t* data =
i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
char* current = buffer;
for (int i = start; i < end; i++) {
uint16_t character = data[i];
current +=
unibrow::Utf8::Encode(current, character, previous_character);
previous_character = character;
}
*last_character = previous_character;
return static_cast<int>(utf8_bytes + current - buffer);
}
case i::kSlicedStringTag: {
i::SlicedString* slice = i::SlicedString::cast(string);
unsigned offset = slice->offset();
string = slice->parent();
start += offset;
end += offset;
continue;
}
case i::kConsStringTag: {
i::ConsString* cons_string = i::ConsString::cast(string);
i::String* first = cons_string->first();
int boundary = first->length();
if (start >= boundary) {
// Only need RHS.
string = cons_string->second();
start -= boundary;
end -= boundary;
continue;
} else if (end <= boundary) {
// Only need LHS.
string = first;
} else {
if (recursion_budget == 0) return -1;
int extra_utf8_bytes =
RecursivelySerializeToUtf8(first,
buffer,
start,
boundary,
recursion_budget - 1,
previous_character,
&previous_character);
if (extra_utf8_bytes < 0) return extra_utf8_bytes;
buffer += extra_utf8_bytes;
utf8_bytes += extra_utf8_bytes;
string = cons_string->second();
start = 0;
end -= boundary;
}
}
}
}
UNREACHABLE();
return 0;
}
bool String::MayContainNonAscii() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
return false;
}
return !str->HasOnlyAsciiChars();
}
@@ -3703,11 +3803,12 @@ int String::WriteUtf8(char* buffer,
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
len = str->length();
len = string_length;
} else {
len = i::Min(capacity, str->length());
}
@@ -3720,6 +3821,42 @@ int String::WriteUtf8(char* buffer,
return len;
}
if (capacity == -1 || capacity / 3 >= string_length) {
int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
const int kMaxRecursion = 100;
int utf8_bytes =
RecursivelySerializeToUtf8(*str,
buffer,
0,
string_length,
kMaxRecursion,
previous,
&previous);
if (utf8_bytes >= 0) {
// Success serializing with recursion.
if ((options & NO_NULL_TERMINATION) == 0 &&
(capacity > utf8_bytes || capacity == -1)) {
buffer[utf8_bytes++] = '\0';
}
if (nchars_ref != NULL) *nchars_ref = string_length;
return utf8_bytes;
}
FlattenString(str);
// Recurse once. This time around the string is flat and the serializing
// with recursion will certainly succeed.
return WriteUtf8(buffer, capacity, nchars_ref, options);
} else if (capacity >= string_length) {
// First check that the buffer is large enough. If it is, then recurse
// once without a capacity limit, which will get into the other branch of
// this 'if'.
int utf8_bytes = i::Utf8Length(str);
if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
if (utf8_bytes <= capacity) {
return WriteUtf8(buffer, -1, nchars_ref, options);
}
}
// Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
@@ -3736,11 +3873,13 @@ int String::WriteUtf8(char* buffer,
int i;
int pos = 0;
int nchars = 0;
int previous = unibrow::Utf16::kNoPreviousCharacter;
for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
i::uc32 c = write_input_buffer.GetNext();
int written = unibrow::Utf8::Encode(buffer + pos, c);
int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
pos += written;
nchars++;
previous = c;
}
if (i < len) {
// For the last characters we need to check the length for each one
@@ -3749,16 +3888,33 @@ int String::WriteUtf8(char* buffer,
char intermediate[unibrow::Utf8::kMaxEncodedSize];
for (; i < len && pos < capacity; i++) {
i::uc32 c = write_input_buffer.GetNext();
int written = unibrow::Utf8::Encode(intermediate, c);
if (pos + written <= capacity) {
for (int j = 0; j < written; j++)
buffer[pos + j] = intermediate[j];
if (unibrow::Utf16::IsTrailSurrogate(c) &&
unibrow::Utf16::IsLeadSurrogate(previous)) {
// We can't use the intermediate buffer here because the encoding
// of surrogate pairs is done under assumption that you can step
// back and fix the UTF8 stream. Luckily we only need space for one
// more byte, so there is always space.
ASSERT(pos < capacity);
int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
ASSERT(written == 1);
pos += written;
nchars++;
} else {
// We've reached the end of the buffer
break;
int written =
unibrow::Utf8::Encode(intermediate,
c,
unibrow::Utf16::kNoPreviousCharacter);
if (pos + written <= capacity) {
for (int j = 0; j < written; j++)
buffer[pos + j] = intermediate[j];
pos += written;
nchars++;
} else {
// We've reached the end of the buffer
break;
}
}
previous = c;
}
}
if (nchars_ref != NULL) *nchars_ref = nchars;
@@ -4014,7 +4170,7 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
bool v8::V8::Initialize() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
i::Isolate* isolate = i::Isolate::Current();
if (isolate != NULL && isolate->IsInitialized()) {
return true;
}
@@ -4907,7 +5063,7 @@ Local<Number> v8::Number::New(double value) {
Local<Integer> v8::Integer::New(int32_t value) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
if (i::Smi::IsValid(value)) {
return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
@@ -5185,7 +5341,7 @@ bool V8::IsExecutionTerminating(Isolate* isolate) {
Isolate* Isolate::GetCurrent() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
i::Isolate* isolate = i::Isolate::Current();
return reinterpret_cast<Isolate*>(isolate);
}
@@ -5240,7 +5396,8 @@ String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
length_ = str->Utf8Length();
i::Handle<i::String> i_str = Utils::OpenHandle(*str);
length_ = i::Utf8Length(i_str);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
}
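
The rewritten WriteUtf8 above takes its recursive fast path when capacity == -1 or capacity / 3 >= string_length. The bound holds because a lone UTF-16 code unit encodes to at most 3 UTF-8 bytes, and a surrogate pair (2 units) encodes to 4, so 3 bytes per unit always suffices. A sketch of embedder-side sizing that keeps callers on that fast path (the helper is hypothetical, assuming the 3.9-era API):

#include <v8.h>

#include <string>
#include <vector>

// Hypothetical helper: 3 bytes per UTF-16 code unit is a safe worst case,
// so capacity / 3 >= length and the fast path above always applies.
std::string ToUtf8(v8::Handle<v8::String> str) {
  int capacity = 3 * str->Length();
  std::vector<char> buf(capacity > 0 ? capacity : 1);
  int written = str->WriteUtf8(&buf[0], capacity, NULL,
                               v8::String::NO_NULL_TERMINATION);
  return std::string(&buf[0], written);
}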

deps/v8/src/arm/assembler-arm-inl.h (12)

@@ -80,7 +80,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
return kPointerSize;
}
@@ -364,8 +364,14 @@ Address Assembler::target_address_at(Address pc) {
}
void Assembler::set_target_at(Address constant_pool_entry,
Address target) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::set_external_target_at(Address constant_pool_entry,
Address target) {
Memory::Address_at(constant_pool_entry) = target;
}

deps/v8/src/arm/assembler-arm.cc (21)

@@ -139,7 +139,6 @@ bool RelocInfo::IsCodedSpecially() {
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -238,25 +237,27 @@ MemOperand::MemOperand(Register rn, Register rm,
// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
al | B26 | 4 | NegPreIndex | sp.code() * B16;
al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -273,13 +274,13 @@ const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | fp.code() * B16;
al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | fp.code() * B16;
al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | fp.code() * B16;
al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;

deps/v8/src/arm/assembler-arm.h (70)

@@ -124,24 +124,47 @@ struct Register {
int code_;
};
const Register no_reg = { -1 };
const Register r0 = { 0 };
const Register r1 = { 1 };
const Register r2 = { 2 };
const Register r3 = { 3 };
const Register r4 = { 4 };
const Register r5 = { 5 };
const Register r6 = { 6 };
const Register r7 = { 7 };
const Register r8 = { 8 }; // Used as context register.
const Register r9 = { 9 }; // Used as lithium codegen scratch register.
const Register r10 = { 10 }; // Used as roots register.
const Register fp = { 11 };
const Register ip = { 12 };
const Register sp = { 13 };
const Register lr = { 14 };
const Register pc = { 15 };
// These constants are used in several locations, including static initializers
const int kRegister_no_reg_Code = -1;
const int kRegister_r0_Code = 0;
const int kRegister_r1_Code = 1;
const int kRegister_r2_Code = 2;
const int kRegister_r3_Code = 3;
const int kRegister_r4_Code = 4;
const int kRegister_r5_Code = 5;
const int kRegister_r6_Code = 6;
const int kRegister_r7_Code = 7;
const int kRegister_r8_Code = 8;
const int kRegister_r9_Code = 9;
const int kRegister_r10_Code = 10;
const int kRegister_fp_Code = 11;
const int kRegister_ip_Code = 12;
const int kRegister_sp_Code = 13;
const int kRegister_lr_Code = 14;
const int kRegister_pc_Code = 15;
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
const Register r1 = { kRegister_r1_Code };
const Register r2 = { kRegister_r2_Code };
const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
// Used as lithium codegen scratch register.
const Register r9 = { kRegister_r9_Code };
// Used as roots register.
const Register r10 = { kRegister_r10_Code };
const Register fp = { kRegister_fp_Code };
const Register ip = { kRegister_ip_Code };
const Register sp = { kRegister_sp_Code };
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
// Single word VFP register.
struct SwVfpRegister {
@@ -581,6 +604,7 @@ extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
@@ -662,20 +686,18 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void set_target_at(Address constant_pool_entry, Address target);
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
Address target) {
set_target_at(constant_pool_entry, target);
}
Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
static const int kCallTargetSize = kPointerSize;
static const int kExternalTargetSize = kPointerSize;
static const int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
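
The kRegister_*_Code constants exist for the reason the comment above gives: they "are used in several locations, including static initializers". sp.code() is a runtime member-function call, so a namespace-scope const built from it needs a dynamic initializer, whereas a plain int constant folds at compile time; this is the "Remove static initializers in v8 (issue 1859)" item from the ChangeLog, and the REG(...) macro in code-stubs-arm.cc below exists for the same reason. A reduced illustration (not V8 code):

// Reduced illustration, not V8 code.
struct Register { int code_; int code() const { return code_; } };

const int kRegister_sp_Code = 13;
const Register sp = { kRegister_sp_Code };

// Dynamic initialization: sp.code() is not a constant expression in
// C++03, so this initializer runs at startup, before main().
const int kDynamic = sp.code() * 16;

// Static initialization: folded by the compiler at build time.
const int kStatic = kRegister_sp_Code * 16;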

deps/v8/src/arm/code-stubs-arm.cc (64)

@@ -480,7 +480,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
static const uint32_t exponent_word_for_1 =
const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
@@ -4237,7 +4237,7 @@ Register InstanceofStub::right() { return r1; }
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
static const int kDisplacement =
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4622,10 +4622,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// sp[8]: subject string
// sp[12]: JSRegExp object
static const int kLastMatchInfoOffset = 0 * kPointerSize;
static const int kPreviousIndexOffset = 1 * kPointerSize;
static const int kSubjectOffset = 2 * kPointerSize;
static const int kJSRegExpOffset = 3 * kPointerSize;
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime, invoke_regexp;
@@ -4824,8 +4824,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
static const int kRegExpExecuteArguments = 8;
static const int kParameterRegisters = 4;
const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -5714,7 +5714,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// scratch: -
// Perform a number of probes in the symbol table.
static const int kProbes = 4;
const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -5839,9 +5839,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
static const int kToOffset = 0 * kPointerSize;
static const int kFromOffset = 1 * kPointerSize;
static const int kStringOffset = 2 * kPointerSize;
const int kToOffset = 0 * kPointerSize;
const int kFromOffset = 1 * kPointerSize;
const int kStringOffset = 2 * kPointerSize;
__ Ldrd(r2, r3, MemOperand(sp, kToOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
@@ -7085,43 +7085,45 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
#define REG(Name) { kRegister_ ## Name ## _Code }
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ r6, r4, r7, EMIT_REMEMBERED_SET },
{ r6, r2, r7, EMIT_REMEMBERED_SET },
{ REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
{ REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ r3, r4, r5, EMIT_REMEMBERED_SET },
{ REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
{ r4, r1, r2, OMIT_REMEMBERED_SET },
{ REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r1, r2, r3, EMIT_REMEMBERED_SET },
{ r3, r2, r1, EMIT_REMEMBERED_SET },
{ REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
{ REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ r3, r2, r4, EMIT_REMEMBERED_SET },
{ r2, r3, r4, EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ r2, r3, r9, EMIT_REMEMBERED_SET },
{ r2, r3, r9, OMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
{ r6, r2, r0, EMIT_REMEMBERED_SET },
{ r2, r6, r9, EMIT_REMEMBERED_SET },
{ REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ r5, r0, r6, EMIT_REMEMBERED_SET },
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
#undef REG
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7148,7 +7150,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,

deps/v8/src/arm/codegen-arm.cc (7)

@@ -37,8 +37,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
@@ -50,6 +49,10 @@ TranscendentalFunction CreateTranscendentalFunction(
}
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

deps/v8/src/arm/deoptimizer-arm.cc (30)

@@ -108,6 +108,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
@@ -118,10 +122,16 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
(al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
if (FLAG_count_based_interrupts) {
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
@@ -155,13 +165,21 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(+4, cs);
if (FLAG_count_based_interrupts) {
patcher.masm()->b(+16, pl);
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
patcher.masm()->b(+4, cs);
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.

deps/v8/src/arm/full-codegen-arm.cc (209)

@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -109,7 +110,9 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 24;
}
@@ -132,32 +135,11 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
if (FLAG_trace_opt_verbose) {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
if (maybe_cell->To(&cell)) {
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
__ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ Jump(compile_stub, RelocInfo::CODE_TARGET, eq);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -336,20 +318,68 @@ void FullCodeGenerator::ClearAccumulator() {
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(r2, Operand(profiling_counter_));
__ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
__ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
}
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = 10;
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
__ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
}
static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 142;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
if (FLAG_count_based_interrupts) {
int weight = 1;
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
InterruptStub stub;
__ CallStub(&stub);
} else {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
}
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
if (FLAG_count_based_interrupts) {
EmitProfilingCounterReset();
}
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -371,6 +401,32 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
__ b(pl, &ok);
__ push(r0);
if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
InterruptStub stub;
__ CallStub(&stub);
}
__ pop(r0);
EmitProfilingCounterReset();
__ bind(&ok);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -888,7 +944,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
@@ -1186,7 +1242,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, mode);
CallIC(ic, mode);
}
@@ -1270,7 +1326,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
break;
}
@@ -1410,6 +1466,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void FullCodeGenerator::EmitAccessor(Expression* expression) {
if (expression == NULL) {
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
} else {
VisitForStackValue(expression);
}
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1445,6 +1511,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1470,7 +1537,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1493,27 +1560,29 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
case ObjectLiteral::Property::SETTER:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
__ push(r0);
VisitForStackValue(key);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
} else {
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
VisitForStackValue(value);
}
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
accessor_table.lookup(key)->second->setter = value;
break;
}
}
// Emit code to define accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
__ ldr(r0, MemOperand(sp)); // Duplicate receiver.
__ push(r0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
}
if (expr->has_function()) {
ASSERT(result_saved);
__ ldr(r0, MemOperand(sp));
@@ -1736,7 +1805,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1744,7 +1813,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1771,7 +1840,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1854,7 +1923,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1895,7 +1964,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic);
CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1908,7 +1977,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic);
CallIC(ic);
break;
}
}
@@ -1925,7 +1994,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -2043,7 +2112,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2089,7 +2158,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2123,6 +2192,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
}
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id) {
ic_total_count_++;
__ Call(code, rmode, ast_id);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2140,7 +2217,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ Call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2173,7 +2250,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3770,7 +3847,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ Call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3925,7 +4002,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -4036,7 +4113,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4068,7 +4145,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4085,7 +4162,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4111,7 +4188,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic);
CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4294,7 +4371,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
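
EmitStackCheck above scales the profiling-counter decrement with the size of the loop body: weight = Min(kMaxBackEdgeWeight, Max(1, distance / kBackEdgeDistanceDivisor)), so large loops burn the interrupt budget faster than tight ones and reach the InterruptStub sooner. A standalone sketch of that arithmetic, using the constants defined in this file:

#include <algorithm>
#include <cstdio>

static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 142;

// Mirrors the back-edge weight computation in EmitStackCheck above.
int BackEdgeWeight(int distance_in_bytes) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance_in_bytes / kBackEdgeDistanceDivisor));
}

int main() {
  std::printf("%d %d %d\n",
              BackEdgeWeight(50),       // tight loop: floor of 1
              BackEdgeWeight(1420),     // mid-size loop: 10
              BackEdgeWeight(100000));  // huge loop: capped at 127
  return 0;
}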

deps/v8/src/arm/lithium-arm.cc (8)

@@ -1098,6 +1098,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
return AssignEnvironment(DefineSameAsFirst(result));
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);

deps/v8/src/arm/lithium-arm.h (17)

@@ -178,7 +178,8 @@ class LCodeGen;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField)
V(DateField) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -468,6 +469,20 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
inputs_[1] = function;
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
};
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,

deps/v8/src/arm/lithium-codegen-arm.cc (77)

@@ -648,7 +648,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -2800,15 +2799,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
ASSERT(receiver.is(r0)); // Used for parameter count.
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@ -2849,6 +2843,18 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
ASSERT(receiver.is(r0)); // Used for parameter count.
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@ -4601,34 +4607,51 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
// Copy elements backing store header.
ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, elements_offset + i));
}
}
// Copy elements backing store content.
ASSERT(!has_elements || elements->IsFixedArray());
int elements_length = has_elements ? elements->length() : 0;
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
// Copy elements backing store content.
int elements_length = has_elements ? elements->length() : 0;
if (elements->IsFixedDoubleArray()) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little endian mode...
int32_t value_low = value & 0xFFFFFFFF;
int32_t value_high = value >> 32;
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(r2, Operand(value_low));
__ str(r2, FieldMemOperand(result, total_offset));
__ mov(r2, Operand(value_high));
__ str(r2, FieldMemOperand(result, total_offset + 4));
}
} else if (elements->IsFixedArray()) {
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
__ str(r2, FieldMemOperand(result, total_offset));
}
}
} else {
__ mov(r2, Operand(value));
__ str(r2, FieldMemOperand(result, total_offset));
UNREACHABLE();
}
}
}
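
The new FixedDoubleArray branch materializes each double as two 32-bit immediates, low word first, which is why the comment restricts it to little-endian targets. The bit-splitting in isolation, as portable C++:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.5;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // get_representation() analogue
  // Low word first: matches the two str instructions at offsets +0 and +4.
  uint32_t value_low  = static_cast<uint32_t>(bits & 0xFFFFFFFF);
  uint32_t value_high = static_cast<uint32_t>(bits >> 32);
  std::printf("low=0x%08x high=0x%08x\n", value_low, value_high);

  // Reassembling the halves round-trips the value exactly.
  uint64_t bits2 = (static_cast<uint64_t>(value_high) << 32) | value_low;
  double d2;
  std::memcpy(&d2, &bits2, sizeof d2);
  std::printf("round-trip: %g\n", d2);
  return 0;
}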

4
deps/v8/src/arm/macro-assembler-arm.cc

@ -3647,8 +3647,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
bind(&in_bounds);
Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
vcvt_u32_f64(s0, temp_double_reg);
vmov(result_reg, s0);
vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
vmov(result_reg, temp_double_reg.low());
bind(&done);
}
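
The fix keeps the conversion in the low half of the same VFP register pair instead of clobbering s0. Numerically the sequence is: clamp to [0, 255], add 0.5, then truncate (vcvt_u32_f64 rounds toward zero by default). A plain-C++ model of that rounding behavior:

#include <cstdint>
#include <cstdio>

// Clamp to [0, 255] with round-half-up, mirroring the VFP sequence above.
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;      // NaN and non-positive values clamp to 0
  if (value >= 255.0) return 255;    // upper bound
  return static_cast<uint8_t>(value + 0.5);  // in bounds: add 0.5, truncate
}

int main() {
  std::printf("%d %d %d %d\n",
              ClampDoubleToUint8(-3.2),    // 0
              ClampDoubleToUint8(126.4),   // 126
              ClampDoubleToUint8(126.5),   // 127
              ClampDoubleToUint8(300.0));  // 255
  return 0;
}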

2
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -472,7 +472,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUC16CharCode);
ASSERT(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));

16
deps/v8/src/arm/stub-cache-arm.cc

@ -1387,14 +1387,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, miss);
}
// Check that the maps haven't changed.
__ JumpIfSmi(r0, miss);
CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}
@ -2813,14 +2807,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, &miss);
}
// Check that the map of the global has not changed.
__ JumpIfSmi(r0, &miss);
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.

54
deps/v8/src/assembler.cc

@ -45,6 +45,7 @@
#include "ic.h"
#include "isolate.h"
#include "jsregexp.h"
#include "lazy-instance.h"
#include "platform.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
@ -84,15 +85,36 @@
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Common double constants.
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
double minus_zero;
double zero;
double uint8_max_value;
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
};
struct InitializeDoubleConstants {
static void Construct(DoubleConstant* double_constants) {
double_constants->min_int = kMinInt;
double_constants->one_half = 0.5;
double_constants->minus_zero = -0.0;
double_constants->uint8_max_value = 255;
double_constants->zero = 0.0;
double_constants->canonical_non_hole_nan = OS::nan_value();
double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants->negative_infinity = -V8_INFINITY;
}
};
static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
double_constants = LAZY_INSTANCE_INITIALIZER;
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::uint8_max_value = 255;
const double DoubleConstant::zero = 0.0;
const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
@ -937,49 +959,49 @@ ExternalReference ExternalReference::scheduled_exception_address(
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::min_int)));
&double_constants.Pointer()->min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::one_half)));
&double_constants.Pointer()->one_half));
}
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::minus_zero)));
&double_constants.Pointer()->minus_zero));
}
ExternalReference ExternalReference::address_of_zero() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::zero)));
&double_constants.Pointer()->zero));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::uint8_max_value)));
&double_constants.Pointer()->uint8_max_value));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));
&double_constants.Pointer()->negative_infinity));
}
ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
&double_constants.Pointer()->canonical_non_hole_nan));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::the_hole_nan)));
&double_constants.Pointer()->the_hole_nan));
}
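
The motivation for this hunk: static const double members are initialized by the C++ runtime at an unspecified point during static initialization, while a LazyInstance is built on first use and has a stable address that can back an ExternalReference. A sketch of the same pattern using only the standard library, with std::call_once in place of V8's LazyInstance machinery:

#include <cmath>
#include <cstdio>
#include <mutex>

struct DoubleConstants {
  double min_int, one_half, minus_zero, zero;
  double uint8_max_value, negative_infinity, canonical_non_hole_nan;
};

// Built on first use; the address stays valid for the process lifetime,
// so it can be handed out the way ExternalReference hands out pointers.
const DoubleConstants* double_constants() {
  static DoubleConstants c;
  static std::once_flag once;
  std::call_once(once, [] {
    c.min_int = -2147483648.0;   // kMinInt as a double
    c.one_half = 0.5;
    c.minus_zero = -0.0;
    c.zero = 0.0;
    c.uint8_max_value = 255;
    c.negative_infinity = -INFINITY;
    c.canonical_non_hole_nan = std::nan("");
  });
  return &c;
}

int main() {
  std::printf("one_half at %p = %g\n",
              static_cast<const void*>(&double_constants()->one_half),
              double_constants()->one_half);
  return 0;
}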

15
deps/v8/src/assembler.h

@ -67,21 +67,6 @@ class AssemblerBase: public Malloced {
int jit_cookie_;
};
// -----------------------------------------------------------------------------
// Common double constants.
class DoubleConstant: public AllStatic {
public:
static const double min_int;
static const double one_half;
static const double minus_zero;
static const double zero;
static const double uint8_max_value;
static const double negative_infinity;
static const double canonical_non_hole_nan;
static const double the_hole_nan;
};
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.

12
deps/v8/src/ast.cc

@ -399,6 +399,9 @@ bool FunctionDeclaration::IsInlineable() const {
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
@ -602,6 +605,13 @@ void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
: Handle<Map>::null();
}
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@ -1054,8 +1064,6 @@ void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
add_flag(kDontSelfOptimize);
}

15
deps/v8/src/ast.h

@ -1320,6 +1320,11 @@ class ObjectLiteral: public MaterializedLiteral {
Expression* value() { return value_; }
Kind kind() { return kind_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
bool IsCompileTimeValue();
void set_emit_store(bool emit_store);
@ -1336,6 +1341,7 @@ class ObjectLiteral: public MaterializedLiteral {
Expression* value_;
Kind kind_;
bool emit_store_;
Handle<Map> receiver_type_;
};
DECLARE_NODE_TYPE(ObjectLiteral)
@ -1360,6 +1366,12 @@ class ObjectLiteral: public MaterializedLiteral {
kHasFunction = 1 << 1
};
struct Accessors: public ZoneObject {
Accessors() : getter(NULL), setter(NULL) { }
Expression* getter;
Expression* setter;
};
protected:
template<class> friend class AstNodeFactory;
@ -1515,6 +1527,7 @@ class Property: public Expression {
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
bool IsUninitialized() { return is_uninitialized_; }
protected:
template<class> friend class AstNodeFactory;
@ -1528,6 +1541,7 @@ class Property: public Expression {
key_(key),
pos_(pos),
is_monomorphic_(false),
is_uninitialized_(false),
is_array_length_(false),
is_string_length_(false),
is_string_access_(false),
@ -1540,6 +1554,7 @@ class Property: public Expression {
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
bool is_uninitialized_ : 1;
bool is_array_length_ : 1;
bool is_string_length_ : 1;
bool is_string_access_ : 1;

45
deps/v8/src/builtins.cc

@ -33,6 +33,7 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "heap-profiler.h"
#include "mark-compact.h"
#include "vm-state-inl.h"
@ -380,6 +381,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@ -508,8 +511,7 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
@ -645,8 +647,7 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@ -757,8 +758,7 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
AssertNoAllocation no_gc;
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, k,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
@ -831,9 +831,8 @@ BUILTIN(ArraySplice) {
if (!maybe_array->To(&result_array)) return maybe_array;
{
AssertNoAllocation no_gc;
// Fill newly created array.
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, actual_start,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
}
@ -883,12 +882,11 @@ BUILTIN(ArraySplice) {
FixedArray* new_elms = FixedArray::cast(obj);
{
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS,
CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
@ -973,14 +971,13 @@ BUILTIN(ArrayConcat) {
if (result_len == 0) return result_array;
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
@ -1570,30 +1567,30 @@ struct BuiltinDesc {
BuiltinExtraArguments extra_args;
};
#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
class BuiltinFunctionTable {
public:
BuiltinFunctionTable() {
Builtins::InitBuiltinFunctionTable();
BuiltinDesc* functions() {
CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
return functions_;
}
static const BuiltinDesc* functions() { return functions_; }
private:
static BuiltinDesc functions_[Builtins::builtin_count + 1];
OnceType once_;
BuiltinDesc functions_[Builtins::builtin_count + 1];
friend class Builtins;
};
BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
static const BuiltinFunctionTable builtin_function_table_init;
static BuiltinFunctionTable builtin_function_table =
BUILTIN_FUNCTION_TABLE_INIT;
// Define array of pointers to generators and C builtin functions.
// We do this in a sort of roundabout way so that we can do the initialization
// within the lexical scope of Builtins:: and within a context where
// Code::Flags names a non-abstract type.
void Builtins::InitBuiltinFunctionTable() {
BuiltinDesc* functions = BuiltinFunctionTable::functions_;
BuiltinDesc* functions = builtin_function_table.functions_;
functions[builtin_count].generator = NULL;
functions[builtin_count].c_code = NULL;
functions[builtin_count].s_name = NULL;
@ -1637,7 +1634,7 @@ void Builtins::SetUp(bool create_heap_objects) {
// Create a scope for the handles in the builtins.
HandleScope scope(isolate);
const BuiltinDesc* functions = BuiltinFunctionTable::functions();
const BuiltinDesc* functions = builtin_function_table.functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
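
Same idea as the double-constants change: the table no longer relies on a static constructor (the old builtin_function_table_init object) but is filled in by the first caller of functions(), guarded by CallOnce so concurrent callers cannot race. A reduced model, with std::once_flag standing in for V8's OnceType and hypothetical builtin names:

#include <cstdio>
#include <mutex>

struct BuiltinDesc { const char* s_name; };
const int kBuiltinCount = 3;

class BuiltinFunctionTable {
 public:
  BuiltinDesc* functions() {
    // The first caller pays for initialization; later callers just read.
    std::call_once(once_, [this] { InitTable(); });
    return functions_;
  }
 private:
  void InitTable() {
    static const char* names[kBuiltinCount] =
        {"ArrayPush", "ArrayPop", "ArrayConcat"};
    for (int i = 0; i < kBuiltinCount; i++) functions_[i].s_name = names[i];
  }
  std::once_flag once_;
  BuiltinDesc functions_[kBuiltinCount];
};

// No static constructor runs for this object.
static BuiltinFunctionTable builtin_function_table;

int main() {
  std::printf("%s\n", builtin_function_table.functions()[0].s_name);
  return 0;
}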

13
deps/v8/src/codegen.cc

@ -71,13 +71,6 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
} else {
print_source = FLAG_print_source;
print_ast = FLAG_print_ast;
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if (print_source && !filter.is_empty()) {
print_source = info->function()->name()->IsEqualTo(filter);
}
if (print_ast && !filter.is_empty()) {
print_ast = info->function()->name()->IsEqualTo(filter);
}
ftype = "user-defined";
}
@ -124,11 +117,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
FunctionLiteral* function = info->function();
bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
if (print_code && match) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");

6
deps/v8/src/codegen.h

@ -87,10 +87,10 @@ namespace internal {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
typedef double (*TranscendentalFunction)(double x);
typedef double (*UnaryMathFunction)(double x);
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {

20
deps/v8/src/compiler.cc

@ -243,12 +243,15 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
// Take --hydrogen-filter into account.
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
Handle<String> name = info->function()->debug_name();
bool match = filter.is_empty() || name->IsEqualTo(filter);
if (!match) {
info->SetCode(code);
return true;
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
&& name->IsEqualTo(filter.SubVector(1, filter.length())))
|| (filter[0] != '-' && !name->IsEqualTo(filter))) {
info->SetCode(code);
return true;
}
}
// Recompile the unoptimized version of the code if the current version
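
The reworked --hydrogen-filter gains a negation form: "foo" optimizes only foo, "-foo" optimizes everything except foo, and an empty filter optimizes everything. A sketch of the predicate, with std::string in place of Vector<const char>:

#include <cassert>
#include <string>

bool PassesHydrogenFilter(const std::string& name, const std::string& filter) {
  if (filter.empty()) return true;          // no filter: optimize everything
  if (filter[0] == '-')                     // "-foo": optimize all but foo
    return name != filter.substr(1);
  return name == filter;                    // "foo": optimize only foo
}

int main() {
  assert(PassesHydrogenFilter("foo", ""));
  assert(PassesHydrogenFilter("foo", "foo"));
  assert(!PassesHydrogenFilter("bar", "foo"));
  assert(!PassesHydrogenFilter("foo", "-foo"));
  assert(PassesHydrogenFilter("bar", "-foo"));
  return 0;
}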
@ -450,6 +453,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
script->set_compilation_state(
Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
isolate->debugger()->OnAfterCompile(
@ -518,7 +524,9 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
if (FLAG_use_strict) info.SetLanguageMode(STRICT_MODE);
if (FLAG_use_strict) {
info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
}
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);

8
deps/v8/src/d8.cc

@ -1436,6 +1436,13 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (!options.last_run) {
context.Dispose();
#if !defined(V8_SHARED)
if (i::FLAG_send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
#endif // !V8_SHARED
}
#ifndef V8_SHARED
@ -1490,6 +1497,7 @@ int Shell::Main(int argc, char* argv[]) {
int stress_runs = i::FLAG_stress_runs;
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Run %d/%d ============\n", i + 1, stress_runs);
options.last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
#endif

29
deps/v8/src/debug-agent.cc

@ -372,8 +372,11 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
// Calculate the message size in UTF-8 encoding.
int utf8_len = 0;
int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < message.length(); i++) {
utf8_len += unibrow::Utf8::Length(message[i]);
uint16_t character = message[i];
utf8_len += unibrow::Utf8::Length(character, previous);
previous = character;
}
// Send the header.
@ -388,17 +391,33 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < message.length(); i++) {
// Write next UTF-8 encoded character to buffer.
uint16_t character = message[i];
buffer_position +=
unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
ASSERT(buffer_position < kBufferSize);
// Send buffer if full or last character is encoded.
if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
conn->Send(buffer, buffer_position);
buffer_position = 0;
if (kBufferSize - buffer_position <
unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
i == message.length() - 1) {
if (unibrow::Utf16::IsLeadSurrogate(character)) {
const int kEncodedSurrogateLength =
unibrow::Utf16::kUtf8BytesToCodeASurrogate;
ASSERT(buffer_position >= kEncodedSurrogateLength);
conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
for (int i = 0; i < kEncodedSurrogateLength; i++) {
buffer[i] = buffer[buffer_position + i];
}
buffer_position = kEncodedSurrogateLength;
} else {
conn->Send(buffer, buffer_position);
buffer_position = 0;
}
}
previous = character;
}
return true;
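
Both loops now thread the previous code unit into Utf8::Length/Encode so a lead+trail surrogate pair is costed as one 4-byte UTF-8 sequence rather than two 3-byte ones, and the flush logic holds back an encoded lead surrogate so a pair is never split across two Send calls. The counting rule in isolation, hand-rolled rather than using the unibrow API:

#include <cstdint>
#include <cstdio>

bool IsLeadSurrogate(uint16_t c)  { return (c & 0xFC00) == 0xD800; }
bool IsTrailSurrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }

// UTF-8 byte cost of a UTF-16 sequence. A lone surrogate costs 3 bytes
// (that is how WriteUtf8 encodes it); a valid pair costs 4 in total, so
// pairing saves 2 bytes, the same bookkeeping the previous-character
// overload performs.
int Utf8Length(const uint16_t* s, int n) {
  int total = 0;
  uint16_t previous = 0;  // stands in for Utf16::kNoPreviousCharacter
  for (int i = 0; i < n; i++) {
    uint16_t c = s[i];
    if (c < 0x80)       total += 1;
    else if (c < 0x800) total += 2;
    else                total += 3;
    if (IsTrailSurrogate(c) && IsLeadSurrogate(previous)) total -= 2;
    previous = c;
  }
  return total;
}

int main() {
  uint16_t pair[] = {0xD83D, 0xDE00};   // U+1F600: one 4-byte sequence
  uint16_t lone[] = {0xD83D};           // unpaired lead surrogate: 3 bytes
  std::printf("%d %d\n", Utf8Length(pair, 2), Utf8Length(lone, 1));
  return 0;
}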

25
deps/v8/src/debug.cc

@ -1223,6 +1223,18 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
}
void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
Handle<FixedArray> new_bindings(function->function_bindings());
Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
Debug::FloodWithOneShot(shared_info);
}
}
void Debug::FloodHandlerWithOneShot() {
// Iterate through the JavaScript stack looking for handlers.
StackFrame::Id id = break_frame_id();
@ -1442,8 +1454,10 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
expressions_count - 2 - call_function_arg_count);
if (fun->IsJSFunction()) {
Handle<JSFunction> js_function(JSFunction::cast(fun));
// Don't step into builtins.
if (!js_function->IsBuiltin()) {
if (js_function->shared()->bound()) {
Debug::FloodBoundFunctionWithOneShot(js_function);
} else if (!js_function->IsBuiltin()) {
// Don't step into builtins.
// It will also compile target function if it's not compiled yet.
FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
}
@ -1639,8 +1653,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// Flood the function with one-shot break points if it is called from where
// step into was requested.
if (fp == step_in_fp()) {
// Don't allow step into functions in the native context.
if (!function->IsBuiltin()) {
if (function->shared()->bound()) {
// Handle Function.prototype.bind
Debug::FloodBoundFunctionWithOneShot(function);
} else if (!function->IsBuiltin()) {
// Don't allow step into functions in the native context.
if (function->shared()->code() ==
Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==

1
deps/v8/src/debug.h

@ -239,6 +239,7 @@ class Debug {
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);

3
deps/v8/src/deoptimizer.cc

@ -358,8 +358,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
output_count_(0),
jsframe_count_(0),
output_(NULL),
frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@ -847,7 +845,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::ARGUMENTS_OBJECT: {
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
ASSERT(frame_index == 0); // Only supported for first frame.
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,

9
deps/v8/src/deoptimizer.h

@ -220,11 +220,6 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
static int frame_alignment_marker_offset() {
return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
static int has_alignment_padding_offset() {
return OFFSET_OF(Deoptimizer, has_alignment_padding_);
}
static int GetDeoptimizedCodeCount(Isolate* isolate);
@ -337,10 +332,6 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
// Frames can be dynamically padded on ia32 to align untagged doubles.
Object* frame_alignment_marker_;
intptr_t has_alignment_padding_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;

259
deps/v8/src/elements.cc

@ -131,95 +131,132 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
FixedArray* from_obj,
void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
ASSERT(to_obj->map() != HEAP->fixed_cow_array_map());
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
Address to = to_obj->address() + FixedArray::kHeaderSize;
Address from = from_obj->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to) + to_start,
reinterpret_cast<Object**>(from) + from_start,
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
Heap* heap = from_obj->GetHeap();
WriteBarrierMode mode = to_obj->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(to_obj->address(),
to_obj->OffsetOfElementAt(to_start),
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
heap->incremental_marking()->RecordWrites(to_obj);
heap->incremental_marking()->RecordWrites(to);
}
}
static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
int raw_copy_size) {
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length());
ASSERT(to != from);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(copy_size == -1 ||
(copy_size + static_cast<int>(to_start)) <= to->length());
WriteBarrierMode mode = to_kind == FAST_ELEMENTS
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
uint32_t copy_limit = (copy_size == -1)
? to->length()
: Min(to_start + copy_size, static_cast<uint32_t>(to->length()));
for (int i = 0; i < from->Capacity(); ++i) {
Object* key = from->KeyAt(i);
if (key->IsNumber()) {
uint32_t entry = static_cast<uint32_t>(key->Number());
if (entry >= to_start && entry < copy_limit) {
Object* value = from->ValueAt(i);
ASSERT(to_kind == FAST_ELEMENTS || value->IsSmi());
to->set(entry, value, mode);
}
if (copy_size == 0) return;
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
ASSERT(!value->IsTheHole());
to->set(i + to_start, value, SKIP_WRITE_BARRIER);
} else {
to->set_the_hole(i + to_start);
}
}
if (to_kind == FAST_ELEMENTS) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
heap->incremental_marking()->RecordWrites(to);
}
}
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
FixedDoubleArray* from_obj,
FixedDoubleArray* from,
uint32_t from_start,
FixedArray* to_obj,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
int raw_copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
if (copy_size == 0) return from_obj;
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from_obj->get(i + from_start);
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
@ -229,42 +266,109 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
// can't be taken from new space.
if (!maybe_value->ToObject(&value)) {
ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
Heap* heap = from_obj->GetHeap();
Heap* heap = from->GetHeap();
MaybeObject* maybe_value_object =
heap->AllocateHeapNumber(from_obj->get_scalar(i + from_start),
heap->AllocateHeapNumber(from->get_scalar(i + from_start),
TENURED);
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
to_obj->set(i + to_start, value, UPDATE_WRITE_BARRIER);
to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
}
}
return to_obj;
return to;
}
static void CopyDoubleToDoubleElements(FixedDoubleArray* from_obj,
static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
uint32_t from_start,
FixedDoubleArray* to_obj,
FixedDoubleArray* to,
uint32_t to_start,
int copy_size) {
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
Address to = to_obj->address() + FixedDoubleArray::kHeaderSize;
Address from = from_obj->address() + FixedDoubleArray::kHeaderSize;
to += kDoubleSize * to_start;
from += kDoubleSize * from_start;
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
from_address += kDoubleSize * from_start;
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to),
reinterpret_cast<Object**>(from),
CopyWords(reinterpret_cast<Object**>(to_address),
reinterpret_cast<Object**>(from_address),
words_per_double * copy_size);
}
static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
for (int i = 0; i < copy_size; i++) {
Object* hole_or_object = from->get(i + from_start);
if (hole_or_object->IsTheHole()) {
to->set_the_hole(i + to_start);
} else {
to->set(i + to_start, hole_or_object->Number());
}
}
}
static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT(copy_size + static_cast<int>(to_start) <= to->length());
if (copy_size == 0) return;
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
to->set(i + to_start, from->ValueAt(entry)->Number());
} else {
to->set_the_hole(i + to_start);
}
}
}
// Base class for element handler implementations. Contains
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize methods for which the element
@ -384,6 +488,9 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = from_holder->elements();
}
if (from->length() == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, copy_size);
}
@ -626,12 +733,16 @@ class FastObjectElementsAccessor
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
CopyObjectToObjectElements(
&no_gc, FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
}
case FAST_DOUBLE_ELEMENTS:
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
UNREACHABLE();
}
@ -726,7 +837,8 @@ class FastDoubleElementsAccessor
JSObject* holder,
uint32_t key,
FixedDoubleArray* backing_store) {
return !backing_store->is_the_hole(key);
return key < static_cast<uint32_t>(backing_store->length()) &&
!backing_store->is_the_hole(key);
}
};
@ -997,6 +1109,11 @@ class DictionaryElementsAccessor
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
UNREACHABLE();
}
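
All the copy routines above now decode two negative sentinels from raw_copy_size (declared in the elements.h hunk below): kCopyToEnd copies up to the end of the shorter array, and kCopyToEndAndInitializeToHole additionally hole-fills the destination's uncopied tail. A reduced model on std::vector, with nullptr playing the role of the hole:

#include <algorithm>
#include <cassert>
#include <vector>

const int kCopyToEnd = -1;
const int kCopyToEndAndInitializeToHole = -2;

void CopyElements(const std::vector<const char*>& from, int from_start,
                  std::vector<const char*>& to, int to_start,
                  int raw_copy_size) {
  int copy_size = raw_copy_size;
  if (raw_copy_size < 0) {
    copy_size = std::min(static_cast<int>(from.size()) - from_start,
                         static_cast<int>(to.size()) - to_start);
    if (raw_copy_size == kCopyToEndAndInitializeToHole) {
      // Hole-fill the destination past the copied region.
      for (int i = to_start + copy_size; i < static_cast<int>(to.size()); i++)
        to[i] = nullptr;
    }
  }
  for (int i = 0; i < copy_size; i++) to[to_start + i] = from[from_start + i];
}

int main() {
  std::vector<const char*> from = {"a", "b"};
  std::vector<const char*> to(5, "x");
  CopyElements(from, 0, to, 0, kCopyToEndAndInitializeToHole);
  assert(to[1] == from[1] && to[2] == nullptr && to[4] == nullptr);
  return 0;
}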

15
deps/v8/src/elements.h

@ -88,6 +88,15 @@ class ElementsAccessor {
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// elements from the source array after source_start to the destination array.
static const int kCopyToEnd = -1;
// If kCopyToEndAndInitializeToHole is specified as the copy_size to
// CopyElements, it copies all elements from the source array after
// source_start to the destination array, padding any remaining uninitialized
// elements in the destination array with the hole.
static const int kCopyToEndAndInitializeToHole = -2;
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
@ -104,7 +113,8 @@ class ElementsAccessor {
FixedArrayBase* to,
ElementsKind to_kind,
FixedArrayBase* from = NULL) {
return CopyElements(from_holder, 0, to, to_kind, 0, -1, from);
return CopyElements(from_holder, 0, to, to_kind, 0,
kCopyToEndAndInitializeToHole, from);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
@ -146,8 +156,7 @@ class ElementsAccessor {
};
void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
FixedArray* from_obj,
void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,

3
deps/v8/src/execution.cc

@ -885,7 +885,8 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
}
if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
"StackGuard GC request");
stack_guard->Continue(GC_REQUEST);
}

5
deps/v8/src/factory.cc

@ -382,6 +382,8 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_compilation_state(
Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
script->set_wrapper(*wrapper);
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
@ -552,7 +554,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
function_info->allows_lazy_compilation()) {
function_info->allows_lazy_compilation() &&
!function_info->optimization_disabled()) {
result->MarkForLazyRecompilation();
}
return result;

55
deps/v8/src/flag-definitions.h

@ -81,17 +81,41 @@
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
JSArguments();
JSArguments(int argc, const char** argv);
int argc() const;
const char** argv();
const char*& operator[](int idx);
JSArguments& operator=(JSArguments args);
inline int argc() const {
return static_cast<int>(storage_[0]);
}
inline const char** argv() const {
return reinterpret_cast<const char**>(storage_[1]);
}
inline const char*& operator[] (int idx) const {
return argv()[idx];
}
inline JSArguments& operator=(JSArguments args) {
set_argc(args.argc());
set_argv(args.argv());
return *this;
}
static JSArguments Create(int argc, const char** argv) {
JSArguments args;
args.set_argc(argc);
args.set_argv(argv);
return args;
}
private:
int argc_;
const char** argv_;
void set_argc(int argc) {
storage_[0] = argc;
}
void set_argv(const char** argv) {
storage_[1] = reinterpret_cast<AtomicWord>(argv);
}
public:
// Contains argc and argv. Unfortunately we have to store these two fields
// into a single one to avoid making the initialization macro (which would be
// "{ 0, NULL }") contain a coma.
AtomicWord storage_[2];
};
#endif
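
The point of the rewrite is that JSArguments becomes a POD whose only member is an array, so a static flag of this type can be initialized with the brace macro JSARGUMENTS_INIT and no static constructor runs. The same trick in self-contained form, with intptr_t standing in for AtomicWord:

#include <cstdint>
#include <cstdio>

struct JSArguments {
  int argc() const { return static_cast<int>(storage_[0]); }
  const char** argv() const {
    return reinterpret_cast<const char**>(storage_[1]);
  }
  static JSArguments Create(int argc, const char** argv) {
    JSArguments args;
    args.storage_[0] = argc;
    args.storage_[1] = reinterpret_cast<intptr_t>(argv);
    return args;
  }
  // Both fields live in one array so "{{}}" is a valid initializer
  // (a pair like { 0, NULL } would put a comma inside the macro).
  intptr_t storage_[2];
};

static JSArguments js_arguments = {{}};  // zero-initialized, no constructor

int main() {
  static const char* argv[] = {"--foo", "bar"};
  js_arguments = JSArguments::Create(2, argv);
  std::printf("%d %s\n", js_arguments.argc(), js_arguments.argv()[1]);
  return 0;
}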
@ -135,7 +159,7 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_string(hydrogen_filter, "", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
@ -168,14 +192,15 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
DEFINE_bool(inline_construct, false, "inline constructor calls")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, false, "enable all profiler experiments")
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
DEFINE_bool(self_optimization, false,
@ -191,7 +216,7 @@ DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
DEFINE_int(interrupt_budget, 5900,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 40,
DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
@ -282,6 +307,7 @@ DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
// execution.cc
DEFINE_int(stack_size, kPointerSize * 128,
@ -324,6 +350,9 @@ DEFINE_bool(trace_incremental_marking, false,
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
DEFINE_bool(send_idle_notification, false,
"Send idle notifcation between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
@ -417,7 +446,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
DEFINE_args(js_arguments, JSARGUMENTS_INIT,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)

15
deps/v8/src/flags.cc

@ -411,7 +411,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
for (int k = i; k < *argc; k++) {
js_argv[k - start_pos] = StrDup(argv[k]);
}
*flag->args_variable() = JSArguments(js_argc, js_argv);
*flag->args_variable() = JSArguments::Create(js_argc, js_argv);
i = *argc; // Consume all arguments
break;
}
@ -534,19 +534,6 @@ void FlagList::PrintHelp() {
}
}
JSArguments::JSArguments()
: argc_(0), argv_(NULL) {}
JSArguments::JSArguments(int argc, const char** argv)
: argc_(argc), argv_(argv) {}
int JSArguments::argc() const { return argc_; }
const char** JSArguments::argv() { return argv_; }
const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
JSArguments& JSArguments::operator=(JSArguments args) {
argc_ = args.argc_;
argv_ = args.argv_;
return *this;
}
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS

9
deps/v8/src/frames.cc

@ -31,6 +31,7 @@
#include "deoptimizer.h"
#include "frames-inl.h"
#include "full-codegen.h"
#include "lazy-instance.h"
#include "mark-compact.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
@ -1301,7 +1302,7 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Heap* heap = isolate_->heap();
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
if (large_page != NULL) {
return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
@ -1380,12 +1381,12 @@ struct JSCallerSavedCodeData {
};
static const JSCallerSavedCodeData kCallerSavedCodeData;
static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
LAZY_INSTANCE_INITIALIZER;
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
return kCallerSavedCodeData.reg_code[n];
return caller_saved_code_data.Get().reg_code[n];
}

3
deps/v8/src/full-codegen.cc

@ -313,7 +313,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable());
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize));
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);

24
deps/v8/src/full-codegen.h

@ -470,6 +470,8 @@ class FullCodeGenerator: public AstVisitor {
Label* done);
void EmitVariableLoad(VariableProxy* proxy);
void EmitAccessor(Expression* expression);
// Expects the arguments and the function already pushed.
void EmitResolvePossiblyDirectEval(int arg_count);
@ -804,6 +806,28 @@ class FullCodeGenerator: public AstVisitor {
};
// A map from property names to getter/setter pairs allocated in the zone.
class AccessorTable: public TemplateHashMap<Literal,
ObjectLiteral::Accessors,
ZoneListAllocationPolicy> {
public:
explicit AccessorTable(Zone* zone) :
TemplateHashMap<Literal,
ObjectLiteral::Accessors,
ZoneListAllocationPolicy>(Literal::Match),
zone_(zone) { }
Iterator lookup(Literal* literal) {
Iterator it = find(literal, true);
if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
return it;
}
private:
Zone* zone_;
};
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
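
AccessorTable exists so the object-literal code can pair up a getter and a setter declared separately for the same property name and emit one combined define-accessor operation. The lookup-or-insert shape, sketched with std::map instead of the zone-allocated hash map:

#include <cstdio>
#include <map>
#include <string>

struct Expression { const char* text; };

struct Accessors {
  Expression* getter = nullptr;
  Expression* setter = nullptr;
};

int main() {
  std::map<std::string, Accessors> table;
  Expression get_x = {"get x() {...}"};
  Expression set_x = {"set x(v) {...}"};

  // operator[] default-constructs the slot on first sight of the name,
  // like AccessorTable::lookup(); the second access reuses the same slot.
  table["x"].getter = &get_x;
  table["x"].setter = &set_x;

  for (const auto& entry : table)
    std::printf("%s: getter=%s setter=%s\n", entry.first.c_str(),
                entry.second.getter ? "yes" : "no",
                entry.second.setter ? "yes" : "no");
  return 0;
}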

9
deps/v8/src/gdb-jit.cc

@ -33,6 +33,7 @@
#include "compiler.h"
#include "global-handles.h"
#include "messages.h"
#include "platform.h"
#include "natives.h"
#include "scopeinfo.h"
@ -2035,7 +2036,7 @@ static void AddUnwindInfo(CodeDescription* desc) {
}
Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
void GDBJITInterface::AddCode(const char* name,
@ -2045,7 +2046,7 @@ void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex_);
ScopedLock lock(mutex.Pointer());
AssertNoAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@ -2126,7 +2127,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex_);
ScopedLock lock(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@ -2146,7 +2147,7 @@ void GDBJITInterface::RemoveCode(Code* code) {
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
ScopedLock lock(mutex_);
ScopedLock lock(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
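
Replacing the mutex_ member initialized via OS::CreateMutex() with a LazyMutex removes mutex creation from static initialization. The modern-C++ equivalent is a function-local static, constructed thread-safely on first use:

#include <cstdio>
#include <mutex>

// Constructed on first call; nothing runs at static-initialization time.
std::mutex& gdbjit_mutex() {
  static std::mutex m;
  return m;
}

void AddCode(const char* name) {
  std::lock_guard<std::mutex> lock(gdbjit_mutex());  // ScopedLock analogue
  std::printf("registered %s\n", name);
}

int main() {
  AddCode("LazyCompile:foo");
  return 0;
}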

3
deps/v8/src/gdb-jit.h

@ -132,9 +132,6 @@ class GDBJITInterface: public AllStatic {
static void RemoveCode(Code* code);
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
private:
static Mutex* mutex_;
};
#define GDBJIT(action) GDBJITInterface::action

3
deps/v8/src/globals.h

@ -267,8 +267,9 @@ const int kBinary32ExponentShift = 23;
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// ASCII/UC16 constants
// ASCII/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
const int kASCIISize = kCharSize;

158
deps/v8/src/handles.cc

@ -800,4 +800,162 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
}
// This method determines the type of string involved and then gets the UTF8
// length of the string. It doesn't flatten the string and has log(n) recursion
// for a string of length n. If the failure flag gets set, then we have to
// flatten the string and retry. Failures are caused by surrogate pairs in deep
// cons strings.
// Single surrogate characters that are encountered in the UTF-16 character
// sequence of the input string get counted as 3 UTF-8 bytes, because that
// is the way that WriteUtf8 will encode them. Surrogate pairs are counted and
// encoded as one 4-byte UTF-8 sequence.
// This function conceptually uses recursion on the two halves of cons strings.
// However, in order to avoid the recursion going too deep it recurses on the
// second string of the cons, but iterates on the first substring (by manually
// eliminating it as a tail recursion). This means it counts the UTF-8 length
// from the end to the start, which makes no difference to the total.
// Surrogate pairs are recognized even if they are split across two sides of a
// cons, which complicates the implementation somewhat. Therefore, too deep
// recursion cannot always be avoided. This case is detected, and the failure
// flag is set, a signal to the caller that the string should be flattened and
// the operation retried.
int Utf8LengthHelper(String* input,
int from,
int to,
bool followed_by_surrogate,
int max_recursion,
bool* failure,
bool* starts_with_surrogate) {
if (from == to) return 0;
int total = 0;
bool dummy;
while (true) {
if (input->IsAsciiRepresentation()) {
*starts_with_surrogate = false;
return total + to - from;
}
switch (StringShape(input).representation_tag()) {
case kConsStringTag: {
ConsString* str = ConsString::cast(input);
String* first = str->first();
String* second = str->second();
int first_length = first->length();
if (first_length - from > to - first_length) {
if (first_length < to) {
// Right hand side is shorter. No need to check the recursion depth
// since this can only happen log(n) times.
bool right_starts_with_surrogate = false;
total += Utf8LengthHelper(second,
0,
to - first_length,
followed_by_surrogate,
max_recursion - 1,
failure,
&right_starts_with_surrogate);
if (*failure) return 0;
followed_by_surrogate = right_starts_with_surrogate;
input = first;
to = first_length;
} else {
// We only need the left hand side.
input = first;
}
} else {
if (first_length > from) {
// Left hand side is shorter.
if (first->IsAsciiRepresentation()) {
total += first_length - from;
*starts_with_surrogate = false;
starts_with_surrogate = &dummy;
input = second;
from = 0;
to -= first_length;
} else if (second->IsAsciiRepresentation()) {
followed_by_surrogate = false;
total += to - first_length;
input = first;
to = first_length;
} else if (max_recursion > 0) {
bool right_starts_with_surrogate = false;
// Recursing on the long one. This may fail.
total += Utf8LengthHelper(second,
0,
to - first_length,
followed_by_surrogate,
max_recursion - 1,
failure,
&right_starts_with_surrogate);
if (*failure) return 0;
input = first;
to = first_length;
followed_by_surrogate = right_starts_with_surrogate;
} else {
*failure = true;
return 0;
}
} else {
// We only need the right hand side.
input = second;
from = 0;
to -= first_length;
}
}
continue;
}
case kExternalStringTag:
case kSeqStringTag: {
Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
const uc16* p = vector.start();
int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = from; i < to; i++) {
uc16 c = p[i];
total += unibrow::Utf8::Length(c, previous);
previous = c;
}
if (to - from > 0) {
if (unibrow::Utf16::IsLeadSurrogate(previous) &&
followed_by_surrogate) {
total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
}
if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
*starts_with_surrogate = true;
}
}
return total;
}
case kSlicedStringTag: {
SlicedString* str = SlicedString::cast(input);
int offset = str->offset();
input = str->parent();
from += offset;
to += offset;
continue;
}
default:
break;
}
UNREACHABLE();
return 0;
}
return 0;
}
int Utf8Length(Handle<String> str) {
bool dummy;
bool failure;
int len;
const int kRecursionBudget = 100;
do {
failure = false;
len = Utf8LengthHelper(
*str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
if (failure) FlattenString(str);
} while (failure);
return len;
}
} } // namespace v8::internal
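
The driver above makes the failure protocol concrete: Utf8LengthHelper counts without flattening, but if a surrogate pair straddles a cons boundary past the recursion budget it sets the failure flag; the caller flattens and retries, and a flat string cannot fail again. The control flow with the string machinery stubbed out:

#include <cassert>

struct String { bool flat; };

// Stub: pretend counting fails whenever the string is not flat, the way
// the real helper fails on a surrogate pair split across a deep cons.
int Utf8LengthHelper(const String& s, bool* failure) {
  if (!s.flat) { *failure = true; return 0; }
  return 42;  // placeholder length
}

int Utf8Length(String* str) {
  bool failure;
  int len;
  do {
    failure = false;
    len = Utf8LengthHelper(*str, &failure);
    if (failure) str->flat = true;  // FlattenString analogue
  } while (failure);                // at most one retry: flat cannot fail
  return len;
}

int main() {
  String s = {false};
  assert(Utf8Length(&s) == 42);
  return 0;
}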

2
deps/v8/src/handles.h

@ -174,6 +174,8 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
int Utf8Length(Handle<String> str);
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,

73
deps/v8/src/hashmap.h

@ -36,15 +36,15 @@ namespace v8 {
namespace internal {
template<class AllocationPolicy>
class TemplateHashMap {
class TemplateHashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
TemplateHashMap(MatchFun match, uint32_t initial_capacity = 8);
TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
~TemplateHashMap();
~TemplateHashMapImpl();
// HashMap entries are (key, value, hash) triplets.
// Some clients may not need to use the value slot
@ -99,10 +99,10 @@ class TemplateHashMap {
void Resize();
};
typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap;
typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
template<class P>
TemplateHashMap<P>::TemplateHashMap(MatchFun match,
TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
uint32_t initial_capacity) {
match_ = match;
Initialize(initial_capacity);
@ -110,13 +110,13 @@ TemplateHashMap<P>::TemplateHashMap(MatchFun match,
template<class P>
TemplateHashMap<P>::~TemplateHashMap() {
TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
P::Delete(map_);
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
@ -146,7 +146,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
template<class P>
void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
@ -206,7 +206,7 @@ void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
template<class P>
void TemplateHashMap<P>::Clear() {
void TemplateHashMapImpl<P>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
@ -217,13 +217,14 @@ void TemplateHashMap<P>::Clear() {
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Start() const {
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
return Next(map_ - 1);
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@ -236,7 +237,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
uint32_t hash) {
ASSERT(key != NULL);
@ -258,7 +259,7 @@ typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
template<class P>
void TemplateHashMap<P>::Initialize(uint32_t capacity) {
void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
if (map_ == NULL) {
@ -271,7 +272,7 @@ void TemplateHashMap<P>::Initialize(uint32_t capacity) {
template<class P>
void TemplateHashMap<P>::Resize() {
void TemplateHashMapImpl<P>::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
@ -290,6 +291,50 @@ void TemplateHashMap<P>::Resize() {
P::Delete(map);
}
// A hash map for pointer keys and values with an STL-like interface.
template<class Key, class Value, class AllocationPolicy>
class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
public:
STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
struct value_type {
Key* first;
Value* second;
};
class Iterator {
public:
Iterator& operator++() {
entry_ = map_->Next(entry_);
return *this;
}
value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
private:
Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
map_(map), entry_(entry) { }
const TemplateHashMapImpl<AllocationPolicy>* map_;
typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
friend class TemplateHashMap;
};
TemplateHashMap(
typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
: TemplateHashMapImpl<AllocationPolicy>(match) { }
Iterator begin() const { return Iterator(this, this->Start()); }
Iterator end() const { return Iterator(this, NULL); }
Iterator find(Key* key, bool insert = false) {
return Iterator(this, this->Lookup(key, key->Hash(), insert));
}
};
} } // namespace v8::internal
#endif // V8_HASHMAP_H_
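A minimal usage sketch of the new STL-style wrapper (the key/value types, toy hash, and match function here are hypothetical; only the interface shown in this header is assumed):

struct MyKey {
  uint32_t Hash() { return 42; }  // toy hash for illustration
};
struct MyValue {
  int payload;
};

static bool PointerMatch(void* a, void* b) { return a == b; }

void Example(MyKey* key, MyValue* value) {
  typedef TemplateHashMap<MyKey, MyValue, FreeStoreAllocationPolicy> Map;
  Map map(PointerMatch);
  map.find(key, true)->second = value;  // lookup-or-insert, STL-ish access
  for (Map::Iterator it = map.begin(); it != map.end(); ++it) {
    it->second->payload++;              // iterate (first, second) pairs
  }
}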

199
deps/v8/src/heap.cc

@ -60,8 +60,7 @@
namespace v8 {
namespace internal {
static Mutex* gc_initializer_mutex = OS::CreateMutex();
static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
Heap::Heap()
@ -82,7 +81,7 @@ Heap::Heap()
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(128l * LUMP_OF_MEMORY),
max_executable_size_(256l * LUMP_OF_MEMORY),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@ -93,6 +92,7 @@ Heap::Heap()
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@ -105,6 +105,7 @@ Heap::Heap()
gc_post_processing_depth_(0),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_allowed_(true),
@ -2471,34 +2472,26 @@ bool Heap::CreateInitialObjects() {
set_the_hole_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-2),
Smi::FromInt(-4),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
Smi::FromInt(-3),
Smi::FromInt(-2),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
Smi::FromInt(-4),
Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
{ MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
Smi::FromInt(-5),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_frame_alignment_marker(Oddball::cast(obj));
STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@ -3401,6 +3394,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@ -4186,8 +4180,6 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
PretenureFlag pretenure) {
// V8 only supports characters in the Basic Multilingual Plane.
const uc32 kMaxSupportedChar = 0xFFFF;
// Count the number of characters in the UTF-8 string and check if
// it is an ASCII string.
Access<UnicodeCache::Utf8Decoder>
@ -4195,8 +4187,12 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
decoder->Reset(string.start(), string.length());
int chars = 0;
while (decoder->has_more()) {
decoder->GetNext();
chars++;
uint32_t r = decoder->GetNext();
if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
chars++;
} else {
chars += 2;
}
}
Object* result;
@ -4207,10 +4203,15 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
// Convert and copy the characters into the new object.
String* string_result = String::cast(result);
decoder->Reset(string.start(), string.length());
for (int i = 0; i < chars; i++) {
uc32 r = decoder->GetNext();
if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
string_result->Set(i, r);
int i = 0;
while (i < chars) {
uint32_t r = decoder->GetNext();
if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
} else {
string_result->Set(i++, r);
}
}
return result;
}
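The split into code units follows the standard UTF-16 arithmetic. A self-contained sketch (constants per the UTF-16 spec; the names mirror unibrow::Utf16 but this is not the V8 implementation):

#include <stdint.h>

static uint16_t LeadSurrogate(uint32_t c) {
  return static_cast<uint16_t>(0xD800 + (((c - 0x10000) >> 10) & 0x3FF));
}
static uint16_t TrailSurrogate(uint32_t c) {
  return static_cast<uint16_t>(0xDC00 + (c & 0x3FF));
}
// Example: U+1F600 (> 0xFFFF) is stored as the two code units 0xD83D and
// 0xDE00, which is why the counting loop above adds 2 for such characters.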
@ -4267,7 +4268,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
uint32_t hash_field) {
ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
// Determine whether the string is ASCII.
bool is_ascii = true;
while (buffer->has_more()) {
@ -4313,8 +4314,15 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
for (int i = 0; i < chars; i++) {
answer->Set(i, buffer->GetNext());
int i = 0;
while (i < chars) {
uint32_t character = buffer->GetNext();
if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
} else {
answer->Set(i++, character);
}
}
return answer;
}
@ -4808,11 +4816,62 @@ void Heap::EnsureHeapIsIterable() {
}
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
// This flag prevents incremental marking from requesting GC via stack guard
idle_notification_will_schedule_next_gc_ = true;
incremental_marking()->Step(step_size);
idle_notification_will_schedule_next_gc_ = false;
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
if (gc_count_at_last_idle_gc_ == gc_count_) {
// No GC since the last full GC; the mutator is probably not active.
isolate_->compilation_cache()->Clear();
uncommit = true;
}
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
UncommitFromSpace();
}
}
}
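For reference, the hint-to-step-size mapping used just below works out as follows (values derived from the constants in the code):

// hint = 5    -> size_factor = Min(Max(5, 30), 1000) / 10    = 3   (floor)
// hint = 250  -> size_factor = Min(Max(250, 30), 1000) / 10  = 25
// hint = 4000 -> size_factor = Min(Max(4000, 30), 1000) / 10 = 100 (ceiling)
// step_size = size_factor * IncrementalMarking::kAllocatedThreshold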
bool Heap::IdleNotification(int hint) {
if (hint >= 1000) return IdleGlobalGC();
if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
const int kMaxHint = 1000;
intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
// The size factor is in range [3..100].
intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
if (hint >= kMaxHint) {
// The embedder is requesting a lot of GC work after context disposal, so
// age inline caches so that they don't keep objects from the old context
// alive.
AgeInlineCaches();
}
int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
if (hint >= mark_sweep_time && !FLAG_expose_gc &&
incremental_marking()->IsStopped()) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: contexts disposed");
} else {
AdvanceIdleIncrementalMarking(step_size);
contexts_disposed_ = 0;
}
// Make sure that we have no pending context disposals.
// Take into account that we might have decided to delay full collection
// because incremental marking is in progress.
ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
return false;
}
if (hint >= kMaxHint || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
return true;
return IdleGlobalGC();
}
// By doing small chunks of GC work in each IdleNotification,
@ -4824,9 +4883,6 @@ bool Heap::IdleNotification(int hint) {
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
// The size factor is in range [3..100].
intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (incremental_marking()->IsStopped()) {
if (!IsSweepingComplete() &&
@ -4853,32 +4909,14 @@ bool Heap::IdleNotification(int hint) {
}
if (incremental_marking()->IsStopped()) {
if (hint < 1000 && !WorthStartingGCWhenIdle()) {
if (!WorthStartingGCWhenIdle()) {
FinishIdleRound();
return true;
}
incremental_marking()->Start();
}
// This flag prevents incremental marking from requesting GC via stack guard
idle_notification_will_schedule_next_gc_ = true;
incremental_marking()->Step(step_size);
idle_notification_will_schedule_next_gc_ = false;
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
if (gc_count_at_last_idle_gc_ == gc_count_) {
// No GC since the last full GC; the mutator is probably not active.
isolate_->compilation_cache()->Clear();
uncommit = true;
}
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
new_space_.Shrink();
UncommitFromSpace();
}
}
AdvanceIdleIncrementalMarking(step_size);
return false;
}
@ -4911,13 +4949,7 @@ bool Heap::IdleGlobalGC() {
}
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: contexts disposed");
} else {
CollectGarbage(NEW_SPACE, "idle notification");
}
CollectGarbage(NEW_SPACE, "idle notification");
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
@ -4936,23 +4968,6 @@ bool Heap::IdleGlobalGC() {
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
finished = true;
} else if (contexts_disposed_ > 0) {
if (FLAG_expose_gc) {
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: contexts disposed");
last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
// notification count to avoid letting idle notifications for
// context disposal garbage collections start a potentially too
// aggressive idle GC cycle.
if (number_idle_notifications_ <= 1) {
number_idle_notifications_ = 0;
uncommit = false;
}
} else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
@ -4960,11 +4975,6 @@ bool Heap::IdleGlobalGC() {
finished = true;
}
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
// Take into account that we might have decided to delay full collection
// because incremental marking is in progress.
ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
if (uncommit) UncommitFromSpace();
return finished;
@ -5612,15 +5622,15 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.SizeAsInt();
*stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
*stats->old_pointer_space_size = old_pointer_space_->Size();
*stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
*stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
*stats->old_data_space_size = old_data_space_->Size();
*stats->old_data_space_size = old_data_space_->SizeOfObjects();
*stats->old_data_space_capacity = old_data_space_->Capacity();
*stats->code_space_size = code_space_->Size();
*stats->code_space_size = code_space_->SizeOfObjects();
*stats->code_space_capacity = code_space_->Capacity();
*stats->map_space_size = map_space_->Size();
*stats->map_space_size = map_space_->SizeOfObjects();
*stats->map_space_capacity = map_space_->Capacity();
*stats->cell_space_size = cell_space_->Size();
*stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
@ -5855,7 +5865,7 @@ bool Heap::SetUp(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
gc_initializer_mutex->Lock();
gc_initializer_mutex.Pointer()->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
initialized_gc = true;
@ -5863,7 +5873,7 @@ bool Heap::SetUp(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
}
gc_initializer_mutex->Unlock();
gc_initializer_mutex.Pointer()->Unlock();
MarkMapPointersAsEncoded(false);
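A sketch of the LazyMutex pattern adopted here (assuming only the interface this diff exercises): the initializer is a compile-time constant, so the static needs no dynamic initialization, and the OS mutex is created on first use.

static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;  // POD, no static ctor

void Guarded() {
  my_mutex.Pointer()->Lock();    // allocates the underlying Mutex lazily
  // ... critical section ...
  my_mutex.Pointer()->Unlock();
}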
@ -6958,4 +6968,19 @@ void Heap::FreeQueuedChunks() {
chunks_queued_for_free_ = NULL;
}
void Heap::RememberUnmappedPage(Address page, bool compacted) {
uintptr_t p = reinterpret_cast<uintptr_t>(page);
// Tag the page pointer to make it findable in the dump file.
if (compacted) {
p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
} else {
p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
}
remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
reinterpret_cast<Address>(p);
remembered_unmapped_pages_index_++;
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
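Because page addresses are page-aligned, XOR-ing a tag into the low bits leaves the page base recoverable from a crash dump. A sketch of decoding one of these slots (assumes only the power-of-two Page::kPageSize used above):

#include <stdint.h>

uintptr_t PageBase(uintptr_t remembered, uintptr_t page_size) {
  return remembered & ~(page_size - 1);  // the original page address
}
uintptr_t PageTag(uintptr_t remembered, uintptr_t page_size) {
  return remembered & (page_size - 1);   // 0xc1ead (compacted) or 0x1d1ed
}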
} } // namespace v8::internal

44
deps/v8/src/heap.h

@ -77,7 +77,6 @@ namespace internal {
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The first 32 roots above this line should be boring from a GC point of */ \
/* view. This means they are never in new space and never on a page that */ \
@ -1481,6 +1480,13 @@ class Heap {
void ClearNormalizedMapCaches();
// Clears the cache of ICs related to this map.
void ClearCacheOnMap(Map* map) {
if (FLAG_cleanup_code_caches_at_gc) {
map->ClearCodeCache(this);
}
}
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
@ -1583,6 +1589,19 @@ class Heap {
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
// Global inline caching age: it is incremented on some GCs after context
// disposal. We use it to flush inline caches.
int global_ic_age() {
return global_ic_age_;
}
void AgeInlineCaches() {
++global_ic_age_;
}
private:
Heap();
@ -1610,6 +1629,8 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
int global_ic_age_;
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
@ -1634,6 +1655,11 @@ class Heap {
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
// For post mortem debugging.
static const int kRememberedUnmappedPages = 128;
int remembered_unmapped_pages_index_;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
// Total length of the strings we failed to flatten since the last GC.
int unflattened_strings_length_;
@ -1781,7 +1807,6 @@ class Heap {
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@ -1960,9 +1985,24 @@ class Heap {
return incremental_marking()->WorthActivating();
}
// Estimates how many milliseconds a Mark-Sweep would take to complete.
// In the idle notification handler we assume that this function will return:
// - a number less than 10 for small heaps, which are less than 8Mb.
// - a number greater than 10 for large heaps, which are greater than 32Mb.
int TimeMarkSweepWouldTakeInMs() {
// Rough estimate of how many megabytes of heap can be processed in 1 ms.
static const int kMbPerMs = 2;
int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
return heap_size_mb / kMbPerMs;
}
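// A worked example of the estimate above, under its own constants:
//   SizeOfObjects() == 16 MB -> 16 / 2 = 8 ms   (a "small" heap)
//   SizeOfObjects() == 64 MB -> 64 / 2 = 32 ms  (a "large" heap)
// IdleNotification weighs this number against the embedder's hint when
// choosing between a full collection and an incremental-marking step.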
// Returns true if no more GC work is left.
bool IdleGlobalGC();
void AdvanceIdleIncrementalMarking(intptr_t step_size);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;

49
deps/v8/src/hydrogen-instructions.cc

@ -885,6 +885,15 @@ HValue* HChange::Canonicalize() {
}
HValue* HWrapReceiver::Canonicalize() {
if (HasNoUses()) return NULL;
if (receiver()->type().IsJSObject()) {
return receiver();
}
return this;
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@ -2248,6 +2257,46 @@ void HIn::PrintDataTo(StringStream* stream) {
}
Representation HPhi::InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
if (value->IsUnknownOSRValue()) {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
continue;
}
if (value->representation().IsDouble()) double_occurred = true;
if (value->representation().IsInteger32()) int32_occurred = true;
if (value->representation().IsTagged()) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
if (constant->IsConvertibleToInteger()) {
int32_occurred = true;
} else if (constant->HasNumberValue()) {
double_occurred = true;
} else {
return Representation::Tagged();
}
} else {
return Representation::Tagged();
}
}
}
if (double_occurred) return Representation::Double();
if (int32_occurred) return Representation::Integer32();
return Representation::None();
}
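The outcome of the rewritten inference for a few input mixes, as implied by the code above (a sketch, not an exhaustive table):

// {Integer32, Integer32}                    -> Integer32
// {Integer32, Double}                       -> Double
// {Integer32, tagged constant 41}           -> Integer32 (convertible to int)
// {Integer32, tagged constant 0.5}          -> Double
// {anything, tagged non-number}             -> Tagged
// {Integer32, OSR value with Integer32 phi} -> Integer32 (uses the new hint)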
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG

57
deps/v8/src/hydrogen-instructions.h

@ -185,7 +185,8 @@ class LChunkBuilder;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField)
V(DateField) \
V(WrapReceiver)
#define GVN_FLAG_LIST(V) \
V(Calls) \
@ -2260,20 +2261,7 @@ class HPhi: public HValue {
SetFlag(kFlexibleRepresentation);
}
virtual Representation InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
if (value->representation().IsDouble()) double_occurred = true;
if (value->representation().IsInteger32()) int32_occurred = true;
if (value->representation().IsTagged()) return Representation::Tagged();
}
if (double_occurred) return Representation::Double();
if (int32_occurred) return Representation::Integer32();
return Representation::None();
}
virtual Representation InferredRepresentation();
virtual Range* InferRange(Zone* zone);
virtual Representation RequiredInputRepresentation(int index) {
@ -2503,6 +2491,27 @@ class HBinaryOperation: public HTemplateInstruction<3> {
};
class HWrapReceiver: public HTemplateInstruction<2> {
public:
HWrapReceiver(HValue* receiver, HValue* function) {
set_representation(Representation::Tagged());
SetOperandAt(0, receiver);
SetOperandAt(1, function);
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
HValue* receiver() { return OperandAt(0); }
HValue* function() { return OperandAt(1); }
virtual HValue* Canonicalize();
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
};
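The semantics being modeled, sketched as a comment (the JavaScript is illustrative only):

// function f() { return this; }
// f.apply(null);   // non-strict call: 'this' becomes the global object
// f.apply({});     // already a JSObject: no conversion needed
// Canonicalize() above removes the wrap entirely when the receiver's type is
// statically known to be a JSObject.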
class HApplyArguments: public HTemplateInstruction<4> {
public:
HApplyArguments(HValue* function,
@ -3414,13 +3423,27 @@ class HCallStub: public HUnaryCall {
class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
HUnknownOSRValue() { set_representation(Representation::Tagged()); }
HUnknownOSRValue()
: incoming_value_(NULL) {
set_representation(Representation::Tagged());
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
void set_incoming_value(HPhi* value) {
incoming_value_ = value;
}
HPhi* incoming_value() {
return incoming_value_;
}
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
private:
HPhi* incoming_value_;
};
@ -4284,7 +4307,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone) {
return new(zone) Range(0, String::kMaxUC16CharCode);
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
};

214
deps/v8/src/hydrogen.cc

@ -1766,6 +1766,12 @@ void HInferRepresentation::InferBasedOnInputs(HValue* current) {
ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
Representation inferred = current->InferredRepresentation();
if (inferred.IsSpecialization()) {
if (FLAG_trace_representation) {
PrintF("Changing #%d representation %s -> %s based on inputs\n",
current->id(),
r.Mnemonic(),
inferred.Mnemonic());
}
current->ChangeRepresentation(inferred);
AddDependantsToWorklist(current);
}
@ -1793,6 +1799,12 @@ void HInferRepresentation::InferBasedOnUses(HValue* value) {
Representation new_rep = TryChange(value);
if (!new_rep.IsNone()) {
if (!value->representation().Equals(new_rep)) {
if (FLAG_trace_representation) {
PrintF("Changing #%d representation %s -> %s based on uses\n",
value->id(),
r.Mnemonic(),
new_rep.Mnemonic());
}
value->ChangeRepresentation(new_rep);
AddDependantsToWorklist(value);
}
@ -2508,6 +2520,14 @@ HGraph* HGraphBuilder::CreateGraph() {
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis();
if (graph()->has_osr_loop_entry()) {
const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
}
}
HInferRepresentation rep(graph());
rep.Analyze();
@ -2613,6 +2633,10 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
AddInstruction(undefined_constant);
graph_->set_undefined_constant(undefined_constant);
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
graph()->SetArgumentsObject(object);
// Set the initial values of parameters including "this". "This" has
// parameter index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@ -2639,10 +2663,9 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
if (!scope->arguments()->IsStackAllocated()) {
return Bailout("context-allocated arguments");
}
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
graph()->SetArgumentsObject(object);
environment()->Bind(scope->arguments(), object);
environment()->Bind(scope->arguments(),
graph()->GetArgumentsObject());
}
}
@ -3077,8 +3100,8 @@ bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
}
void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!HasOsrEntryAt(statement)) return;
bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
@ -3093,10 +3116,14 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
int osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
ZoneList<HUnknownOSRValue*>* osr_values =
new(zone()) ZoneList<HUnknownOSRValue*>(length);
for (int i = 0; i < first_expression_index; ++i) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Bind(i, osr_value);
osr_values->Add(osr_value);
}
if (first_expression_index != length) {
@ -3105,9 +3132,12 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Push(osr_value);
osr_values->Add(osr_value);
}
}
graph()->set_osr_values(osr_values);
AddSimulate(osr_entry_id);
AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
HContext* context = new(zone()) HContext;
@ -3116,6 +3146,7 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
current_block()->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
set_current_block(loop_predecessor);
return true;
}
@ -3139,10 +3170,11 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@ -3181,10 +3213,12 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@ -3226,10 +3260,11 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@ -3321,10 +3356,11 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
PreProcessOsrEntry(stmt);
bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
@ -3639,22 +3675,27 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
if (!boilerplate->HasFastElements()) return false;
int length = elements->length();
for (int i = 0; i < length; i++) {
if ((*max_properties)-- == 0) return false;
Handle<Object> value = JSObject::GetElement(boilerplate, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
total_size)) {
return false;
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastElements()) {
int length = elements->length();
for (int i = 0; i < length; i++) {
if ((*max_properties)-- == 0) return false;
Handle<Object> value = JSObject::GetElement(boilerplate, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
total_size)) {
return false;
}
}
}
*total_size += FixedArray::SizeFor(length);
} else {
return false;
}
*total_size += FixedArray::SizeFor(length);
}
Handle<FixedArray> properties(boilerplate->properties());
@ -3734,18 +3775,12 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
if (property->emit_store()) {
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
Handle<String> name = Handle<String>::cast(key->handle());
HStoreNamedGeneric* store =
new(zone()) HStoreNamedGeneric(
context,
literal,
name,
value,
function_strict_mode_flag());
HInstruction* store = BuildStoreNamed(literal, value, property);
AddInstruction(store);
AddSimulate(key->id());
if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@ -3948,6 +3983,25 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
}
HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
HValue* value,
ObjectLiteral::Property* prop) {
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->handle());
ASSERT(!name.is_null());
LookupResult lookup(isolate());
Handle<Map> type = prop->GetReceiverType();
bool is_monomorphic = prop->IsMonomorphic() &&
ComputeStoredField(type, name, &lookup);
return is_monomorphic
? BuildStoreNamedField(object, name, value, type, &lookup,
true) // Needs smi and map check.
: BuildStoreNamedGeneric(object, name, value);
}
HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
HValue* value,
Expression* expr) {
@ -4474,6 +4528,10 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
}
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
HValue* context = environment()->LookupContext();
@ -5226,10 +5284,21 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
return false;
}
// Don't inline functions that use the arguments object.
// If the function uses the arguments object, check that inlining of functions
// with an arguments object is enabled and that the arguments variable is
// stack allocated.
if (function->scope()->arguments() != NULL) {
TraceInline(target, caller, "target requires special argument handling");
return false;
if (!FLAG_inline_arguments) {
TraceInline(target, caller, "target uses arguments object");
return false;
}
if (!function->scope()->arguments()->IsStackAllocated()) {
TraceInline(target,
caller,
"target uses non-stackallocated arguments object");
return false;
}
}
// All declarations must be inlineable.
@ -5307,6 +5376,12 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
function,
call_kind,
function_state()->is_construct()));
// If the function uses the arguments object, create and bind one.
if (function->scope()->arguments() != NULL) {
ASSERT(function->scope()->arguments()->IsStackAllocated());
environment()->Bind(function->scope()->arguments(),
graph()->GetArgumentsObject());
}
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
if (HasStackOverflow()) {
@ -5645,13 +5720,6 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
HValue* arg_two_value = environment()->Lookup(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions.
if (function_state()->outer() != NULL) {
Bailout("Function.prototype.apply optimization in inlined function");
return true;
}
// Found pattern f.apply(receiver, arguments).
VisitForValue(prop->obj());
if (HasStackOverflow() || current_block() == NULL) return true;
@ -5662,13 +5730,46 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
VisitForValue(args->at(0));
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* receiver = Pop();
HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
HInstruction* result =
new(zone()) HApplyArguments(function, receiver, length, elements);
result->set_position(expr->position());
ast_context()->ReturnInstruction(result, expr->id());
return true;
if (function_state()->outer() == NULL) {
HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
HInstruction* length =
AddInstruction(new(zone()) HArgumentsLength(elements));
HValue* wrapped_receiver =
AddInstruction(new(zone()) HWrapReceiver(receiver, function));
HInstruction* result =
new(zone()) HApplyArguments(function,
wrapped_receiver,
length,
elements);
result->set_position(expr->position());
ast_context()->ReturnInstruction(result, expr->id());
return true;
} else {
// We are inside an inlined function and know exactly what is inside
// the arguments object.
HValue* context = environment()->LookupContext();
HValue* wrapped_receiver =
AddInstruction(new(zone()) HWrapReceiver(receiver, function));
PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
HEnvironment* arguments_env = environment()->arguments_environment();
int parameter_count = arguments_env->parameter_count();
for (int i = 1; i < arguments_env->parameter_count(); i++) {
PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
}
HInvokeFunction* call = new(zone()) HInvokeFunction(
context,
function,
parameter_count);
Drop(parameter_count);
call->set_position(expr->position());
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
}
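The new else-branch removes the old bailout for the common forwarding idiom. Sketched as a comment (illustrative JavaScript only):

// function target(a, b) { /* ... */ }
// function forwarder() { return target.apply(this, arguments); }
// When forwarder is itself inlined, its arguments are known exactly, so the
// apply lowers to one HPushArgument per known argument plus an
// HInvokeFunction; no arguments object is materialized at runtime.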
@ -6654,6 +6755,15 @@ static bool IsLiteralCompareNil(HValue* left,
}
static bool IsLiteralCompareBool(HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
(right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
}
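What the helper recognizes, sketched as a comment:

// x === true; x === false  (strict equality, either side a boolean literal)
// Booleans are singleton oddballs on the V8 heap, so such comparisons can be
// lowered to a raw HCompareObjectEqAndBranch pointer check instead of a
// generic compare IC.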
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@ -6701,6 +6811,12 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
if (IsLiteralCompareBool(left, op, right)) {
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not

33
deps/v8/src/hydrogen.h

@ -293,7 +293,6 @@ class HGraph: public ZoneObject {
HArgumentsObject* GetArgumentsObject() const {
return arguments_object_.get();
}
bool HasArgumentsObject() const { return arguments_object_.is_set(); }
void SetArgumentsObject(HArgumentsObject* object) {
arguments_object_.set(object);
@ -314,6 +313,26 @@ class HGraph: public ZoneObject {
void Verify(bool do_full_verify) const;
#endif
bool has_osr_loop_entry() {
return osr_loop_entry_.is_set();
}
HBasicBlock* osr_loop_entry() {
return osr_loop_entry_.get();
}
void set_osr_loop_entry(HBasicBlock* entry) {
osr_loop_entry_.set(entry);
}
ZoneList<HUnknownOSRValue*>* osr_values() {
return osr_values_.get();
}
void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
osr_values_.set(values);
}
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
@ -354,6 +373,9 @@ class HGraph: public ZoneObject {
SetOncePointer<HConstant> constant_hole_;
SetOncePointer<HArgumentsObject> arguments_object_;
SetOncePointer<HBasicBlock> osr_loop_entry_;
SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@ -378,6 +400,10 @@ class HEnvironment: public ZoneObject {
return outer;
}
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
@ -887,7 +913,7 @@ class HGraphBuilder: public AstVisitor {
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
void PreProcessOsrEntry(IterationStatement* statement);
bool PreProcessOsrEntry(IterationStatement* statement);
// True iff. we are compiling for OSR and the statement is the entry.
bool HasOsrEntryAt(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
@ -1077,6 +1103,9 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildStoreNamed(HValue* object,
HValue* value,
Expression* expr);
HInstruction* BuildStoreNamed(HValue* object,
HValue* value,
ObjectLiteral::Property* prop);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,

2
deps/v8/src/ia32/assembler-ia32-inl.h

@ -88,7 +88,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
return Assembler::kSpecialTargetSize;
}

36
deps/v8/src/ia32/assembler-ia32.h

@ -97,16 +97,25 @@ struct Register {
int code_;
};
const Register eax = { 0 };
const Register ecx = { 1 };
const Register edx = { 2 };
const Register ebx = { 3 };
const Register esp = { 4 };
const Register ebp = { 5 };
const Register esi = { 6 };
const Register edi = { 7 };
const Register no_reg = { -1 };
const int kRegister_eax_Code = 0;
const int kRegister_ecx_Code = 1;
const int kRegister_edx_Code = 2;
const int kRegister_ebx_Code = 3;
const int kRegister_esp_Code = 4;
const int kRegister_ebp_Code = 5;
const int kRegister_esi_Code = 6;
const int kRegister_edi_Code = 7;
const int kRegister_no_reg_Code = -1;
const Register eax = { kRegister_eax_Code };
const Register ecx = { kRegister_ecx_Code };
const Register edx = { kRegister_edx_Code };
const Register ebx = { kRegister_ebx_Code };
const Register esp = { kRegister_esp_Code };
const Register ebp = { kRegister_ebp_Code };
const Register esi = { kRegister_esi_Code };
const Register edi = { kRegister_edi_Code };
const Register no_reg = { kRegister_no_reg_Code };
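Naming the codes lets registers appear in static aggregate initializers, which code-stubs-ia32.cc below exploits via a REG(Name) macro. A minimal sketch of the pattern (the table here is hypothetical):

#define REG(Name) { kRegister_ ## Name ## _Code }
static const Register kCallerSaved[] = { REG(eax), REG(ecx), REG(edx) };
#undef REG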
inline const char* Register::AllocationIndexToString(int index) {
@ -589,8 +598,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void set_target_at(Address instruction_payload,
Address target) {
inline static void deserialization_set_special_target_at(
Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@ -601,8 +610,7 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
static const int kCallTargetSize = kPointerSize;
static const int kExternalTargetSize = kPointerSize;
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address

41
deps/v8/src/ia32/code-stubs-ia32.cc

@ -7024,44 +7024,47 @@ struct AheadOfTimeWriteBarrierStubList {
};
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ ebx, eax, edi, EMIT_REMEMBERED_SET },
{ REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
{ ebx, ecx, edx, EMIT_REMEMBERED_SET },
{ ebx, edi, edx, OMIT_REMEMBERED_SET },
{ REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
{ REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal and CallFunctionStub.
{ ebx, ecx, edx, OMIT_REMEMBERED_SET },
{ REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ edx, ecx, ebx, EMIT_REMEMBERED_SET },
{ REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
// GenerateStoreField calls the stub with two different permutations of
// registers. This is the second.
{ ebx, ecx, edx, EMIT_REMEMBERED_SET },
{ REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
// StoreIC::GenerateNormal via GenerateDictionaryStore
{ ebx, edi, edx, EMIT_REMEMBERED_SET },
{ REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
// KeyedStoreIC::GenerateGeneric.
{ ebx, edx, ecx, EMIT_REMEMBERED_SET},
{ REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ edi, ebx, ecx, EMIT_REMEMBERED_SET},
{ edx, edi, ebx, EMIT_REMEMBERED_SET},
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ edx, ebx, edi, EMIT_REMEMBERED_SET},
{ edx, ebx, edi, OMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ eax, edx, esi, EMIT_REMEMBERED_SET},
{ edx, eax, edi, EMIT_REMEMBERED_SET},
{ REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ ebx, eax, ecx, EMIT_REMEMBERED_SET},
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
#undef REG
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@ -7089,7 +7092,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,

38
deps/v8/src/ia32/codegen-ia32.cc

@ -57,8 +57,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
@ -99,7 +98,40 @@ TranscendentalFunction CreateTranscendentalFunction(
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<TranscendentalFunction>(buffer);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
// If SSE2 is not available, we can use libc's implementation to ensure
// consistency, since fullcodegen's generated code calls into the runtime in
// that case.
if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
{
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
__ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
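A usage sketch for the new stub (UnaryMathFunction is the typedef this commit introduces; the caller shown here is hypothetical):

static UnaryMathFunction fast_sqrt_function = CreateSqrtFunction();

double FastSqrt(double x) {
  // sqrtsd via the generated stub when SSE2 is present, libc sqrt otherwise.
  return fast_sqrt_function(x);
}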

43
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -427,14 +427,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
// Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
frame_pointer -= kPointerSize;
has_alignment_padding_ = 1;
}
output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@ -692,11 +685,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
height_in_bytes + has_alignment_padding_ * kPointerSize;
// 2 = context and function in the frame.
top_address =
input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@ -747,9 +738,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost ||
input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
== fp_value);
ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@ -939,17 +928,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// If frame was dynamically aligned, pop padding.
Label sentinel, sentinel_done;
__ pop(ecx);
__ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ j(equal, &sentinel);
__ push(ecx);
__ jmp(&sentinel_done);
__ bind(&sentinel);
__ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(1));
__ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
@ -961,17 +939,6 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
if (type() == OSR) {
// If alignment padding is added, push the sentinel.
Label no_osr_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_osr_padding, Label::kNear);
__ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ bind(&no_osr_padding);
}
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the

59
deps/v8/src/ia32/full-codegen-ia32.cc

@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@ -100,7 +101,9 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 13;
}
@ -321,12 +324,20 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = 10;
}
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 100;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@ -337,7 +348,8 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(127, Max(1, distance / 100));
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@ -398,7 +410,8 @@ void FullCodeGenerator::EmitReturnSequence() {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(127, Max(1, distance / 100));
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
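// Worked example of the weighting above: a back edge 350 bytes from its
// target decrements the profiling counter by Min(127, Max(1, 350 / 100)) = 3
// per iteration, so larger loop bodies exhaust the interrupt budget in fewer
// iterations and reach the optimize/interrupt check sooner.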
@ -1411,6 +1424,15 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void FullCodeGenerator::EmitAccessor(Expression* expression) {
if (expression == NULL) {
__ push(Immediate(isolate()->factory()->null_value()));
} else {
VisitForStackValue(expression);
}
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@ -1445,6 +1467,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@ -1456,6 +1479,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
@ -1487,24 +1512,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(key);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ push(Immediate(isolate()->factory()->null_value()));
} else {
__ push(Immediate(isolate()->factory()->null_value()));
VisitForStackValue(value);
}
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
accessor_table.lookup(key)->second->getter = value;
break;
case ObjectLiteral::Property::SETTER:
accessor_table.lookup(key)->second->setter = value;
break;
default: UNREACHABLE();
}
}
// Emit code to define accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
}
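// With the accessor table, a literal such as
//   var o = { get x() { return 1; }, set x(v) { this.v_ = v; } };
// defines both accessors with a single kDefineOrRedefineAccessorProperty
// call, where the old code made one runtime call per accessor and passed
// null for the missing half.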
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(esp, 0));

108
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -79,9 +79,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
info()->osr_ast_id() != AstNode::kNoNumber;
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@ -156,29 +153,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
if (dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
// Copy arguments, receiver, and return address.
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0),
Immediate(isolate()->factory()->frame_alignment_marker()));
__ bind(&do_not_pad);
}
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@ -579,7 +553,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@ -2125,17 +2098,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
if (dynamic_frame_alignment_) {
Label aligned;
// Frame alignment marker (padding) is below arguments,
// and receiver, so its return-address-relative offset is
// (num_arguments + 2) words.
__ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
Immediate(factory()->frame_alignment_marker()));
__ j(not_equal, &aligned);
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&aligned);
}
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@ -2625,15 +2587,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = ToRegister(instr->TempAt(0));
ASSERT(receiver.is(eax)); // Used for parameter count.
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2675,6 +2632,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
ASSERT(receiver.is(eax)); // Used for parameter count.
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -4493,33 +4461,47 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
// Copy elements backing store header.
ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ mov(ecx, FieldOperand(source, i));
__ mov(FieldOperand(result, elements_offset + i), ecx);
}
}
// Copy elements backing store content.
ASSERT(!has_elements || elements->IsFixedArray());
int elements_length = has_elements ? elements->length() : 0;
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
// Copy elements backing store content.
int elements_length = elements->length();
if (elements->IsFixedDoubleArray()) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
int32_t value_low = value & 0xFFFFFFFF;
int32_t value_high = value >> 32;
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(FieldOperand(result, total_offset), Immediate(value_low));
__ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
}
} else if (elements->IsFixedArray()) {
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
}
}
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
UNREACHABLE();
}
}
}
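The new FixedDoubleArray branch above copies each element as its raw 64-bit IEEE-754 representation, split into two 32-bit immediates (low word first, matching ia32's little-endian layout). A standalone sketch of that split, assuming only standard C++ (this is not V8 code):

#include <stdint.h>
#include <string.h>

static void SplitDoubleBits(double d, int32_t* low, int32_t* high) {
  int64_t bits;
  memcpy(&bits, &d, sizeof(bits));  // reinterpret the double's bit pattern
  *low = static_cast<int32_t>(bits & 0xFFFFFFFF);   // stored at offset + 0
  *high = static_cast<int32_t>(bits >> 32);         // stored at offset + 4
}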

6
deps/v8/src/ia32/lithium-codegen-ia32.h

@@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
@@ -145,10 +144,6 @@
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
void set_dynamic_frame_alignment(bool value) {
dynamic_frame_alignment_ = value;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -333,7 +328,6 @@
int inlined_function_count_;
Scope* const scope_;
Status status_;
bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;

20
deps/v8/src/ia32/lithium-ia32.cc

@@ -368,11 +368,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot.
if (is_double) {
spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
spill_slot_count_++;
num_double_slots_++;
}
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
@@ -1111,17 +1107,25 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LOperand* temp = TempRegister();
LWrapReceiver* result =
new(zone()) LWrapReceiver(receiver, function, temp);
return AssignEnvironment(DefineSameAsFirst(result));
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
LOperand* temp = FixedTemp(edx);
LApplyArguments* result = new(zone()) LApplyArguments(function,
receiver,
length,
elements,
temp);
elements);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}

30
deps/v8/src/ia32/lithium-ia32.h

@@ -173,7 +173,8 @@ class LCodeGen;
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField)
V(DateField) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -456,18 +457,33 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
LOperand* temp) {
inputs_[0] = receiver;
inputs_[1] = function;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
};
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
LOperand* elements,
LOperand* temp) {
LOperand* elements) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -2273,7 +2289,6 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2287,8 +2302,6 @@
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2330,7 +2343,6 @@
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};

2
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -523,7 +523,7 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUC16CharCode);
ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(eax, Operand(current_character(), -minus));
__ and_(eax, mask);
__ cmp(eax, c);

15
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -1245,14 +1245,9 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (!object.is_identical_to(holder)) {
__ JumpIfSmi(edx, miss);
}
// Check that the maps haven't changed.
__ JumpIfSmi(edx, miss);
CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
}
@@ -2829,14 +2824,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (!object.is_identical_to(holder)) {
__ JumpIfSmi(eax, &miss);
}
// Check that the maps haven't changed.
__ JumpIfSmi(eax, &miss);
CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.

11
deps/v8/src/ic.cc

@@ -1017,6 +1017,15 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(*code);
} else if (state == MONOMORPHIC) {
// We are transitioning from monomorphic to megamorphic case.
// Place the current monomorphic stub and stub compiled for
// the receiver into stub cache.
Map* map = target()->FindFirstMap();
if (map != NULL) {
isolate()->stub_cache()->Set(*name, map, target());
}
isolate()->stub_cache()->Set(*name, receiver->map(), *code);
set_target(*megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Cache code holding map should be consistent with
@@ -1365,7 +1374,7 @@ MaybeObject* StoreIC::Store(State state,
// Strict mode doesn't allow setting non-existent global property
// or an assignment to a read only property.
if (strict_mode == kStrictMode) {
if (lookup.IsFound() && lookup.IsReadOnly()) {
if (lookup.IsProperty() && lookup.IsReadOnly()) {
return TypeError("strict_read_only_property", object, name);
} else if (IsContextual(object)) {
return ReferenceError("not_defined", name);

15
deps/v8/src/incremental-marking.cc

@@ -178,7 +178,12 @@ class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_age() != heap_->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
MarkObject(target);
}
@@ -396,7 +401,7 @@ bool IncrementalMarking::WorthActivating() {
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
!Serializer::enabled() &&
heap_->PromotedSpaceSize() > kActivationThreshold;
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -795,6 +800,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
Map* map = obj->map();
if (map == filler_map) continue;
if (obj->IsMap()) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
}
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);

10
deps/v8/src/isolate-inl.h

@@ -49,6 +49,16 @@ SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
}
bool Isolate::IsDebuggerActive() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!NoBarrier_Load(&debugger_initialized_)) return false;
return debugger()->IsDebuggerActive();
#else
return false;
#endif
}
bool Isolate::DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();

137
deps/v8/src/isolate.cc

@@ -38,9 +38,11 @@
#include "heap-profiler.h"
#include "hydrogen.h"
#include "isolate.h"
#include "lazy-instance.h"
#include "lithium-allocator.h"
#include "log.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
@@ -55,6 +57,31 @@
namespace v8 {
namespace internal {
struct GlobalState {
Thread::LocalStorageKey per_isolate_thread_data_key;
Thread::LocalStorageKey isolate_key;
Thread::LocalStorageKey thread_id_key;
Isolate* default_isolate;
Isolate::ThreadDataTable* thread_data_table;
Mutex* mutex;
};
struct InitializeGlobalState {
static void Construct(GlobalState* state) {
state->isolate_key = Thread::CreateThreadLocalKey();
state->thread_id_key = Thread::CreateThreadLocalKey();
state->per_isolate_thread_data_key = Thread::CreateThreadLocalKey();
state->thread_data_table = new Isolate::ThreadDataTable();
state->default_isolate = new Isolate();
state->mutex = OS::CreateMutex();
// Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
// because a non-null thread data may be already set.
Thread::SetThreadLocal(state->isolate_key, state->default_isolate);
}
};
static LazyInstance<GlobalState, InitializeGlobalState>::type global_state;
Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
@@ -64,10 +91,11 @@ int ThreadId::AllocateThreadId() {
int ThreadId::GetCurrentThreadId() {
int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
const GlobalState& global = global_state.Get();
int thread_id = Thread::GetThreadLocalInt(global.thread_id_key);
if (thread_id == 0) {
thread_id = AllocateThreadId();
Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
Thread::SetThreadLocalInt(global.thread_id_key, thread_id);
}
return thread_id;
}
@@ -311,44 +339,16 @@ void Isolate::PreallocatedStorageDelete(void* p) {
storage->LinkTo(&free_list_);
}
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
class IsolateInitializer {
public:
IsolateInitializer() {
Isolate::EnsureDefaultIsolate();
}
};
static IsolateInitializer* EnsureDefaultIsolateAllocated() {
// TODO(isolates): Use the system threading API to do this once?
static IsolateInitializer static_initializer;
return &static_initializer;
}
// This variable only needed to trigger static intialization.
static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
ThreadId thread_id) {
ASSERT(!thread_id.Equals(ThreadId::Invalid()));
PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
{
ScopedLock lock(process_wide_mutex_);
ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
thread_data_table_->Insert(per_thread);
ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
GlobalState* const global = global_state.Pointer();
ScopedLock lock(global->mutex);
ASSERT(global->thread_data_table->Lookup(this, thread_id) == NULL);
global->thread_data_table->Insert(per_thread);
ASSERT(global->thread_data_table->Lookup(this, thread_id) == per_thread);
}
return per_thread;
}
@@ -359,8 +359,9 @@ Isolate::PerIsolateThreadData*
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
ScopedLock lock(process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
GlobalState* const global = global_state.Pointer();
ScopedLock lock(global->mutex);
per_thread = global->thread_data_table->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = AllocatePerIsolateThreadData(thread_id);
}
@@ -373,26 +374,25 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
ScopedLock lock(process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
GlobalState* const global = global_state.Pointer();
ScopedLock lock(global->mutex);
per_thread = global->thread_data_table->Lookup(this, thread_id);
}
return per_thread;
}
bool Isolate::IsDefaultIsolate() const {
return this == global_state.Get().default_isolate;
}
void Isolate::EnsureDefaultIsolate() {
ScopedLock lock(process_wide_mutex_);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
thread_data_table_ = new Isolate::ThreadDataTable();
default_isolate_ = new Isolate();
}
GlobalState* const global = global_state.Pointer();
// Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
// becase a non-null thread data may be already set.
if (Thread::GetThreadLocal(isolate_key_) == NULL) {
Thread::SetThreadLocal(isolate_key_, default_isolate_);
// because a non-null thread data may be already set.
if (Thread::GetThreadLocal(global->isolate_key) == NULL) {
Thread::SetThreadLocal(global->isolate_key, global->default_isolate);
}
}
@@ -400,32 +400,48 @@ void Isolate::EnsureDefaultIsolate() {
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* Isolate::GetDefaultIsolateDebugger() {
EnsureDefaultIsolate();
return default_isolate_->debugger();
return global_state.Pointer()->default_isolate->debugger();
}
#endif
StackGuard* Isolate::GetDefaultIsolateStackGuard() {
EnsureDefaultIsolate();
return default_isolate_->stack_guard();
return global_state.Pointer()->default_isolate->stack_guard();
}
Thread::LocalStorageKey Isolate::isolate_key() {
return global_state.Get().isolate_key;
}
Thread::LocalStorageKey Isolate::thread_id_key() {
return global_state.Get().thread_id_key;
}
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key() {
return global_state.Get().per_isolate_thread_data_key;
}
void Isolate::EnterDefaultIsolate() {
EnsureDefaultIsolate();
ASSERT(default_isolate_ != NULL);
Isolate* const default_isolate = global_state.Pointer()->default_isolate;
ASSERT(default_isolate != NULL);
PerIsolateThreadData* data = CurrentPerIsolateThreadData();
// If not yet in default isolate - enter it.
if (data == NULL || data->isolate() != default_isolate_) {
default_isolate_->Enter();
if (data == NULL || data->isolate() != default_isolate) {
default_isolate->Enter();
}
}
Isolate* Isolate::GetDefaultIsolateForLocking() {
EnsureDefaultIsolate();
return default_isolate_;
return global_state.Pointer()->default_isolate;
}
@@ -1548,8 +1564,8 @@ void Isolate::TearDown() {
Deinit();
{ ScopedLock lock(process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
{ ScopedLock lock(global_state.Pointer()->mutex);
global_state.Pointer()->thread_data_table->RemoveAllThreads(this);
}
if (!IsDefaultIsolate()) {
@@ -1602,8 +1618,9 @@ void Isolate::Deinit() {
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
Thread::SetThreadLocal(isolate_key_, isolate);
Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
const GlobalState& global = global_state.Get();
Thread::SetThreadLocal(global.isolate_key, isolate);
Thread::SetThreadLocal(global.per_isolate_thread_data_key, data);
}

40
deps/v8/src/isolate.h

@@ -430,19 +430,25 @@ class Isolate {
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
Thread::GetThreadLocal(per_isolate_thread_data_key_));
Thread::GetThreadLocal(per_isolate_thread_data_key()));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
const Thread::LocalStorageKey key = isolate_key();
Isolate* isolate = reinterpret_cast<Isolate*>(
Thread::GetExistingThreadLocal(isolate_key_));
Thread::GetExistingThreadLocal(key));
if (!isolate) {
EnsureDefaultIsolate();
isolate = reinterpret_cast<Isolate*>(
Thread::GetExistingThreadLocal(key));
}
ASSERT(isolate != NULL);
return isolate;
}
INLINE(static Isolate* UncheckedCurrent()) {
return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key()));
}
// Usually called by Init(), but can be called early e.g. to allow
@@ -464,7 +470,7 @@
// for legacy API reasons.
void TearDown();
bool IsDefaultIsolate() const { return this == default_isolate_; }
bool IsDefaultIsolate() const;
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
@@ -489,14 +495,12 @@
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
static Thread::LocalStorageKey isolate_key() {
return isolate_key_;
}
static Thread::LocalStorageKey isolate_key();
// Returns the key used to store process-wide thread IDs.
static Thread::LocalStorageKey thread_id_key() {
return thread_id_key_;
}
static Thread::LocalStorageKey thread_id_key();
static Thread::LocalStorageKey per_isolate_thread_data_key();
// If a client attempts to create a Locker without specifying an isolate,
// we assume that the client is using legacy behavior. Set up the current
@@ -925,6 +929,7 @@
}
#endif
inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
#ifdef DEBUG
@@ -1032,6 +1037,9 @@
private:
Isolate();
friend struct GlobalState;
friend struct InitializeGlobalState;
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
class ThreadDataTable {
@@ -1074,16 +1082,6 @@
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
static Mutex* process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
static Thread::LocalStorageKey thread_id_key_;
static Isolate* default_isolate_;
static ThreadDataTable* thread_data_table_;
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1105,7 +1103,7 @@
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
// PreInits and returns a default isolate. Needed when a new thread tries
// PreInits and returns a default isolate. Needed when a new thread tries
// to create a Locker for the first time (the lock itself is in the isolate).
static Isolate* GetDefaultIsolateForLocking();

28
deps/v8/src/jsregexp.cc

@@ -1444,7 +1444,7 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
if (ascii) {
char_mask = String::kMaxAsciiCharCode;
} else {
char_mask = String::kMaxUC16CharCode;
char_mask = String::kMaxUtf16CodeUnit;
}
uc16 exor = c1 ^ c2;
// Check whether exor has only one bit set.
@@ -1546,7 +1546,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
if (ascii) {
max_char = String::kMaxAsciiCharCode;
} else {
max_char = String::kMaxUC16CharCode;
max_char = String::kMaxUtf16CodeUnit;
}
Label success;
@@ -1642,7 +1642,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->CheckCharacterLT(from, on_failure);
}
}
if (to != String::kMaxUC16CharCode) {
if (to != String::kMaxUtf16CodeUnit) {
if (cc->is_negated()) {
macro_assembler->CheckCharacterLT(to + 1, on_failure);
} else {
@@ -1835,7 +1835,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
if (asc) {
char_mask = String::kMaxAsciiCharCode;
} else {
char_mask = String::kMaxUC16CharCode;
char_mask = String::kMaxUtf16CodeUnit;
}
mask_ = 0;
value_ = 0;
@@ -1887,7 +1887,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
if (compiler->ascii()) {
char_mask = String::kMaxAsciiCharCode;
} else {
char_mask = String::kMaxUC16CharCode;
char_mask = String::kMaxUtf16CodeUnit;
}
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
@@ -1939,7 +1939,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
if (compiler->ascii()) {
char_mask = String::kMaxAsciiCharCode;
} else {
char_mask = String::kMaxUC16CharCode;
char_mask = String::kMaxUtf16CodeUnit;
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
@@ -4079,7 +4079,7 @@ static void AddClassNegated(const uc16 *elmv,
int elmc,
ZoneList<CharacterRange>* ranges) {
ASSERT(elmv[0] != 0x0000);
ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
ASSERT(last <= elmv[i] - 1);
@@ -4087,7 +4087,7 @@ static void AddClassNegated(const uc16 *elmv,
ranges->Add(CharacterRange(last, elmv[i] - 1));
last = elmv[i + 1] + 1;
}
ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
}
@@ -4633,8 +4633,8 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
from = range.to();
i++;
}
if (from < String::kMaxUC16CharCode) {
negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
if (from < String::kMaxUtf16CodeUnit) {
negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
}
}
@@ -4797,7 +4797,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value) {
entry->AddValue(value);
// Bail out if the last interval ended at 0xFFFF since otherwise
// adding 1 will wrap around to 0.
if (entry->to() == String::kMaxUC16CharCode)
if (entry->to() == String::kMaxUtf16CodeUnit)
break;
ASSERT(entry->to() + 1 > current.from());
current.set_from(entry->to() + 1);
@@ -5117,7 +5117,7 @@ int TextNode::ComputeFirstCharacterSet(int budget) {
int new_length = length + 1;
if (length > 0) {
if (ranges->at(0).from() == 0) new_length--;
if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
if (ranges->at(length - 1).to() == String::kMaxUtf16CodeUnit) {
new_length--;
}
}
@@ -5207,14 +5207,14 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
if (last < range.from())
AddRange(CharacterRange(last, range.from() - 1));
if (range.to() >= last) {
if (range.to() == String::kMaxUC16CharCode) {
if (range.to() == String::kMaxUtf16CodeUnit) {
return;
} else {
last = range.to() + 1;
}
}
}
AddRange(CharacterRange(last, String::kMaxUC16CharCode));
AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
}

216
deps/v8/src/lazy-instance.h

@@ -0,0 +1,216 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The LazyInstance<Type, Traits> class manages a single instance of Type,
// which will be lazily created on the first time it's accessed. This class is
// useful for places you would normally use a function-level static, but you
// need to have guaranteed thread-safety. The Type constructor will only ever
// be called once, even if two threads are racing to create the object. Get()
// and Pointer() will always return the same, completely initialized instance.
//
// LazyInstance is completely thread safe, assuming that you create it safely.
// The class was designed to be POD initialized, so it shouldn't require a
// static constructor. It really only makes sense to declare a LazyInstance as
// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
//
// LazyInstance is similar to Singleton, except it does not have the singleton
// property. You can have multiple LazyInstance's of the same type, and each
// will manage a unique instance. It also preallocates the space for Type, as
// to avoid allocating the Type instance on the heap. This may help with the
// performance of creating the instance, and reducing heap fragmentation. This
// requires that Type be a complete type so we can determine the size. See
// notes for advanced users below for more explanations.
//
// Example usage:
// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
// void SomeMethod() {
// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
//
// MyClass* ptr = my_instance.Pointer();
// ptr->DoDoDo(); // MyClass::DoDoDo
// }
//
// Additionally you can override the way your instance is constructed by
// providing your own trait:
// Example usage:
// struct MyCreateTrait {
// static void Construct(MyClass* allocated_ptr) {
// new (allocated_ptr) MyClass(/* extra parameters... */);
// }
// };
// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
// LAZY_INSTANCE_INITIALIZER;
//
// Notes for advanced users:
// LazyInstance can actually be used in two different ways:
//
// - "Static mode" which is the default mode since it is the most efficient
// (no extra heap allocation). In this mode, the instance is statically
// allocated (stored in the global data section at compile time).
// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
// must be used to initialize static lazy instances.
//
// - "Dynamic mode". In this mode, the instance is dynamically allocated and
// constructed (using new) by default. This mode is useful if you have to
// deal with some code already allocating the instance for you (e.g.
// OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
// dynamic lazy instances.
#ifndef V8_LAZY_INSTANCE_H_
#define V8_LAZY_INSTANCE_H_
#include "once.h"
namespace v8 {
namespace internal {
#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
// Default to static mode.
#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
template <typename T>
struct LeakyInstanceTrait {
static void Destroy(T* /* instance */) {}
};
// Traits that define how an instance is allocated and accessed.
template <typename T>
struct StaticallyAllocatedInstanceTrait {
typedef char StorageType[sizeof(T)];
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
}
template <typename ConstructTrait>
static void InitStorageUsingTrait(StorageType* storage) {
ConstructTrait::Construct(MutableInstance(storage));
}
};
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
typedef T* StorageType;
static T* MutableInstance(StorageType* storage) {
return *storage;
}
template <typename CreateTrait>
static void InitStorageUsingTrait(StorageType* storage) {
*storage = CreateTrait::Create();
}
};
template <typename T>
struct DefaultConstructTrait {
// Constructs the provided object which was already allocated.
static void Construct(T* allocated_ptr) {
new(allocated_ptr) T();
}
};
template <typename T>
struct DefaultCreateTrait {
static T* Create() {
return new T();
}
};
// TODO(pliard): Handle instances destruction (using global destructors).
template <typename T, typename AllocationTrait, typename CreateTrait,
typename DestroyTrait /* not used yet. */ >
struct LazyInstanceImpl {
public:
typedef typename AllocationTrait::StorageType StorageType;
private:
static void InitInstance(StorageType* storage) {
AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
}
void Init() const {
CallOnce(&once_, &InitInstance, &storage_);
}
public:
T* Pointer() {
Init();
return AllocationTrait::MutableInstance(&storage_);
}
const T& Get() const {
Init();
return *AllocationTrait::MutableInstance(&storage_);
}
mutable OnceType once_;
// Note that the previous field, OnceType, is an AtomicWord which guarantees
// the correct alignment of the storage field below.
mutable StorageType storage_;
};
template <typename T,
typename CreateTrait = DefaultConstructTrait<T>,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyStaticInstance {
typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, CreateTrait,
DestroyTrait> type;
};
template <typename T,
typename CreateTrait = DefaultConstructTrait<T>,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyInstance {
// A LazyInstance is a LazyStaticInstance.
typedef typename LazyStaticInstance<T, CreateTrait, DestroyTrait>::type type;
};
template <typename T,
typename CreateTrait = DefaultConstructTrait<T>,
typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyDynamicInstance {
typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, CreateTrait,
DestroyTrait> type;
};
} } // namespace v8::internal
#endif // V8_LAZY_INSTANCE_H_
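For orientation, a minimal usage sketch of the static mode added here, following the same pattern as the GlobalState/InitializeGlobalState pair in isolate.cc above (Counters, InitCounters, and RecordCall are hypothetical names, and the code is assumed to live inside namespace v8::internal):

struct Counters { int calls; };

struct InitCounters {  // custom construct trait, as described above
  static void Construct(Counters* c) { c->calls = 0; }
};

static LazyInstance<Counters, InitCounters>::type counters =
    LAZY_INSTANCE_INITIALIZER;

void RecordCall() {
  counters.Pointer()->calls++;  // first access runs Construct exactly once
}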

23
deps/v8/src/lithium-allocator.cc

@@ -46,29 +46,6 @@
namespace v8 {
namespace internal {
#define DEFINE_OPERAND_CACHE(name, type) \
name name::cache[name::kNumCachedOperands]; \
void name::SetUpCache() { \
for (int i = 0; i < kNumCachedOperands; i++) { \
cache[i].ConvertTo(type, i); \
} \
} \
static bool name##_initialize() { \
name::SetUpCache(); \
return true; \
} \
static bool name##_cache_initialized = name##_initialize();
DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
DEFINE_OPERAND_CACHE(LRegister, REGISTER)
DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)
#undef DEFINE_OPERAND_CACHE
static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
return a.Value() < b.Value() ? a : b;
}

25
deps/v8/src/lithium.cc

@@ -94,6 +94,31 @@ void LOperand::PrintTo(StringStream* stream) {
}
}
#define DEFINE_OPERAND_CACHE(name, type) \
name* name::cache = NULL; \
void name::SetUpCache() { \
if (cache) return; \
cache = new name[kNumCachedOperands]; \
for (int i = 0; i < kNumCachedOperands; i++) { \
cache[i].ConvertTo(type, i); \
} \
} \
DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
DEFINE_OPERAND_CACHE(LRegister, REGISTER)
DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)
#undef DEFINE_OPERAND_CACHE
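For readability, here is what one instantiation of the new DEFINE_OPERAND_CACHE macro above expands to (LRegister, whose kNumCachedOperands is 16 per lithium.h below); the logic is identical, only written out:

LRegister* LRegister::cache = NULL;

void LRegister::SetUpCache() {
  if (cache) return;  // idempotent: a second call is a no-op
  cache = new LRegister[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(REGISTER, i);  // pre-index each cached operand
  }
}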
void LOperand::SetUpCaches() {
LConstantOperand::SetUpCache();
LStackSlot::SetUpCache();
LDoubleStackSlot::SetUpCache();
LRegister::SetUpCache();
LDoubleRegister::SetUpCache();
}
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {

14
deps/v8/src/lithium.h

@@ -69,6 +69,10 @@ class LOperand: public ZoneObject {
ASSERT(this->index() == index);
}
// Calls SetUpCache() for each subclass. Don't forget to update this method
// if you add a new LOperand subclass.
static void SetUpCaches();
protected:
static const int kKindFieldWidth = 3;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
@@ -264,7 +268,7 @@ class LConstantOperand: public LOperand {
private:
static const int kNumCachedOperands = 128;
static LConstantOperand cache[];
static LConstantOperand* cache;
LConstantOperand() : LOperand() { }
explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
@@ -299,7 +303,7 @@ class LStackSlot: public LOperand {
private:
static const int kNumCachedOperands = 128;
static LStackSlot cache[];
static LStackSlot* cache;
LStackSlot() : LOperand() { }
explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
@@ -323,7 +327,7 @@ class LDoubleStackSlot: public LOperand {
private:
static const int kNumCachedOperands = 128;
static LDoubleStackSlot cache[];
static LDoubleStackSlot* cache;
LDoubleStackSlot() : LOperand() { }
explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
@@ -347,7 +351,7 @@ class LRegister: public LOperand {
private:
static const int kNumCachedOperands = 16;
static LRegister cache[];
static LRegister* cache;
LRegister() : LOperand() { }
explicit LRegister(int index) : LOperand(REGISTER, index) { }
@@ -371,7 +375,7 @@ class LDoubleRegister: public LOperand {
private:
static const int kNumCachedOperands = 16;
static LDoubleRegister cache[];
static LDoubleRegister* cache;
LDoubleRegister() : LOperand() { }
explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }

26
deps/v8/src/log.cc

@@ -35,6 +35,7 @@
#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
#include "platform.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "string-stream.h"
@@ -461,18 +462,20 @@ class Logger::NameBuffer {
utf8_pos_ += utf8_length;
return;
}
int uc16_length = Min(str->length(), kUc16BufferSize);
String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
int uc16_length = Min(str->length(), kUtf16BufferSize);
String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
uc16 c = uc16_buffer_[i];
uc16 c = utf16_buffer[i];
if (c <= String::kMaxAsciiCharCodeU) {
utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
} else {
int char_length = unibrow::Utf8::Length(c);
int char_length = unibrow::Utf8::Length(c, previous);
if (utf8_pos_ + char_length > kUtf8BufferSize) break;
unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
utf8_pos_ += char_length;
}
previous = c;
}
}
@@ -504,11 +507,11 @@ class Logger::NameBuffer {
private:
static const int kUtf8BufferSize = 512;
static const int kUc16BufferSize = 128;
static const int kUtf16BufferSize = 128;
int utf8_pos_;
char utf8_buffer_[kUtf8BufferSize];
uc16 uc16_buffer_[kUc16BufferSize];
uc16 utf16_buffer[kUtf16BufferSize];
};
@@ -1726,13 +1729,14 @@ void Logger::EnableSlidingStateWindow() {
}
}
// Protects the state below.
static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
ScopedLock lock(mutex_);
ScopedLock lock(active_samplers_mutex.Pointer());
for (int i = 0;
ActiveSamplersExist() && i < active_samplers_->length();
++i) {
@@ -1759,7 +1763,7 @@ SamplerRegistry::State SamplerRegistry::GetState() {
void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
ScopedLock lock(mutex_);
ScopedLock lock(active_samplers_mutex.Pointer());
if (active_samplers_ == NULL) {
active_samplers_ = new List<Sampler*>;
} else {
@@ -1771,7 +1775,7 @@ void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
ScopedLock lock(mutex_);
ScopedLock lock(active_samplers_mutex.Pointer());
ASSERT(active_samplers_ != NULL);
bool removed = active_samplers_->RemoveElement(sampler);
ASSERT(removed);
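The registry's mutex thus moves from an OS::CreateMutex() static initializer to a LazyMutex that is POD-initialized at compile time and only creates the underlying OS mutex on first use. A minimal sketch of the same pattern (my_state and TouchState are hypothetical; LazyMutex and LAZY_MUTEX_INITIALIZER are assumed to come from the newly included platform.h):

static LazyMutex my_state_mutex = LAZY_MUTEX_INITIALIZER;
static int my_state = 0;  // protected by my_state_mutex

void TouchState() {
  ScopedLock lock(my_state_mutex.Pointer());  // OS mutex created on first call
  my_state++;
}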

1
deps/v8/src/log.h

@@ -454,7 +454,6 @@ class SamplerRegistry : public AllStatic {
return active_samplers_ != NULL && !active_samplers_->is_empty();
}
static Mutex* mutex_; // Protects the state below.
static List<Sampler*>* active_samplers_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);

9
deps/v8/src/mark-compact-inl.h

@@ -52,13 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
}
void MarkCompactCollector::ClearCacheOnMap(Map* map) {
if (FLAG_cleanup_code_caches_at_gc) {
map->ClearCodeCache(heap());
}
}
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@@ -88,7 +81,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
if (obj->IsMap()) {
ClearCacheOnMap(Map::cast(obj));
heap_->ClearCacheOnMap(Map::cast(obj));
}
}

6
deps/v8/src/mark-compact.cc

@@ -1049,7 +1049,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_state() == MEGAMORPHIC ||
heap->mark_compact_collector()->flush_monomorphic_ics_)) {
heap->mark_compact_collector()->flush_monomorphic_ics_ ||
target->ic_age() != heap->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
@@ -1797,7 +1798,7 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
ClearCacheOnMap(map);
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
@@ -3427,7 +3428,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ResetLiveBytes();
space->ReleasePage(p);
}

3
deps/v8/src/mark-compact.h

@@ -638,9 +638,6 @@ class MarkCompactCollector {
// Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
// Clears the cache of ICs related to this map.
INLINE(void ClearCacheOnMap(Map* map));
void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in

28
deps/v8/src/mips/assembler-mips-inl.h

@@ -117,13 +117,31 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(pc_);
ASSERT(IsCodeTarget(rmode_) ||
rmode_ == RUNTIME_ENTRY ||
rmode_ == EMBEDDED_OBJECT ||
rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
return Assembler::kSpecialTargetSize;
}
@@ -281,7 +299,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -307,7 +325,7 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&

23
deps/v8/src/mips/assembler-mips.cc

@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -850,7 +850,6 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
return rmode != RelocInfo::NONE;
}
void Assembler::GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
@@ -1319,7 +1318,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1329,7 +1328,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1604,7 +1603,7 @@ void Assembler::clz(Register rd, Register rs) {
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1612,7 +1611,7 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
@@ -1772,25 +1771,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1831,7 +1830,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -1847,7 +1846,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
ASSERT(mips32r2);
ASSERT(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}

14
deps/v8/src/mips/assembler-mips.h

@@ -553,10 +553,13 @@ class Assembler : public AssemblerBase {
static void JumpLabelToJumpRegister(Address pc);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code.
inline static void set_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
Address instruction_payload, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
target);
}
// This sets the branch destination.
@@ -578,8 +581,7 @@ class Assembler : public AssemblerBase {
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
static const int kCallTargetSize = 0 * kInstrSize;
static const int kExternalTargetSize = 0 * kInstrSize;
static const int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit constant.
// Before jump-optimizations, this constant was used in

8
deps/v8/src/mips/builtins-mips.cc

@@ -67,9 +67,11 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects a0 to contain the number of arguments
// JumpToExternalReference expects s0 to contain the number of arguments
// including the receiver and the extra arguments.
__ Addu(a0, a0, Operand(num_extra_args + 1));
__ Addu(s0, a0, num_extra_args + 1);
__ sll(s1, s0, kPointerSizeLog2);
__ Subu(s1, s1, kPointerSize);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -1095,8 +1097,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ Push(a1, a2);

323
deps/v8/src/mips/code-stubs-mips.cc

@@ -70,13 +70,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in a0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(a0, &check_heap_number);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ Ret();
__ bind(&check_heap_number);
EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ Ret();
__ bind(&call_builtin);
__ push(a0);
@@ -128,9 +128,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// found in the shared function info object.
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
__ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
__ Ret();
// Create a new closure through the slower runtime call.
@@ -179,8 +179,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Remove the on-stack argument and return.
__ mov(cp, v0);
__ Pop();
__ Ret();
__ DropAndRet(1);
// Need to collect. Call into runtime system.
__ bind(&gc);
@@ -242,8 +241,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Remove the on-stack argument and return.
__ mov(cp, v0);
__ Addu(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ DropAndRet(2);
// Need to collect. Call into runtime system.
__ bind(&gc);
@@ -368,8 +366,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
// Return and remove the on-stack parameters.
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ DropAndRet(3);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
@@ -405,16 +402,14 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
// Allocate the JS object and copy header together with all in-object
// properties from the boilerplate.
__ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
__ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
for (int i = 0; i < size; i += kPointerSize) {
__ lw(a1, FieldMemOperand(a3, i));
__ sw(a1, FieldMemOperand(a0, i));
__ sw(a1, FieldMemOperand(v0, i));
}
// Return and remove the on-stack parameters.
__ Drop(4);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ DropAndRet(4);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
@@ -478,7 +473,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ And(exponent, source_, Operand(HeapNumber::kSignMask));
// Subtract from 0 if source was negative.
__ subu(at, zero_reg, source_);
__ movn(source_, at, exponent);
__ Movn(source_, at, exponent);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -490,15 +485,15 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
// Safe to use 'at' as dest reg here.
__ Or(at, exponent, Operand(exponent_word_for_1));
__ movn(exponent, at, source_); // Write exp when source not 0.
__ Movn(exponent, at, source_); // Write exp when source not 0.
// 1, 0 and -1 all have 0 for the second word.
__ Ret(USE_DELAY_SLOT);
__ mov(mantissa, zero_reg);
__ Ret();
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
__ clz(zeros_, source_);
__ Clz(zeros_, source_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here.
__ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
@@ -514,9 +509,9 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
// And the top (top 20 bits).
__ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
__ or_(exponent, exponent, source_);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ or_(exponent, exponent, source_);
}
@@ -721,7 +716,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
// Get mantissa[51:20].
// Get the position of the first set bit.
__ clz(dst1, int_scratch);
__ Clz(dst1, int_scratch);
__ li(scratch2, 31);
__ Subu(dst1, scratch2, dst1);
@@ -1025,9 +1020,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
}
// Place heap_number_result in v0 and return to the pushed return address.
__ mov(v0, heap_number_result);
__ pop(ra);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ mov(v0, heap_number_result);
}
@@ -1079,7 +1074,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
__ or_(scratch_, scratch_, sign_);
// Subtract from 0 if the value was negative.
__ subu(at, zero_reg, the_int_);
__ movn(the_int_, at, sign_);
__ Movn(the_int_, at, sign_);
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1163,6 +1158,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ bind(&return_equal);
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
@@ -1234,8 +1230,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal (lhs is already not zero).
__ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ mov(v0, lhs);
__ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1273,8 +1269,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal.
__ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ li(v0, Operand(1));
__ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1354,12 +1350,13 @@ void EmitNanCheck(MacroAssembler* masm, Condition cc) {
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
}
__ Ret(); // Return.
__ Ret();
__ bind(&neither_is_nan);
}
@ -1408,6 +1405,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
__ bind(&return_result_equal);
__ li(v0, Operand(EQUAL));
__ Ret();
}
@ -1439,6 +1437,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ BranchF(&less_than, NULL, lt, f12, f14);
// Not equal, not less, not NaN, must be greater.
__ li(v0, Operand(GREATER));
__ Ret();
@ -1469,8 +1468,8 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Return non-zero.
Label return_not_equal;
__ bind(&return_not_equal);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1));
__ Ret();
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
@ -1549,8 +1548,8 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ Ret();
__ bind(&object_test);
__ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
@ -1565,8 +1564,8 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
__ and_(a0, a2, a3);
__ And(a0, a0, Operand(1 << Map::kIsUndetectable));
__ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ xori(v0, a0, 1 << Map::kIsUndetectable);
}
@ -1673,8 +1672,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
__ Addu(sp, sp, Operand(1 * kPointerSize));
__ Ret();
__ DropAndRet(1);
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
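Note: several hunks in this file fold an explicit stack pop plus Ret() into the new DropAndRet(n) helper. Assuming it schedules the pop into the return's delay slot like the other rewrites in this commit, its expansion would be roughly:

// Hypothetical expansion of DropAndRet(1):
__ Ret(USE_DELAY_SLOT);
__ addiu(sp, sp, 1 * kPointerSize);  // Pop one word in the delay slot.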
@ -1696,8 +1694,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(a2, &not_two_smis);
__ sra(a1, a1, 1);
__ sra(a0, a0, 1);
__ Subu(v0, a1, a0);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a1, a0);
__ bind(&not_two_smis);
} else if (FLAG_debug_code) {
__ Or(a2, a1, a0);
@ -1750,15 +1748,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
__ c(OLT, D, f12, f14);
__ movt(v0, t0);
__ Movt(v0, t0);
// Use the previous check to conditionally store the opposite condition
// (GREATER) in v0. If rhs is equal to lhs, this will be corrected in the
// next check.
__ movf(v0, t1);
__ Movf(v0, t1);
// Check if EQUAL condition is satisfied. If true, move conditionally
// result to v0.
__ c(EQ, D, f12, f14);
__ movt(v0, t2);
__ Movt(v0, t2);
__ Ret();
@ -1899,7 +1897,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
// Undetectable -> false.
__ movn(tos_, zero_reg, at);
__ Movn(tos_, zero_reg, at);
__ Ret(ne, at, Operand(zero_reg));
}
}
@ -1916,8 +1914,8 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
Label skip;
__ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
__ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
__ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
__ Ret(); // the string length is OK as the return value
__ bind(&skip);
}
@ -1955,7 +1953,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
// The value of a root is never NULL, so we can avoid loading a non-null
// value into tos_ when we want to return 'true'.
if (!result) {
__ movz(tos_, zero_reg, at);
__ Movz(tos_, zero_reg, at);
}
__ Ret(eq, at, Operand(zero_reg));
}
@ -2092,8 +2090,8 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
__ Branch(slow, eq, t0, Operand(zero_reg));
// Return '0 - value'.
__ Subu(v0, zero_reg, a0);
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ subu(v0, zero_reg, a0);
}
@ -2423,8 +2421,8 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
// Negating it results in 'lt'.
__ Branch(&skip, lt, scratch2, Operand(zero_reg));
ASSERT(Smi::FromInt(0) == 0);
__ mov(v0, zero_reg);
__ Ret(); // Return smi 0 if the non-zero one was positive.
__ Ret(USE_DELAY_SLOT);
__ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
__ bind(&skip);
// We fall through here if we multiplied a negative number by 0, because
// that would mean we should produce -0.
@ -2479,23 +2477,23 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
break;
case Token::BIT_OR:
__ Or(v0, left, Operand(right));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ or_(v0, left, right);
break;
case Token::BIT_AND:
__ And(v0, left, Operand(right));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ and_(v0, left, right);
break;
case Token::BIT_XOR:
__ Xor(v0, left, Operand(right));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ xor_(v0, left, right);
break;
case Token::SAR:
// Remove tags from right operand.
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(scratch1, left, scratch1);
// Smi tag result.
__ And(v0, scratch1, Operand(~kSmiTagMask));
__ And(v0, scratch1, ~kSmiTagMask);
__ Ret();
break;
case Token::SHR:
@ -2607,8 +2605,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// kValueOffset. On MIPS this workaround is built into sdc1 so
// there's no point in generating even more instructions.
__ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
__ Ret();
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
@ -3482,8 +3480,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
__ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, cache_entry);
__ Ret();
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
@ -3662,7 +3660,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ne,
double_exponent,
double_scratch);
// double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@ -3682,7 +3680,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ne,
double_exponent,
double_scratch);
// double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@ -3866,9 +3864,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ sw(a1, MemOperand(a0));
}
// Prepare arguments for C routine: a0 = argc, a1 = argv
// Prepare arguments for C routine.
// a0 = argc
__ mov(a0, s0);
__ mov(a1, s1);
// a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
@ -3888,30 +3887,28 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// coverage code can interfere with the proper calculation of ra.
Label find_ra;
masm->bal(&find_ra); // bal exposes branch delay slot.
masm->nop(); // Branch delay slot nop.
masm->mov(a1, s1);
masm->bind(&find_ra);
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
const int kNumInstructionsToJump = 6;
const int kNumInstructionsToJump = 5;
masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
masm->Subu(sp, sp, kCArgsSlotsSize);
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
masm->jalr(t9);
masm->nop(); // Branch delay slot nop.
// Set up sp in the delay slot.
masm->addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
ASSERT_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
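Note: the drop from kNumInstructionsToJump == 6 to 5 is accounted for by the two delay-slot fills in this hunk: the nop after bal becomes the useful mov(a1, s1), and the stack reservation moves into the jalr delay slot. Counting the instructions emitted after the find_ra label (kPointerSize doubles as the 4-byte instruction size on MIPS32):

//   1. Addu(ra, ra, kNumInstructionsToJump * kPointerSize)
//   2. sw(ra, MemOperand(sp))
//   3. mov(t9, s2)
//   4. jalr(t9)
//   5. addiu(sp, sp, -kCArgsSlotsSize)   // in the jalr delay slot
// ra is advanced by 5 instructions, which the ASSERT_EQ above verifies.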
// Restore stack (remove arg slots).
__ Addu(sp, sp, kCArgsSlotsSize);
if (always_allocate) {
// It's okay to clobber a2 and a3 here. v0 & v1 contain result.
__ li(a2, Operand(scope_depth));
@ -3925,14 +3922,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ addiu(a2, v0, 1);
__ andi(t0, a2, kFailureTagMask);
__ Branch(&failure_returned, eq, t0, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
// Restore stack (remove arg slots) in branch delay slot.
__ addiu(sp, sp, kCArgsSlotsSize);
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
__ LeaveExitFrame(save_doubles_, s0);
__ Ret();
__ LeaveExitFrame(save_doubles_, s0, true);
// Check if we should retry or throw exception.
Label retry;
@ -3943,11 +3942,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
Failure* out_of_memory = Failure::OutOfMemoryException();
__ Branch(throw_out_of_memory_exception, eq,
v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
__ Branch(USE_DELAY_SLOT,
throw_out_of_memory_exception,
eq,
v0,
Operand(reinterpret_cast<int32_t>(out_of_memory)));
// If we throw the OOM exception, the value of a3 doesn't matter.
// Any instruction that is not a jump can go in the delay slot.
// Retrieve the pending exception and clear the variable.
__ li(a3, Operand(isolate->factory()->the_hole_value()));
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
@ -3955,8 +3959,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of termination exceptions which are uncatchable
// by javascript code.
__ Branch(throw_termination_exception, eq,
v0, Operand(isolate->factory()->termination_exception()));
__ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
__ Branch(throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
__ jmp(throw_normal_exception);
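Note: replacing li(reg, Operand(isolate->factory()->...)) with LoadRoot is another recurring cleanup here: instead of embedding a heap pointer as a relocatable lui/ori immediate, the value is loaded off the dedicated roots register with a single lw. A sketch of the two forms (the exact root-register assignment is an assumption):

// Old: two instructions plus a relocation entry for the embedded pointer.
__ li(a3, Operand(isolate->factory()->the_hole_value()));
// New: one lw relative to the root-array register, no relocation needed.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);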
@ -3968,8 +3972,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// a0: number of arguments including receiver
// a1: pointer to builtin function
// s0: number of arguments including receiver
// s1: size of arguments excluding receiver
// s2: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
@ -3979,19 +3984,18 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
// NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
// The reason for this is that these arguments would need to be saved anyway
// so it's faster to set them up directly.
// See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
// Compute the argv pointer in a callee-saved register.
__ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
__ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Set up argc and the builtin function in callee-saved registers.
__ mov(s0, a0);
__ mov(s2, a1);
// s0: number of arguments (C callee-saved)
// s1: pointer to first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
@ -4083,6 +4087,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
}
__ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
// We build an EntryFrame.
@ -4155,7 +4160,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
__ li(t1, Operand(isolate->factory()->the_hole_value()));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(t1, MemOperand(t0));
@ -4199,7 +4204,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
__ Branch(&non_outermost_js_2, ne, t1,
__ Branch(&non_outermost_js_2,
ne,
t1,
Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
@ -4364,8 +4371,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
// Null is not instance of anything.
__ Branch(&object_not_null, ne, scratch,
Operand(masm->isolate()->factory()->null_value()));
__ Branch(&object_not_null,
ne,
scratch,
Operand(masm->isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@ -4470,8 +4479,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
__ Branch(&runtime, ne,
a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ Branch(&runtime,
ne,
a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
@ -4503,7 +4514,9 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label adaptor_frame, try_allocate;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
__ Branch(&adaptor_frame, eq, a2,
__ Branch(&adaptor_frame,
eq,
a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
@ -4693,8 +4706,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&arguments_loop, lt, t5, Operand(a2));
// Return and remove the on-stack parameters.
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
// a2 = argument count (tagged)
@ -4799,8 +4811,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Return and remove the on-stack parameters.
__ bind(&done);
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
@ -5008,7 +5019,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it contains
@ -5116,14 +5127,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ Branch(&success, eq,
v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Label failure;
__ Branch(&failure, eq,
v0, Operand(NativeRegExpMacroAssembler::FAILURE));
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
__ Branch(&runtime, ne,
v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
__ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// The result must now be an exception. If there is no pending exception
// already, a stack overflow (on the backtrack stack) was detected in RegExp
// code but the exception has not been created yet. Handle that in the
// runtime system.
@ -5149,8 +5157,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
__ li(v0, Operand(isolate->factory()->null_value()));
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
__ DropAndRet(4);
// Process the result from the native regexp code.
__ bind(&success);
@ -5211,14 +5218,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sll(a3, a3, kSmiTagSize); // Convert to Smi.
__ sw(a3, MemOperand(a0, 0));
__ Branch(&next_capture, USE_DELAY_SLOT);
__ addiu(a0, a0, kPointerSize); // In branch delay slot.
__ addiu(a0, a0, kPointerSize); // In branch delay slot.
__ bind(&done);
// Return last match info.
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
__ DropAndRet(4);
// External string. Short external strings have already been ruled out.
// a0: scratch
@ -5330,8 +5336,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ addiu(a3, a3, kPointerSize); // In branch delay slot.
__ bind(&done);
__ Addu(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ DropAndRet(3);
__ bind(&slowcase);
__ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
@ -5879,10 +5884,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
__ Or(c1, c1, scratch1);
__ bind(&tmp);
__ Branch(not_found,
Uless_equal,
scratch,
Operand(static_cast<int>('9' - '0')));
__ Branch(
not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
__ bind(&not_array_index);
// Calculate the two character string hash.
@ -6037,7 +6040,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
__ ori(at, zero_reg, StringHasher::kZeroHash);
__ movz(hash, at, hash);
__ Movz(hash, at, hash);
}
@ -6136,7 +6139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ And(t0, a1, Operand(kIsIndirectStringMask));
__ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
// t0 is used as a scratch register and can be overwritten in either case.
__ And(t0, a1, Operand(kSlicedNotConsMask));
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
@ -6327,7 +6330,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Subu(scratch3, scratch1, Operand(scratch2));
Register length_delta = scratch3;
__ slt(scratch4, scratch2, scratch1);
__ movn(scratch1, scratch2, scratch4);
__ Movn(scratch1, scratch2, scratch4);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
@ -6409,8 +6412,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
__ Addu(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ DropAndRet(2);
__ bind(&not_same);
@ -6485,7 +6487,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
__ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
__ mov(v0, a0); // Assume we'll return first string (from a0).
__ movz(v0, a1, a2); // If first is empty, return second (from a1).
__ Movz(v0, a1, a2); // If first is empty, return second (from a1).
__ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
__ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
__ and_(t4, t4, t5); // Branch if both strings were non-empty.
@ -6553,8 +6555,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
__ Branch(&string_add_flat_result, lt, t2,
Operand(ConsString::kMinLength));
__ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
@ -6815,16 +6816,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall through, and return.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(GREATER)); // In delay slot.
__ li(v0, Operand(GREATER));
__ Ret();
__ bind(&fpu_eq);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(EQUAL)); // In delay slot.
__ li(v0, Operand(EQUAL));
__ Ret();
__ bind(&fpu_lt);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(LESS)); // In delay slot.
__ li(v0, Operand(LESS));
__ Ret();
}
__ bind(&unordered);
@ -6895,6 +6896,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
// Registers containing left and right operands respectively.
Register left = a1;
Register right = a0;
@ -6922,41 +6925,52 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
Label left_ne_right;
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
__ Branch(&left_ne_right, ne, left, Operand(right));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, zero_reg); // In the delay slot.
__ Ret();
__ bind(&left_ne_right);
// Handle not identical strings.
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kSymbolTag != 0);
__ And(tmp3, tmp1, Operand(tmp2));
__ And(tmp5, tmp3, Operand(kIsSymbolMask));
Label is_symbol;
__ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
__ mov(v0, a0); // In the delay slot.
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
__ Ret();
__ bind(&is_symbol);
if (equality) {
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kSymbolTag != 0);
__ And(tmp3, tmp1, Operand(tmp2));
__ And(tmp5, tmp3, Operand(kIsSymbolMask));
Label is_symbol;
__ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // In the delay slot.
__ bind(&is_symbol);
}
// Check that both strings are sequential ASCII.
Label runtime;
__ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
&runtime);
__ JumpIfBothInstanceTypesAreNotSequentialAscii(
tmp1, tmp2, tmp3, tmp4, &runtime);
// Compare flat ASCII strings. Returns when done.
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2, tmp3);
if (equality) {
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2, tmp3);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(
masm, left, right, tmp1, tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
GenerateMiss(masm);
@ -6975,8 +6989,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
ASSERT(GetCondition() == eq);
__ Subu(v0, a0, Operand(a1));
__ Ret();
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
__ bind(&miss);
GenerateMiss(masm);
@ -7009,8 +7023,9 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ra);
__ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ push(t0);
__ CallExternalReference(miss, 3);
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
__ sw(t0, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@ -7067,8 +7082,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
// Push return address (accessible to GC through exit frame pc).
// This spot for ra was reserved in EnterExitFrame.
masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET), true);
masm->li(ra,
Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET),
CONSTANT_SIZE);
// Call the function.
masm->Jump(t9);
// Make sure the stored 'ra' points to this position.
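Note: the bare "true" argument to li() becomes the named CONSTANT_SIZE flag here (and in lithium-codegen-mips.cc below). The contract, presumably, is fixed-length emission: code that is later patched or measured by instruction count must not shrink when the immediate happens to fit in one instruction. A hedged sketch, with "target" standing in for the code address:

// With CONSTANT_SIZE, li always emits the full two-instruction form:
//   lui ra, hi16(target)
//   ori ra, ra, lo16(target)
// so InstructionsGeneratedSince-style assertions hold even when
// hi16(target) is zero and a single instruction would otherwise suffice.
__ li(ra, Operand(target, RelocInfo::CODE_TARGET), CONSTANT_SIZE);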
@ -7320,17 +7337,17 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// treated as a lookup success. For positive lookup, probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
__ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
__ Ret();
}
__ bind(&in_dictionary);
__ Ret(USE_DELAY_SLOT);
__ li(result, 1);
__ Ret();
__ bind(&not_in_dictionary);
__ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
__ Ret();
}
@ -7664,7 +7681,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);

7
deps/v8/src/mips/codegen-mips.cc

@ -37,8 +37,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
@ -50,6 +49,10 @@ TranscendentalFunction CreateTranscendentalFunction(
}
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

15
deps/v8/src/mips/constants-mips.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -39,11 +39,20 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
kMips32r2,
kMips32r1,
kLoongson
};
#ifdef _MIPS_ARCH_MIPS32R2
#define mips32r2 1
static const ArchVariants kArchVariant = kMips32r2;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
#else
#define mips32r2 0
static const ArchVariants kArchVariant = kMips32r1;
#endif
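Note: replacing the mips32r2 0/1 macro with the typed kArchVariant constant lets a third variant (kLoongson) join the existing two; the disasm-mips.cc hunks below convert every check accordingly. The resulting idiom, as a minimal sketch:

// Compile-time dispatch on the architecture variant; the comparison folds
// away since kArchVariant is a static const.
if (kArchVariant == kMips32r2) {
  // r2-only encodings (rotr, ins, ext, cvt.l.d, ...) are safe here.
} else {
  // r1 and Loongson need multi-instruction fallback sequences.
}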

4
deps/v8/src/mips/debug-mips.cc

@ -152,8 +152,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(a0, zero_reg); // No arguments.
__ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
__ PrepareCEntryArgs(0); // No arguments.
__ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
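Note: PrepareCEntryArgs and PrepareCEntryFunction pair with the CEntryStub register change earlier in this commit (arguments now live in callee-saved s0-s2 rather than a0-a1). Their bodies are not shown in this excerpt; a sketch consistent with the register comments in CEntryStub::Generate:

// Assumed implementations -- s0: argc, s1: argv size, s2: C function.
void MacroAssembler::PrepareCEntryArgs(int num_args) {
  li(s0, num_args);
  li(s1, (num_args - 1) * kPointerSize);  // Argument bytes excluding receiver.
}

void MacroAssembler::PrepareCEntryFunction(const ExternalReference& ref) {
  li(s2, Operand(ref));
}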

8
deps/v8/src/mips/deoptimizer-mips.cc

@ -119,7 +119,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
const int kInstrSize = Assembler::kInstrSize;
// This structure comes from FullCodeGenerator::EmitStackCheck.
// The call of the stack guard check has the following form:
// sltu at, sp, t0
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
// lui t9, <stack guard address> upper
// ori t9, <stack guard address> lower
@ -167,7 +167,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->sltu(at, sp, t0);
if (FLAG_count_based_interrupts) {
patcher.masm()->slt(at, a3, zero_reg);
} else {
patcher.masm()->sltu(at, sp, t0);
}
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.

18
deps/v8/src/mips/disasm-mips.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -515,7 +515,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "cvt.w.d 'fd, 'fs");
break;
case CVT_L_D: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "cvt.l.d 'fd, 'fs");
} else {
Unknown(instr);
@ -526,7 +526,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "trunc.w.d 'fd, 'fs");
break;
case TRUNC_L_D: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "trunc.l.d 'fd, 'fs");
} else {
Unknown(instr);
@ -592,7 +592,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case L:
switch (instr->FunctionFieldRaw()) {
case CVT_D_L: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "cvt.d.l 'fd, 'fs");
} else {
Unknown(instr);
@ -600,7 +600,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
}
case CVT_S_L: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "cvt.s.l 'fd, 'fs");
} else {
Unknown(instr);
@ -636,7 +636,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
@ -653,7 +653,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
@ -770,7 +770,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
@ -778,7 +778,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
}
case EXT: {
if (mips32r2) {
if (kArchVariant == kMips32r2) {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);

264
deps/v8/src/mips/full-codegen-mips.cc

@ -42,6 +42,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@ -119,8 +120,10 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
return 11 * Instruction::kInstrSize;
UNREACHABLE();
return 10 * Instruction::kInstrSize;
}
@ -142,32 +145,11 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
if (FLAG_trace_opt_verbose) {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
if (maybe_cell->To(&cell)) {
__ li(a2, Handle<JSGlobalPropertyCell>(cell));
__ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
__ Subu(a3, a3, Operand(Smi::FromInt(1)));
__ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
ASSERT_EQ(masm_->pc_offset(), self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@ -341,6 +323,34 @@ void FullCodeGenerator::ClearAccumulator() {
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ li(a2, Operand(profiling_counter_));
__ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
__ Subu(a3, a3, Operand(Smi::FromInt(delta)));
__ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
}
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = 10;
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
__ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
}
static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 142;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@ -351,16 +361,35 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ sltu(at, sp, t0);
__ beq(at, zero_reg, &ok);
// CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
StackCheckStub stub;
__ CallStub(&stub);
if (FLAG_count_based_interrupts) {
int weight = 1;
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
// CallStub will emit a li t9 first, so it is safe to use the delay slot.
InterruptStub stub;
__ CallStub(&stub);
} else {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ sltu(at, sp, t0);
__ beq(at, zero_reg, &ok);
// CallStub will emit a li t9 first, so it is safe to use the delay slot.
StackCheckStub stub;
__ CallStub(&stub);
}
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
if (FLAG_count_based_interrupts) {
EmitProfilingCounterReset();
}
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
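Note: the back-edge weight scales the profiling-counter decrement with loop-body size, clamped to [1, kMaxBackEdgeWeight]. Worked instances of the formula with the constants from this hunk:

// weight = Min(kMaxBackEdgeWeight, Max(1, distance / kBackEdgeDistanceDivisor))
//   distance =    50 bytes ->    50 / 142 = 0   -> clamped up to 1
//   distance =  1000 bytes ->  1000 / 142 = 7   -> weight 7
//   distance = 30000 bytes -> 30000 / 142 = 211 -> clamped down to 127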
@ -383,6 +412,32 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
__ Branch(&ok, ge, a3, Operand(zero_reg));
__ push(v0);
if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
__ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ push(a2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
InterruptStub stub;
__ CallStub(&stub);
}
__ pop(v0);
EmitProfilingCounterReset();
__ bind(&ok);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@ -902,7 +957,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
@ -1195,7 +1250,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, mode);
CallIC(ic, mode);
}
@ -1251,7 +1306,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
} else { // LET || CONST_HARMONY
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
@ -1279,7 +1334,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
break;
}
@ -1343,7 +1398,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Uninitialized const bindings outside of harmony mode are unholed.
ASSERT(var->mode() == CONST);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
__ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
context()->Plug(v0);
break;
@ -1421,6 +1476,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void FullCodeGenerator::EmitAccessor(Expression* expression) {
if (expression == NULL) {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
} else {
VisitForStackValue(expression);
}
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@ -1456,6 +1521,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@ -1482,7 +1548,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@ -1505,27 +1571,29 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
case ObjectLiteral::Property::SETTER:
// Duplicate receiver on stack.
__ lw(a0, MemOperand(sp));
__ push(a0);
VisitForStackValue(key);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
} else {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
VisitForStackValue(value);
}
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
accessor_table.lookup(key)->second->setter = value;
break;
}
}
// Emit code to define accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
__ lw(a0, MemOperand(sp)); // Duplicate receiver.
__ push(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
}
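Note: the AccessorTable rewrite defers getter/setter definitions so a matched pair costs one kDefineOrRedefineAccessorProperty call instead of two. A tiny self-contained model of the batching idea (types and names are illustrative stand-ins, not V8's):

#include <map>
#include <string>

struct AccessorPair {
  const void* getter = nullptr;  // Stand-ins for the accessor closures.
  const void* setter = nullptr;
};

// One runtime call per key, carrying both accessors (nullptr marks a
// missing half), instead of one call per accessor as before.
int DefineAccessors(const std::map<std::string, AccessorPair>& table) {
  int runtime_calls = 0;
  for (const auto& entry : table) {
    (void)entry;  // The real code pushes key, getter, setter, attributes.
    ++runtime_calls;
  }
  return runtime_calls;  // "{ get x(){}, set x(v){} }" -> 1 call, not 2.
}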
if (expr->has_function()) {
ASSERT(result_saved);
__ lw(a0, MemOperand(sp));
@ -1753,7 +1821,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name in a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@ -1762,7 +1830,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@ -1790,7 +1858,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@ -1873,7 +1941,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@ -1914,7 +1982,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic);
CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@ -1927,7 +1995,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic);
CallIC(ic);
break;
}
}
@ -1945,7 +2013,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@ -2064,7 +2132,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2116,7 +2184,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2151,6 +2219,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id) {
ic_total_count_++;
__ Call(code, rmode, ast_id);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@ -2168,7 +2244,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ Call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2201,7 +2277,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2600,7 +2676,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label entry, loop;
// The use of t2 to store the valueOf symbol assumes that it is not
// otherwise used in the loop below.
__ li(t2, Operand(FACTORY->value_of_symbol()));
__ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@ -2970,6 +3046,52 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label runtime, done;
Register object = v0;
Register result = v0;
Register scratch0 = t5;
Register scratch1 = a1;
#ifdef DEBUG
__ AbortIfSmi(object);
__ GetObjectType(object, scratch1, scratch1);
__ Assert(eq, "Trying to get date field from non-date.",
scratch1, Operand(JS_DATE_TYPE));
#endif
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ li(scratch1, Operand(stamp));
__ lw(scratch1, MemOperand(scratch1));
__ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
__ Branch(&runtime, ne, scratch1, Operand(scratch0));
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch1);
__ li(a1, Operand(index));
__ Move(a0, object);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
context()->Plug(v0);
}
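Note: the fast path in EmitDateField hinges on the date cache stamp: fields below JSDate::kFirstUncachedField are served from the object only while its stored stamp matches the isolate-wide one; everything else takes the C call. The logic the generated code implements, in pseudo-C++ (accessor names assumed, not V8 API):

if (index == 0) {
  result = date->value();                      // The time value, always valid.
} else if (index < JSDate::kFirstUncachedField &&
           date->cache_stamp() == isolate->date_cache_stamp()) {
  result = date->cached_field(index);          // Cache stamp still current.
} else {
  result = GetDateField(date, index);          // C call recomputes and caches.
}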
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@ -3769,7 +3891,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ Call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@ -3925,7 +4047,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@ -4036,7 +4158,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@ -4069,7 +4191,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -4087,7 +4209,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -4113,7 +4235,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic);
CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@ -4291,7 +4413,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);

47
deps/v8/src/mips/ic-mips.cc

@ -512,8 +512,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ Push(a3, a2);
// Call the entry.
__ li(a0, Operand(2));
__ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
__ PrepareCEntryArgs(2);
__ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
CEntryStub stub(1);
__ CallStub(&stub);
@ -758,8 +758,6 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
Heap* heap = masm->isolate()->heap();
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@ -773,10 +771,12 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
__ CheckMap(scratch1,
scratch2,
Heap::kNonStrictArgumentsElementsMapRootIndex,
slow_case,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@ -788,7 +788,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
__ li(scratch3, Operand(kPointerSize >> 1));
__ mul(scratch3, key, scratch3);
__ Mul(scratch3, key, scratch3);
__ Addu(scratch3, scratch3, Operand(kOffset));
__ Addu(scratch2, scratch1, scratch3);
@ -801,7 +801,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
__ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ li(scratch3, Operand(kPointerSize >> 1));
__ mul(scratch3, scratch2, scratch3);
__ Mul(scratch3, scratch2, scratch3);
__ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
__ Addu(scratch2, scratch1, scratch3);
return MemOperand(scratch2);
@ -820,13 +820,15 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
__ CheckMap(backing_store,
scratch,
Heap::kFixedArrayMapRootIndex,
slow_case,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
__ li(scratch, Operand(kPointerSize >> 1));
__ mul(scratch, key, scratch);
__ Mul(scratch, key, scratch);
__ Addu(scratch,
scratch,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -844,8 +846,8 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
__ Ret(USE_DELAY_SLOT);
__ lw(v0, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in a2.
MemOperand unmapped_location =
@ -853,8 +855,8 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ lw(a2, unmapped_location);
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ Branch(&slow, eq, a2, Operand(a3));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
}
@ -1253,8 +1255,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(&check_if_double_array, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ Branch(
&check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@ -1262,8 +1265,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Branch(&fast_object_without_map_check);
__ bind(&check_if_double_array);
__ Branch(&slow, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@ -1285,8 +1287,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register scratch_value = t0;
Register address = t1;
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(&fast_double_with_map_check, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ Branch(&fast_double_with_map_check,
ne,
elements_map,
Heap::kFixedArrayMapRootIndex);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
@ -1323,8 +1327,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ Branch(&slow, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,

81
deps/v8/src/mips/lithium-codegen-mips.cc

@ -612,7 +612,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -635,13 +634,9 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     __ bind(&skip);
   }
-  if (cc == al) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-  } else {
-    // TODO(plind): The Arm port is a little different here, due to their
-    // DeOpt jump table, which is not used for Mips yet.
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
-  }
+  // TODO(plind): The Arm port is a little different here, due to their
+  // DeOpt jump table, which is not used for Mips yet.
+  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
 }
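The deleted if (cc == al) special case was redundant: with the always condition, the guarded jump fires unconditionally, so the conditional form subsumes the unconditional one. A one-function model (illustrative, not the real Jump signature):

#include <cassert>
#include <cstdint>

enum Condition { al, eq, ne };  // "al" = always

// Hypothetical model of the conditional Jump: when cc == al the guard
// is vacuous, so a dedicated unconditional branch path added nothing.
void Jump(uintptr_t* pc, uintptr_t entry, Condition cc, bool holds) {
  if (cc == al || holds) *pc = entry;
}

int main() {
  uintptr_t pc = 0;
  Jump(&pc, 0x1234, al, false);  // fires even though the comparison is false
  assert(pc == 0x1234);
  return 0;
}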
@@ -1018,7 +1013,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
     } else {
       // Generate standard code.
       __ li(at, constant);
-      __ mul(result, left, at);
+      __ Mul(result, left, at);
     }
   }
@@ -1036,7 +1031,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
     __ sra(at, result, 31);
     DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
   } else {
-    __ mul(result, left, right);
+    __ Mul(result, left, right);
   }
   if (bailout_on_minus_zero) {
@@ -1261,6 +1256,46 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
 }
 
+
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Smi* index = instr->index();
+  Label runtime, done;
+  ASSERT(object.is(a0));
+  ASSERT(result.is(v0));
+  ASSERT(!scratch.is(scratch0()));
+  ASSERT(!scratch.is(object));
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ GetObjectType(object, scratch, scratch);
+  __ Assert(eq, "Trying to get date field from non-date.",
+      scratch, Operand(JS_DATE_TYPE));
+#endif
+
+  if (index->value() == 0) {
+    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ li(scratch, Operand(stamp));
+      __ lw(scratch, MemOperand(scratch));
+      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
+      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                            kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ li(a1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
 
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
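The new DoDateField follows the date-cache protocol visible in the emitted code: for cached fields, compare the object's stamp against the isolate-wide date_cache_stamp; on a match, read the field straight out of the object, otherwise fall through to get_date_field_function in the runtime. A hedged C++ paraphrase of that fast/slow split; the field layout and the recompute hook are illustrative, not V8's real structures:

#include <cstdio>

// Invented layout: one global stamp per isolate, one stamp plus a few
// cached fields per date object.
struct DateCache {
  unsigned stamp;
};

struct DateObject {
  unsigned cache_stamp;
  double value;             // field index 0: the primitive time value
  double cached_fields[4];  // e.g. year, month, day, weekday
};

double RecomputeInRuntime(DateObject* date, int index);  // slow-path stub

double GetDateField(const DateCache* cache, DateObject* date, int index) {
  if (index == 0) return date->value;       // never cached, always valid
  if (date->cache_stamp == cache->stamp) {  // the stamp check in the hunk
    return date->cached_fields[index - 1];  // fast path: cache is fresh
  }
  return RecomputeInRuntime(date, index);   // the __ CallCFunction(...) path
}

// Any time-zone or clock change bumps the global stamp, which lazily
// invalidates every per-object cache at once.
double RecomputeInRuntime(DateObject* date, int index) {
  date->cache_stamp = 0;  // placeholder: a real runtime refills the cache
  return 0.0;
}

int main() {
  DateCache cache = {7};
  DateObject d = {7, 1e12, {2012, 2, 14, 2}};
  printf("%g\n", GetDateField(&cache, &d, 1));  // hits the cache: 2012
  return 0;
}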
@@ -2042,7 +2077,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch
   // with true or false.
-  __ li(result, Operand(factory()->the_hole_value()), true);
+  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
   __ Branch(&done);
 
   // The inlined call site cache did not match. Check null and string before
@@ -2097,7 +2132,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   __ bind(&before_push_delta);
   {
     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-    __ li(temp, Operand(delta * kPointerSize), true);
+    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
     __ StoreToSafepointRegisterSlot(temp, temp);
   }
   CallCodeGeneric(stub.GetCode(),
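Both li calls above now pass the named CONSTANT_SIZE flag instead of a bare true, and the nearby comment explains why these sites exist at all: they are patched later, so the load must always occupy the same number of instructions. A 32-bit immediate load on MIPS can shrink to one instruction when the value fits in 16 bits; a patchable site must force the full lui/ori pair. A sketch of that emission policy, with the encodings and names invented for illustration:

#include <cassert>
#include <cstdint>
#include <vector>

enum LiFlags { OPTIMIZE_SIZE, CONSTANT_SIZE };

void EmitLoadImmediate(std::vector<uint32_t>* code, uint32_t imm,
                       LiFlags mode) {
  if (mode == OPTIMIZE_SIZE && imm <= 0xFFFF) {
    code->push_back(0xE0000000u | (imm & 0xFFFF));  // one instruction suffices
    return;
  }
  // Patchable sites must always occupy exactly two instruction slots,
  // so a later patcher can overwrite them without resizing the code.
  code->push_back(0xF0000000u | (imm >> 16));       // "lui rd, hi16"
  code->push_back(0xE0000000u | (imm & 0xFFFF));    // "ori rd, rd, lo16"
}

int main() {
  std::vector<uint32_t> a, b;
  EmitLoadImmediate(&a, 42, OPTIMIZE_SIZE);  // fits in 16 bits: 1 slot
  EmitLoadImmediate(&b, 42, CONSTANT_SIZE);  // patchable: always 2 slots
  assert(a.size() == 1 && b.size() == 2);
  return 0;
}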
@@ -2624,8 +2659,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   // Result is the frame pointer for the frame if not adapted and for the real
   // frame below the adaptor frame if adapted.
-  __ movn(result, fp, temp);  // move only if temp is not equal to zero (ne)
-  __ movz(result, scratch, temp);  // move only if temp is equal to zero (eq)
+  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
 }
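The capitalized Movn/Movz are macro-assembler wrappers over the MIPS conditional-move instructions; the semantics the comments describe are simply "write the destination only when the test register is non-zero / zero", which selects between the two candidate frame pointers without a branch. Minimal model (not V8 code):

#include <cassert>
#include <cstdint>

// movn rd, rs, rt: rd = rs if rt != 0.  movz rd, rs, rt: rd = rs if rt == 0.
void movn(uintptr_t* rd, uintptr_t rs, uintptr_t rt) {
  if (rt != 0) *rd = rs;
}
void movz(uintptr_t* rd, uintptr_t rs, uintptr_t rt) {
  if (rt == 0) *rd = rs;
}

int main() {
  uintptr_t fp = 0x1000, adaptor_fp = 0x2000, temp = 1, result = 0;
  movn(&result, fp, temp);          // taken: temp != 0
  movz(&result, adaptor_fp, temp);  // skipped: temp != 0
  assert(result == fp);             // branch-free select of a frame pointer
  return 0;
}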
@@ -2650,15 +2685,10 @@
 }
 
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
   Register scratch = scratch0();
-  ASSERT(receiver.is(a0));  // Used for parameter count.
-  ASSERT(function.is(a1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(v0));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2699,6 +2729,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  ASSERT(receiver.is(a0));  // Used for parameter count.
+  ASSERT(function.is(a1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(v0));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.

15
deps/v8/src/mips/lithium-mips.cc

@@ -1097,6 +1097,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
 }
 
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), a1);
   LOperand* receiver = UseFixed(instr->receiver(), a0);
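Splitting DoWrapReceiver out of DoApplyArguments gives the sloppy-mode receiver fix-up its own Lithium instruction, so the wrapping logic no longer lives inline in the apply path. The rule it lowers, per the codegen comment above ("if the receiver is null or undefined... pass the global object"), can be paraphrased as follows; the types here are invented, not V8's object model:

#include <cassert>

// Invented value model: just enough to state the rule.
struct Value {
  bool is_null_or_undefined;
  bool is_spec_object;  // a real JS receiver object
};

// Sloppy-mode receiver wrapping: null/undefined are replaced by the
// global receiver; proper objects pass through unchanged; other
// primitives would be boxed on a slower path this sketch omits.
const Value* WrapReceiver(const Value* receiver,
                          const Value* global_receiver) {
  if (receiver->is_null_or_undefined) return global_receiver;
  return receiver;
}

int main() {
  Value undef = {true, false}, global = {false, true}, obj = {false, true};
  assert(WrapReceiver(&undef, &global) == &global);
  assert(WrapReceiver(&obj, &global) == &obj);
  return 0;
}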
@@ -1604,6 +1612,13 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
 }
 
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LDateField* result = new LDateField(object, FixedTemp(a1), instr->index());
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
Some files were not shown because too many files changed in this diff
