
Upgrade V8 to 3.4.9

v0.7.4-release
Ryan Dahl, 13 years ago
commit 6054dcc130
100 changed files:

deps/v8/AUTHORS | 1
deps/v8/ChangeLog | 61
deps/v8/SConstruct | 39
deps/v8/include/v8.h | 46
deps/v8/samples/shell.cc | 9
deps/v8/src/api.cc | 33
deps/v8/src/arm/assembler-arm.h | 10
deps/v8/src/arm/builtins-arm.cc | 3
deps/v8/src/arm/code-stubs-arm.cc | 26
deps/v8/src/arm/code-stubs-arm.h | 16
deps/v8/src/arm/deoptimizer-arm.cc | 30
deps/v8/src/arm/full-codegen-arm.cc | 201
deps/v8/src/arm/ic-arm.cc | 3
deps/v8/src/arm/lithium-arm.cc | 199
deps/v8/src/arm/lithium-arm.h | 188
deps/v8/src/arm/lithium-codegen-arm.cc | 236
deps/v8/src/arm/macro-assembler-arm.cc | 115
deps/v8/src/arm/macro-assembler-arm.h | 22
deps/v8/src/array.js | 15
deps/v8/src/ast.h | 14
deps/v8/src/compilation-cache.cc | 49
deps/v8/src/compilation-cache.h | 10
deps/v8/src/compiler.cc | 14
deps/v8/src/contexts.h | 2
deps/v8/src/date.js | 21
deps/v8/src/dateparser-inl.h | 281
deps/v8/src/dateparser.cc | 42
deps/v8/src/dateparser.h | 198
deps/v8/src/debug-debugger.js | 15
deps/v8/src/deoptimizer.cc | 190
deps/v8/src/deoptimizer.h | 96
deps/v8/src/flag-definitions.h | 3
deps/v8/src/frames.cc | 37
deps/v8/src/frames.h | 8
deps/v8/src/full-codegen.cc | 8
deps/v8/src/full-codegen.h | 16
deps/v8/src/gdb-jit.cc | 710
deps/v8/src/gdb-jit.h | 8
deps/v8/src/handles.cc | 7
deps/v8/src/handles.h | 2
deps/v8/src/heap.cc | 4
deps/v8/src/hydrogen-instructions.cc | 41
deps/v8/src/hydrogen-instructions.h | 287
deps/v8/src/hydrogen.cc | 423
deps/v8/src/hydrogen.h | 17
deps/v8/src/ia32/assembler-ia32.h | 6
deps/v8/src/ia32/code-stubs-ia32.cc | 11
deps/v8/src/ia32/code-stubs-ia32.h | 14
deps/v8/src/ia32/deoptimizer-ia32.cc | 29
deps/v8/src/ia32/full-codegen-ia32.cc | 208
deps/v8/src/ia32/ic-ia32.cc | 2
deps/v8/src/ia32/lithium-codegen-ia32.cc | 257
deps/v8/src/ia32/lithium-ia32.cc | 222
deps/v8/src/ia32/lithium-ia32.h | 187
deps/v8/src/ia32/macro-assembler-ia32.cc | 11
deps/v8/src/ic.cc | 79
deps/v8/src/ic.h | 6
deps/v8/src/isolate.cc | 5
deps/v8/src/isolate.h | 4
deps/v8/src/log.cc | 24
deps/v8/src/log.h | 6
deps/v8/src/mark-compact.cc | 57
deps/v8/src/mark-compact.h | 4
deps/v8/src/mips/code-stubs-mips.cc | 13
deps/v8/src/mips/code-stubs-mips.h | 16
deps/v8/src/mips/deoptimizer-mips.cc | 5
deps/v8/src/mips/full-codegen-mips.cc | 237
deps/v8/src/mips/macro-assembler-mips.cc | 21
deps/v8/src/mips/macro-assembler-mips.h | 4
deps/v8/src/mirror-debugger.js | 35
deps/v8/src/objects.cc | 50
deps/v8/src/objects.h | 9
deps/v8/src/parser.cc | 126
deps/v8/src/parser.h | 6
deps/v8/src/platform-solaris.cc | 232
deps/v8/src/prettyprinter.cc | 9
deps/v8/src/profile-generator.cc | 108
deps/v8/src/profile-generator.h | 2
deps/v8/src/rewriter.cc | 2
deps/v8/src/runtime-profiler.cc | 143
deps/v8/src/runtime-profiler.h | 23
deps/v8/src/runtime.cc | 236
deps/v8/src/runtime.h | 25
deps/v8/src/scopes.cc | 150
deps/v8/src/scopes.h | 31
deps/v8/src/string.js | 11
deps/v8/src/stub-cache.cc | 4
deps/v8/src/type-info.cc | 1
deps/v8/src/v8-counters.h | 4
deps/v8/src/v8.cc | 44
deps/v8/src/version.cc | 2
deps/v8/src/x64/assembler-x64.h | 6
deps/v8/src/x64/code-stubs-x64.cc | 13
deps/v8/src/x64/code-stubs-x64.h | 16
deps/v8/src/x64/deoptimizer-x64.cc | 32
deps/v8/src/x64/full-codegen-x64.cc | 199
deps/v8/src/x64/ic-x64.cc | 2
deps/v8/src/x64/lithium-codegen-x64.cc | 260
deps/v8/src/x64/lithium-x64.cc | 199
deps/v8/src/x64/lithium-x64.h | 190

deps/v8/AUTHORS | 1

@@ -36,6 +36,7 @@ Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Sanjoy Das <sanjoy@playingwithpointers.com>

deps/v8/ChangeLog | 61

@@ -1,3 +1,18 @@
2011-07-04: Version 3.4.9
Added support for debugger inspection of locals in optimized frames
(issue 1140).
Fixed SConstruct to pass correct defines to samples/preparser when
building with library=shared.
Made date parser handle ES5 Date Time Strings correctly (issue 1498).
Fixed a bug in Object.defineProperty on the arguments object.
Performance improvements on all platforms.
2011-06-29: Version 3.4.8
Ensure 16-byte stack alignment on Solaris (issue 1505).
@@ -54,41 +69,41 @@
2011-06-15: Version 3.4.4
Added snapshot compression support and --stress-opt flag to d8.
Improved performance of try/catch.
Several GYP-related changes: Added support for building Xcode project
files. Make the ARM simulator build with GYP again. Generate Makefiles
for all architectures on Linux.
Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
receiver for strict mode callbacks. (issue 1436)
Fixed a bug where an array load was incorrectly hoisted by GVN.
Handle 'undefined' correctly when === has been specialized for doubles.
(issue 1434)
Corrected the limit of local variables in an optimized function from 64
to 63.
Correctly set ReadOnly flag on indexed properties when using the API Set
method. (issue 1470)
Give the correct error message when Object.isExtensible is called on a
non-object. (issue 1452)
Added GetOwnPropertyNames method for Object in the API. Patch by Peter
Varga.
Do not redefine properties unnecessarily in seal and freeze. (issue
1447)
IsExecutionTerminating has an Isolate parameter now.
Distinguish keyed loads with a symbol key from fast elements loads,
avoiding some useless deoptimizations. (issue 1471)
2011-06-08: Version 3.4.3
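
The date parser entry above (issue 1498) is visible through the public API. A minimal embedder sketch, using the era's C++ API as seen in samples/shell.cc later in this diff; the snippet is illustrative and not part of the commit:

#include <v8.h>
#include <cstdio>

int main() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);
  // Issue 1498: an ES5 Date Time String (with the 'T' separator and the
  // 'Z' UTC designator) must now parse to the correct time value.
  v8::Handle<v8::Script> script = v8::Script::Compile(
      v8::String::New("new Date('2011-07-04T14:48:00.000Z').getTime()"));
  v8::Handle<v8::Value> result = script->Run();
  printf("%.0f\n", result->NumberValue());  // expected: 1309790880000
  context.Dispose();
  return 0;
}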

deps/v8/SConstruct | 39

@@ -89,7 +89,7 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
@@ -230,7 +230,7 @@ LIBRARY_FLAGS = {
'msvc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
'CXXFLAGS': ['/GR-', '/Gy'],
'CPPDEFINES': ['WIN32'],
'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
@@ -400,12 +400,15 @@ DTOA_EXTRA_FLAGS = {
CCTEST_EXTRA_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
},
'gcc': {
'all': {
'LIBPATH': [abspath('.')],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
@@ -436,9 +439,6 @@ CCTEST_EXTRA_FLAGS = {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
@@ -453,12 +453,15 @@ CCTEST_EXTRA_FLAGS = {
SAMPLE_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include')],
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
@@ -472,6 +475,9 @@ SAMPLE_FLAGS = {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
# On Solaris, to get isinf, INFINITY, fpclassify and other macros one
# needs to define __C99FEATURES__.
'CPPDEFINES': ['__C99FEATURES__'],
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
'LINKFLAGS': ['-mt']
@@ -572,9 +578,6 @@ SAMPLE_FLAGS = {
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'prof:on': {
'LINKFLAGS': ['/MAP']
},
@@ -625,13 +628,16 @@ SAMPLE_FLAGS = {
PREPARSER_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')]
'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:win32': {
@@ -727,9 +733,6 @@ PREPARSER_FLAGS = {
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'prof:on': {
'LINKFLAGS': ['/MAP']
},
@@ -782,7 +785,7 @@ D8_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'console:readline': {
@@ -1155,8 +1158,8 @@ def VerifyOptions(env):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
if env['gdbjit'] == 'on' and ((env['os'] != 'linux' and env['os'] != 'macos') or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux/OSX target.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':

deps/v8/include/v8.h | 46

@@ -2557,18 +2557,6 @@ typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCCallback)();
/**
* Profiler modules.
*
* In V8, profiler consists of several modules. Each can be turned on / off
* independently.
*/
enum ProfilerModules {
PROFILER_MODULE_NONE = 0,
PROFILER_MODULE_CPU = 1
};
/**
* Collection of V8 heap information.
*
@@ -2995,40 +2983,6 @@ class V8EXPORT V8 {
*/
static bool IsProfilerPaused();
/**
* Resumes specified profiler modules. Can be called several times to
* mark the opening of a profiler events block with the given tag.
*
* "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
* \param tag Profile tag.
*/
static void ResumeProfilerEx(int flags, int tag = 0);
/**
* Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
* a block of profiler events opened by a call to "ResumeProfilerEx" with the
* same tag value. There is no need for blocks to be properly nested.
* The profiler is paused when the last opened block is closed.
*
* "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
* \param tag Profile tag.
*/
static void PauseProfilerEx(int flags, int tag = 0);
/**
* Returns active (resumed) profiler modules.
* See ProfilerModules enum.
*
* \returns active profiler modules.
*/
static int GetActiveProfilerModules();
/**
* If logging is performed into a memory buffer (via --logfile=*), allows to
* retrieve previously written messages. This can be used for retrieving
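
With ProfilerModules, ResumeProfilerEx, PauseProfilerEx, and GetActiveProfilerModules removed, the remaining surface is the plain pause/resume pair plus IsProfilerPaused. A hedged sketch of post-change embedder code (the workload callback is a placeholder, not from the commit):

#include <v8.h>

void ProfileWorkload(void (*run_workload)()) {
  v8::V8::ResumeProfiler();  // was ResumeProfilerEx(PROFILER_MODULE_CPU)
  run_workload();            // the JavaScript workload being measured
  v8::V8::PauseProfiler();   // was PauseProfilerEx(PROFILER_MODULE_CPU)
  // IsProfilerPaused() now answers a plain yes/no instead of module flags.
  bool paused = v8::V8::IsProfilerPaused();  // true here
  (void) paused;
}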

deps/v8/samples/shell.cc | 9

@@ -498,12 +498,15 @@ void ExternalArrayWeakCallback(v8::Persistent<v8::Value> object, void* data) {
v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
v8::ExternalArrayType type,
size_t element_size) {
ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
assert(element_size == 1 ||
element_size == 2 ||
element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
return v8::ThrowException(
v8::String::New("Array constructor needs one parameter."));
}
static const int kMaxLength = 0x3fffffff;
size_t length = 0;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
@@ -513,7 +516,7 @@ v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
return v8::ThrowException(
v8::String::New("Array length must not be negative."));
}
if (raw_length > v8::internal::ExternalArray::kMaxLength) {
if (raw_length > kMaxLength) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
@@ -522,7 +525,7 @@ v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
return v8::ThrowException(
v8::String::New("Array length must be a number."));
}
if (length > static_cast<size_t>(v8::internal::ExternalArray::kMaxLength)) {
if (length > static_cast<size_t>(kMaxLength)) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
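
The net effect of the shell.cc hunks is that the sample no longer depends on v8::internal: ASSERT becomes plain assert, and the internal ExternalArray::kMaxLength is replaced by a local constant. A standalone restatement of the new check, assuming only the 0x3fffffff limit shown above:

#include <cassert>
#include <cstddef>

static const int kMaxLength = 0x3fffffff;  // mirrors the former internal limit

bool ExternalArrayLengthOk(size_t length, size_t element_size) {
  assert(element_size == 1 || element_size == 2 ||
         element_size == 4 || element_size == 8);
  return length <= static_cast<size_t>(kMaxLength);
}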

deps/v8/src/api.cc | 33

@@ -4831,47 +4831,26 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
PauseProfilerEx(PROFILER_MODULE_CPU);
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->PauseProfiler();
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
ResumeProfilerEx(PROFILER_MODULE_CPU);
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->ResumeProfiler();
#endif
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
#else
return true;
#endif
}
void V8::ResumeProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->ResumeProfiler(flags, tag);
#endif
}
void V8::PauseProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
LOGGER->PauseProfiler(flags, tag);
#endif
}
int V8::GetActiveProfilerModules() {
#ifdef ENABLE_LOGGING_AND_PROFILING
return LOGGER->GetActiveProfilerModules();
return isolate->logger()->IsProfilerPaused();
#else
return PROFILER_MODULE_NONE;
return true;
#endif
}

deps/v8/src/arm/assembler-arm.h | 10

@@ -378,7 +378,6 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -1141,8 +1140,13 @@ class Assembler : public AssemblerBase {
void jmp(Label* L) { b(L, al); }
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
}
// Check the number of instructions generated from label to here.
int InstructionsGeneratedSince(Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Check whether an immediate fits an addressing mode 1 instruction.
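
The new SizeOfCodeGeneratedSince returns bytes, and InstructionsGeneratedSince is now defined in terms of it, so the two counts cannot drift apart. An illustrative use in the macro-assembler style seen throughout this diff (the emitted instructions are arbitrary):

Label start;
__ bind(&start);
__ mov(r0, Operand(1));
__ add(r0, r0, Operand(2));
// Every ARM instruction is kInstrSize (4) bytes, so:
int bytes = masm_->SizeOfCodeGeneratedSince(&start);         // 8
int instructions = masm_->InstructionsGeneratedSince(&start); // 2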

deps/v8/src/arm/builtins-arm.cc | 3

@@ -1044,8 +1044,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
__ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
__ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,

deps/v8/src/arm/code-stubs-arm.cc | 26

@@ -392,11 +392,11 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub1.GetCode());
// Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub2.GetCode());
__ pop(lr);
}
}
@@ -473,7 +473,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, Operand(object));
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(lr);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub.GetCode());
__ pop(lr);
}
@@ -1058,7 +1058,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert lhs to a double in r2, r3.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub1.GetCode());
// Load rhs to a double in r0, r1.
__ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
@@ -1100,7 +1100,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub2.GetCode());
__ pop(lr);
}
// Fall through to both_loaded_as_doubles.
@@ -1731,22 +1731,14 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Prepare to push argument.
__ mov(r3, Operand(r0));
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
__ mov(r1, Operand(Smi::FromInt(op_)));
__ mov(r3, Operand(r0)); // the operand
__ mov(r2, Operand(Smi::FromInt(op_)));
__ mov(r1, Operand(Smi::FromInt(mode_)));
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
__ Push(r3, r2, r1, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
masm->isolate()),
4,
1);
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
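
The call-site churn above (dropping the explicit RelocInfo::CODE_TARGET argument) suggests that MacroAssembler::Call gained a default relocation mode in macro-assembler-arm.{h,cc} (files 17 and 18 of this commit, whose hunks are not shown here). The declaration below is an inference from the call sites, not quoted source:

// Presumed new overload (inferred, not quoted):
//   void Call(Handle<Code> code,
//             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
//             unsigned ast_id = kNoASTId);
// which lets
//   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
// shrink to
__ Call(stub.GetCode());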

deps/v8/src/arm/code-stubs-arm.h | 16

@@ -60,18 +60,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
}
UnaryOpStub(
int key,
UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -89,8 +82,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),

deps/v8/src/arm/deoptimizer-arm.cc | 30

@@ -267,6 +267,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -382,6 +385,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -516,7 +522,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Set the continuation for the topmost frame.
if (is_topmost) {
if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -529,8 +535,28 @@
}
#define __ masm()->
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.

deps/v8/src/arm/full-codegen-arm.cc | 201

@@ -92,17 +92,19 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
reg.set_code(delta_to_patch_site / kOff12Mask);
__ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
reg.set_code(delta_to_patch_site / kOff12Mask);
__ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
info_emitted_ = true;
info_emitted_ = true;
#endif
} else {
__ nop(); // Signals no inlined code.
}
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
MacroAssembler* masm_;
Label patch_site_;
@@ -129,6 +131,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,13 +150,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
int locals_count = scope()->num_stack_slots();
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
@@ -173,7 +176,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -189,7 +192,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -220,10 +223,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
__ mov(r1, Operand(Smi::FromInt(num_parameters)));
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
@@ -345,7 +349,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -786,7 +790,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ Call(ic);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -860,7 +864,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site, clause->CompareId());
__ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
@@ -1167,7 +1172,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, mode, AstNode::kNoNumber);
__ Call(ic, mode);
}
@@ -1248,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1270,7 +1275,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
} else if (slot->type() == Slot::LOOKUP) {
@@ -1414,7 +1419,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1654,7 +1659,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1662,7 +1667,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1689,7 +1694,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site, expr->id());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1770,7 +1776,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(r1);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), NULL, expr->id());
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1810,7 +1818,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ Call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1823,7 +1831,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ Call(ic);
break;
}
}
@@ -1847,7 +1855,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1945,7 +1953,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1991,7 +1999,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2043,7 +2051,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2085,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2117,7 +2125,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(r1);
// Push the receiver of the enclosing function and do runtime call.
__ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
// Push the strict mode flag.
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2260,7 +2269,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
// for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2278,7 +2287,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -2669,7 +2678,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in r0.
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
@@ -2681,7 +2690,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3568,6 +3577,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Load the function into r0.
VisitForAccumulatorValue(args->at(0));
// Prepare for the test.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// Test for strict mode function.
__ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, if_true);
// Test for native function.
__ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, if_true);
// Not native or strict-mode function.
__ b(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3600,7 +3642,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
EmitCallIC(ic, mode, expr->id());
__ Call(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3742,7 +3784,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -3853,7 +3895,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in r0.
@@ -3884,7 +3927,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3901,7 +3944,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3927,7 +3970,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
@@ -4126,7 +4169,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
@@ -4187,70 +4231,6 @@ Register FullCodeGenerator::context_register() {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
RelocInfo::Mode mode,
unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
default:
break;
}
if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
__ Call(ic, mode);
} else {
ASSERT(mode == RelocInfo::CODE_TARGET);
mode = RelocInfo::CODE_TARGET_WITH_ID;
__ CallWithAstId(ic, mode, ast_id);
}
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
JumpPatchSite* patch_site,
unsigned ast_id) {
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
default:
break;
}
if (ast_id == kNoASTId) {
__ Call(ic, RelocInfo::CODE_TARGET);
} else {
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
}
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
@@ -4263,19 +4243,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
if (scope()->is_global_scope()) {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ mov(ip, Operand(Smi::FromInt(0)));
} else if (scope()->is_eval_scope()) {
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
ASSERT(scope()->is_function_scope());
ASSERT(declaration_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
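
Taken together, the full-codegen hunks delete both EmitCallIC wrappers and inline their work: the ast id now rides on __ Call, and patch info is emitted explicitly at each site. A condensed before/after of the pattern, assembled from the hunks above:

// Before: the wrapper bumped counters, chose the reloc mode, and emitted
// the patch info (or a nop) itself.
EmitCallIC(ic, &patch_site, expr->id());

// After: two explicit steps at the call site.
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();  // emits a nop when the patch site is unbound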

deps/v8/src/arm/ic-arm.cc | 3

@@ -952,6 +952,9 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
DONT_DO_SMI_CHECK);
__ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch));
__ b(cs, slow_case);

deps/v8/src/arm/lithium-arm.cc | 199

@@ -265,12 +265,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
void LTypeofIs::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -340,13 +334,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -990,18 +977,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
HTest* test = HTest::cast(current);
instr->set_hydrogen_value(test->value());
HBasicBlock* first = test->FirstSuccessor();
HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1046,80 +1022,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
ASSERT(!v->HasSideEffects());
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
HValue* left = compare->left();
HValue* right = compare->right();
Representation r = compare->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right));
} else {
ASSERT(r.IsDouble());
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right));
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsIsUndetectable()) {
HIsUndetectable* compare = HIsUndetectable::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsCompareObjectEq()) {
HCompareObjectEq* compare = HCompareObjectEq::cast(v);
return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else if (v->IsConstant()) {
if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
} else {
Abort("Undefined compare before branch");
return NULL;
}
return new LBranch(UseRegisterAtStart(v));
}
@@ -1477,85 +1388,84 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
Representation r = instr->GetInputRepresentation();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
return new LCmpIDAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
return new LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEq(
HCompareConstantEq* instr) {
LOperand* left = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LCmpConstantEq(left));
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value));
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsObject(value));
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
return DefineAsRegister(new LIsSmi(value));
return new LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsUndetectable(value));
return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LHasInstanceType(value));
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1568,19 +1478,19 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LHasCachedArrayIndex(value));
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseTempRegister(instr->value());
return DefineSameAsFirst(new LClassOfTest(value));
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister());
}
@@ -2169,13 +2079,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall());
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
return new LIsConstructCallAndBranch(TempRegister());
}

deps/v8/src/arm/lithium-arm.h | 188

@@ -77,13 +77,9 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -103,9 +99,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@@ -113,15 +107,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -173,7 +162,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -232,7 +220,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -456,16 +443,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
void SetBranchTargets(int true_block_id, int false_block_id) {
true_block_id_ = true_block_id;
false_block_id_ = false_block_id;
}
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
int true_block_id_;
int false_block_id_;
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
};
@@ -581,23 +567,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -606,7 +575,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -632,17 +601,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -652,17 +610,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
};
class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCmpConstantEq(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
@@ -674,22 +622,10 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
bool is_strict() const { return hydrogen()->is_strict(); }
};
class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNullAndBranch(LOperand* value) {
@ -697,7 +633,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@ -705,16 +641,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
};
class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@ -723,22 +649,12 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@ -746,22 +662,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsUndetectable(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
};
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@ -771,22 +677,12 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
};
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@ -795,7 +691,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@ -812,17 +708,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@ -831,18 +716,7 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
virtual void PrintDataTo(StringStream* stream);
};
class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
public:
explicit LClassOfTest(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@ -857,7 +731,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@ -871,7 +745,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@ -1000,7 +874,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@ -1979,21 +1853,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream);
};
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@ -2001,7 +1860,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@ -2009,13 +1868,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {

236
deps/v8/src/arm/lithium-codegen-arm.cc

@ -1531,7 +1531,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->representation();
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
@ -1547,7 +1547,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
if (instr->hydrogen()->value()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
EmitBranch(true_block, false_block, eq);
@ -1645,34 +1645,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
Register scratch = scratch0();
Label unordered, done;
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
} else {
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
__ b(cc, &done);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@ -1695,17 +1667,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
@ -1717,17 +1678,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label done;
__ cmp(left, Operand(instr->hydrogen()->right()));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@ -1738,39 +1688,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(reg, ip);
if (instr->is_strict()) {
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
} else {
Label true_value, false_value, done;
__ b(eq, &true_value);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(ip, reg);
__ b(eq, &true_value);
__ JumpIfSmi(reg, &false_value);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
__ b(ne, &true_value);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
@ -1830,25 +1747,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
__ b(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
@ -1866,18 +1764,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
Label done;
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ JumpIfSmi(input_reg, &done);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@ -1888,25 +1774,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label false_label, done;
__ JumpIfSmi(input, &false_label);
__ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
__ tst(result, Operand(1 << Map::kIsUndetectable));
__ b(eq, &false_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@ -1922,7 +1789,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
static InstanceType TestType(HHasInstanceType* instr) {
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@ -1931,7 +1798,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
static Condition BranchCondition(HHasInstanceType* instr) {
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return eq;
@ -1942,23 +1809,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label done;
__ tst(input, Operand(kSmiTagMask));
__ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
__ b(eq, &done);
__ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
Condition cond = BranchCondition(instr->hydrogen());
__ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
__ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
__ bind(&done);
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->InputAt(0));
@ -1988,20 +1838,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@ -2074,27 +1910,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Handle<String> class_name = instr->hydrogen()->class_name();
Label done, is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
__ b(ne, &is_false);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
@ -4349,29 +4164,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
input,
instr->type_literal());
__ b(final_branch_condition, &true_label);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@ -4455,26 +4247,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
EmitIsConstructCall(result, scratch0());
__ b(eq, &true_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());

115
deps/v8/src/arm/macro-assembler-arm.cc

@ -91,7 +91,7 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
@ -118,10 +118,8 @@ int MacroAssembler::CallSize(Register target, Condition cond) {
void MacroAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
#ifdef DEBUG
int pre_position = pc_offset();
#endif
Label start;
bind(&start);
#if USE_BLX
blx(target, cond);
#else
@ -129,34 +127,29 @@ void MacroAssembler::Call(Register target, Condition cond) {
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, cond), post_position);
#endif
ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
intptr_t target, RelocInfo::Mode rmode, Condition cond) {
Address target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
intptr_t immediate = reinterpret_cast<intptr_t>(target);
if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
size += kInstrSize;
}
return size;
}
void MacroAssembler::Call(intptr_t target,
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
#ifdef DEBUG
int pre_position = pc_offset();
#endif
Label start;
bind(&start);
#if USE_BLX
// On ARMv5 and after, the recommended call sequence is:
// ldr ip, [pc, #...]
@ -168,7 +161,7 @@ void MacroAssembler::Call(intptr_t target,
// we have to do it explicitly.
positions_recorder()->WriteRecordedPositions();
mov(ip, Operand(target, rmode));
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@ -176,82 +169,36 @@ void MacroAssembler::Call(intptr_t target,
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
byte* target, RelocInfo::Mode rmode, Condition cond) {
return CallSize(reinterpret_cast<intptr_t>(target), rmode);
}
void MacroAssembler::Call(
byte* target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond);
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
}
int MacroAssembler::CallSize(
Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
void MacroAssembler::CallWithAstId(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
ASSERT(ast_id != kNoASTId);
ASSERT(ast_id_for_reloc_info_ == kNoASTId);
ast_id_for_reloc_info_ = ast_id;
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
#endif
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond) {
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
ASSERT(ast_id_for_reloc_info_ == kNoASTId);
ast_id_for_reloc_info_ = ast_id;
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
#endif
Call(reinterpret_cast<Address>(code.location()), rmode, cond);
ASSERT_EQ(CallSize(code, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
@ -994,9 +941,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(r5, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
Call(adaptor);
call_wrapper.AfterCall();
b(done);
} else {
@ -1719,7 +1666,7 @@ void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}

22
deps/v8/src/arm/macro-assembler-arm.h

@ -90,19 +90,19 @@ class MacroAssembler: public Assembler {
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode,
Condition cond = al);
void CallWithAstId(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al);
void Ret(Condition cond = al);
@ -1036,10 +1036,6 @@ class MacroAssembler: public Assembler {
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target,
RelocInfo::Mode rmode,
Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,

15
deps/v8/src/array.js

@ -742,14 +742,15 @@ function ArraySort(comparefn) {
else return x < y ? -1 : 1;
};
}
var global_receiver = %GetGlobalReceiver();
var receiver =
%_IsNativeOrStrictMode(comparefn) ? void 0 : %GetGlobalReceiver();
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = %_CallFunction(global_receiver, tmp, element, comparefn);
var order = %_CallFunction(receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
@ -771,14 +772,14 @@ function ArraySort(comparefn) {
var v1 = a[to - 1];
var middle_index = from + ((to - from) >> 1);
var v2 = a[middle_index];
var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
var c01 = %_CallFunction(receiver, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
var c02 = %_CallFunction(receiver, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
@ -787,7 +788,7 @@ function ArraySort(comparefn) {
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
var c12 = %_CallFunction(receiver, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
@ -808,7 +809,7 @@ function ArraySort(comparefn) {
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
%_SwapElements(a, i, low_end);
low_end++;
@ -817,7 +818,7 @@ function ArraySort(comparefn) {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
%_SwapElements(a, i, high_start);
if (order < 0) {
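With the new receiver selection above, only a sloppy-mode comparator still sees the global object as its receiver; built-in and strict-mode comparators now get void 0. A sketch of the observable difference (illustrative only, assuming %_IsNativeOrStrictMode behaves as its name suggests):

var seen;
[2, 1].sort(function (a, b) { seen = this; return a - b; });
// seen is the global object: sloppy-mode comparators keep the old receiver.
[2, 1].sort(function (a, b) { "use strict"; seen = this; return a - b; });
// seen is undefined: strict comparators no longer observe the global object.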

14
deps/v8/src/ast.h

@ -772,20 +772,26 @@ class TryStatement: public Statement {
class TryCatchStatement: public TryStatement {
public:
TryCatchStatement(Block* try_block, Handle<String> name, Block* catch_block)
TryCatchStatement(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block)
: TryStatement(try_block),
name_(name),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
}
DECLARE_NODE_TYPE(TryCatchStatement)
Scope* scope() { return scope_; }
Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
Handle<String> name() const { return name_; }
virtual bool IsInlineable() const;
private:
Handle<String> name_;
Scope* scope_;
Variable* variable_;
Block* catch_block_;
};

49
deps/v8/src/compilation-cache.cc

@ -52,8 +52,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate, kEvalGlobalGenerations),
eval_contextual_(isolate, kEvalContextualGenerations),
reg_exp_(isolate, kRegExpGenerations),
enabled_(true),
eager_optimizing_set_(NULL) {
enabled_(true) {
CompilationSubCache* subcaches[kSubCacheCount] =
{&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
@ -62,10 +61,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
}
CompilationCache::~CompilationCache() {
delete eager_optimizing_set_;
eager_optimizing_set_ = NULL;
}
CompilationCache::~CompilationCache() {}
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
@ -457,47 +453,6 @@ void CompilationCache::PutRegExp(Handle<String> source,
}
static bool SourceHashCompare(void* key1, void* key2) {
return key1 == key2;
}
HashMap* CompilationCache::EagerOptimizingSet() {
if (eager_optimizing_set_ == NULL) {
eager_optimizing_set_ = new HashMap(&SourceHashCompare);
}
return eager_optimizing_set_;
}
bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
if (FLAG_opt_eagerly) return true;
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
}
void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
EagerOptimizingSet()->Lookup(key, hash, true);
}
void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
EagerOptimizingSet()->Remove(key, hash);
}
void CompilationCache::ResetEagerOptimizingData() {
HashMap* set = EagerOptimizingSet();
if (set->occupancy() > 0) set->Clear();
}
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();

10
deps/v8/src/compilation-cache.h

@ -223,14 +223,6 @@ class CompilationCache {
JSRegExp::Flags flags,
Handle<FixedArray> data);
// Support for eager optimization tracking.
bool ShouldOptimizeEagerly(Handle<JSFunction> function);
void MarkForEagerOptimizing(Handle<JSFunction> function);
void MarkForLazyOptimizing(Handle<JSFunction> function);
// Reset the eager optimization tracking data.
void ResetEagerOptimizingData();
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@ -274,8 +266,6 @@ class CompilationCache {
// Current enable state of the compilation cache.
bool enabled_;
HashMap* eager_optimizing_set_;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(CompilationCache);

14
deps/v8/src/compiler.cc

@ -109,8 +109,6 @@ void CompilationInfo::DisableOptimization() {
void CompilationInfo::AbortOptimization() {
Handle<Code> code(shared_info()->code());
SetCode(code);
Isolate* isolate = code->GetIsolate();
isolate->compilation_cache()->MarkForLazyOptimizing(closure());
}
@ -413,7 +411,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code()));
info->code(),
info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
@ -422,7 +421,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
*info->code(),
*result,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Hint to the runtime system used when allocating space for initial
@ -618,6 +617,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
function->ReplaceCode(*code);
} else {
// Update the shared function info with the compiled code and the
@ -659,9 +659,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
} else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
function)) {
isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
@ -788,7 +785,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()),
Handle<Code>(info->code())));
Handle<Code>(info->code()),
info));
}
} } // namespace v8::internal

2
deps/v8/src/contexts.h

@ -225,7 +225,6 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
DERIVED_GET_TRAP_INDEX,
@ -234,6 +233,7 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.

21
deps/v8/src/date.js

@ -981,11 +981,22 @@ function PadInt(n, digits) {
function DateToISOString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return this.getUTCFullYear() +
var year = this.getUTCFullYear();
var year_string;
if (year >= 0 && year <= 9999) {
year_string = PadInt(year, 4);
} else {
if (year < 0) {
year_string = "-" + PadInt(-year, 6);
} else {
year_string = "+" + PadInt(year, 6);
}
}
return year_string +
'-' + PadInt(this.getUTCMonth() + 1, 2) +
'-' + PadInt(this.getUTCDate(), 2) +
'-' + PadInt(this.getUTCDate(), 2) +
'T' + PadInt(this.getUTCHours(), 2) +
':' + PadInt(this.getUTCMinutes(), 2) +
':' + PadInt(this.getUTCMinutes(), 2) +
':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
@ -995,8 +1006,8 @@ function DateToISOString() {
function DateToJSON(key) {
var o = ToObject(this);
var tv = DefaultNumber(o);
if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
return null;
if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
return null;
}
return o.toISOString();
}
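The year_string logic above pads four-digit years and switches to the signed six-digit extended-year form outside 0..9999. A sketch of the intended results, assuming the UTC accessors used above:

new Date(Date.UTC(1970, 0, 1)).toISOString();
// "1970-01-01T00:00:00.000Z" (years 0..9999 are zero-padded to four digits)
new Date(Date.UTC(12345, 0, 1)).toISOString();
// "+012345-01-01T00:00:00.000Z" (years above 9999 take '+' and six digits)
new Date(Date.UTC(-12, 0, 1)).toISOString();
// "-000012-01-01T00:00:00.000Z" (negative years take '-' and six digits)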

281
deps/v8/src/dateparser-inl.h

@ -39,16 +39,71 @@ bool DateParser::Parse(Vector<Char> str,
UnicodeCache* unicode_cache) {
ASSERT(out->length() >= OUTPUT_SIZE);
InputReader<Char> in(unicode_cache, str);
DateStringTokenizer<Char> scanner(&in);
TimeZoneComposer tz;
TimeComposer time;
DayComposer day;
while (!in.IsEnd()) {
if (in.IsAsciiDigit()) {
// Parse a number (possibly with 1 or 2 trailing colons).
int n = in.ReadUnsignedNumber();
if (in.Skip(':')) {
if (in.Skip(':')) {
// Specification:
// Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
// with Safari.
// ES5 ISO 8601 dates:
// [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
// where yyyy is in the range 0000..9999 and
// +/-yyyyyy is in the range -999999..+999999 -
// but -000000 is invalid (year zero must be positive),
// MM is in the range 01..12,
// DD is in the range 01..31,
// MM and DD default to 01 if missing,
// HH is generally in the range 00..23, but can be 24 if mm, ss
// and sss are zero (or missing), representing midnight at the
// end of a day,
// mm and ss are in the range 00..59,
// sss is in the range 000..999,
// hh is in the range 00..23,
// mm, ss, and sss default to 00 if missing, and
// timezone defaults to Z if missing.
// Extensions:
// We also allow sss to have more or less than three digits (but at
// least one).
// We allow hh:mm to be specified as hhmm.
// Legacy dates:
// Any unrecognized word before the first number is ignored.
// Parenthesized text is ignored.
// An unsigned number followed by ':' is a time value, and is
// added to the TimeComposer. A number followed by '::' adds a second
// zero as well. A number followed by '.' is also a time and must be
// followed by milliseconds.
// Any other number is a date component and is added to DayComposer.
// A month name (or really: any word having the same first three letters
// as a month name) is recorded as a named month in the Day composer.
// A word recognizable as a time-zone is recorded as such, as is
// '(+|-)(hhmm|hh:)'.
// Legacy dates don't allow extra signs ('+' or '-') or unmatched ')'
// after a number has been read (before the first number, any garbage
// is allowed).
// Intersection of the two:
// A string that matches both formats (e.g. 1970-01-01) will be
// parsed as an ES5 date-time string - which means it will default
// to UTC time-zone. That's unavoidable if following the ES5
// specification.
// After a valid "T" has been read while scanning an ES5 datetime string,
// the input can no longer be a valid legacy date, since the "T" is a
// garbage string after a number has been read.
// First try getting as far as possible with an ES5 Date Time String.
DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
if (next_unhandled_token.IsInvalid()) return false;
bool has_read_number = !day.IsEmpty();
// If there's anything left, continue with the legacy parser.
for (DateToken token = next_unhandled_token;
!token.IsEndOfInput();
token = scanner.Next()) {
if (token.IsNumber()) {
has_read_number = true;
int n = token.number();
if (scanner.SkipSymbol(':')) {
if (scanner.SkipSymbol(':')) {
// n + "::"
if (!time.IsEmpty()) return false;
time.Add(n);
@ -56,12 +111,13 @@ bool DateParser::Parse(Vector<Char> str,
} else {
// n + ":"
if (!time.Add(n)) return false;
in.Skip('.');
if (scanner.Peek().IsSymbol('.')) scanner.Next();
}
} else if (in.Skip('.') && time.IsExpecting(n)) {
} else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
time.Add(n);
if (!in.IsAsciiDigit()) return false;
int n = in.ReadMilliseconds();
if (!scanner.Peek().IsNumber()) return false;
int n = ReadMilliseconds(scanner.Next());
if (n < 0) return false;
time.AddFinal(n);
} else if (tz.IsExpecting(n)) {
tz.SetAbsoluteMinute(n);
@ -69,59 +125,206 @@ bool DateParser::Parse(Vector<Char> str,
time.AddFinal(n);
// Require end, white space, "Z", "+" or "-" immediately after
// finalizing time.
if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
!in.IsAsciiSign()) return false;
DateToken peek = scanner.Peek();
if (!peek.IsEndOfInput() &&
!peek.IsWhiteSpace() &&
!peek.IsKeywordZ() &&
!peek.IsAsciiSign()) return false;
} else {
if (!day.Add(n)) return false;
in.Skip('-'); // Ignore suffix '-' for year, month, or day.
// Skip trailing 'T' for ECMAScript 5 date string format but make
// sure that it is followed by a digit (for the time).
if (in.Skip('T') && !in.IsAsciiDigit()) return false;
scanner.SkipSymbol('-');
}
} else if (in.IsAsciiAlphaOrAbove()) {
} else if (token.IsKeyword()) {
// Parse a "word" (sequence of chars. >= 'A').
uint32_t pre[KeywordTable::kPrefixLength];
int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
int index = KeywordTable::Lookup(pre, len);
KeywordType type = KeywordTable::GetType(index);
KeywordType type = token.keyword_type();
int value = token.keyword_value();
if (type == AM_PM && !time.IsEmpty()) {
time.SetHourOffset(KeywordTable::GetValue(index));
time.SetHourOffset(value);
} else if (type == MONTH_NAME) {
day.SetNamedMonth(KeywordTable::GetValue(index));
in.Skip('-'); // Ignore suffix '-' for month names
} else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
tz.Set(KeywordTable::GetValue(index));
day.SetNamedMonth(value);
scanner.SkipSymbol('-');
} else if (type == TIME_ZONE_NAME && has_read_number) {
tz.Set(value);
} else {
// Garbage words are illegal if a number has been read.
if (in.HasReadNumber()) return false;
if (has_read_number) return false;
}
} else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
} else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).
tz.SetSign(in.GetAsciiSignValue());
in.Next();
int n = in.ReadUnsignedNumber();
if (in.Skip(':')) {
tz.SetSign(token.ascii_sign());
// The following number may be empty.
int n = 0;
if (scanner.Peek().IsNumber()) {
n = scanner.Next().number();
}
has_read_number = true;
if (scanner.Peek().IsSymbol(':')) {
tz.SetAbsoluteHour(n);
tz.SetAbsoluteMinute(kNone);
} else {
tz.SetAbsoluteHour(n / 100);
tz.SetAbsoluteMinute(n % 100);
}
} else if (in.Is('(')) {
// Ignore anything from '(' to a matching ')' or end of string.
in.SkipParentheses();
} else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
} else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
has_read_number) {
// Extra sign or ')' is illegal if a number has been read.
return false;
} else {
// Ignore other characters.
in.Next();
// Ignore other characters and whitespace.
}
}
return day.Write(out) && time.Write(out) && tz.Write(out);
}
template<typename CharType>
DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
int pre_pos = in_->position();
if (in_->IsEnd()) return DateToken::EndOfInput();
if (in_->IsAsciiDigit()) {
int n = in_->ReadUnsignedNumeral();
int length = in_->position() - pre_pos;
return DateToken::Number(n, length);
}
if (in_->Skip(':')) return DateToken::Symbol(':');
if (in_->Skip('-')) return DateToken::Symbol('-');
if (in_->Skip('+')) return DateToken::Symbol('+');
if (in_->Skip('.')) return DateToken::Symbol('.');
if (in_->Skip(')')) return DateToken::Symbol(')');
if (in_->IsAsciiAlphaOrAbove()) {
ASSERT(KeywordTable::kPrefixLength == 3);
uint32_t buffer[3] = {0, 0, 0};
int length = in_->ReadWord(buffer, 3);
int index = KeywordTable::Lookup(buffer, length);
return DateToken::Keyword(KeywordTable::GetType(index),
KeywordTable::GetValue(index),
length);
}
if (in_->SkipWhiteSpace()) {
return DateToken::WhiteSpace(in_->position() - pre_pos);
}
if (in_->SkipParentheses()) {
return DateToken::Unknown();
}
in_->Next();
return DateToken::Unknown();
}
template <typename Char>
DateParser::DateToken DateParser::ParseES5DateTime(
DateStringTokenizer<Char>* scanner,
DayComposer* day,
TimeComposer* time,
TimeZoneComposer* tz) {
ASSERT(day->IsEmpty());
ASSERT(time->IsEmpty());
ASSERT(tz->IsEmpty());
// Parse mandatory date string: [('-'|'+')yy]yyyy['-'MM['-'DD]]
if (scanner->Peek().IsAsciiSign()) {
// Keep the sign token, so we can pass it back to the legacy
// parser if we don't use it.
DateToken sign_token = scanner->Next();
if (!scanner->Peek().IsFixedLengthNumber(6)) return sign_token;
int sign = sign_token.ascii_sign();
int year = scanner->Next().number();
if (sign < 0 && year == 0) return sign_token;
day->Add(sign * year);
} else if (scanner->Peek().IsFixedLengthNumber(4)) {
day->Add(scanner->Next().number());
} else {
return scanner->Next();
}
if (scanner->SkipSymbol('-')) {
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
day->Add(scanner->Next().number());
if (scanner->SkipSymbol('-')) {
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
day->Add(scanner->Next().number());
}
}
// Check for optional time string: 'T'HH':'mm[':'ss['.'sss]][Z|(+|-)hh':'mm]
if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
} else {
// ES5 Date Time String time part is present.
scanner->Next();
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!Between(scanner->Peek().number(), 0, 24)) {
return DateToken::Invalid();
}
// Allow 24:00[:00[.000]], but no other time starting with 24.
bool hour_is_24 = (scanner->Peek().number() == 24);
time->Add(scanner->Next().number());
if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!TimeComposer::IsMinute(scanner->Peek().number()) ||
(hour_is_24 && scanner->Peek().number() > 0)) {
return DateToken::Invalid();
}
time->Add(scanner->Next().number());
if (scanner->SkipSymbol(':')) {
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!TimeComposer::IsSecond(scanner->Peek().number()) ||
(hour_is_24 && scanner->Peek().number() > 0)) {
return DateToken::Invalid();
}
time->Add(scanner->Next().number());
if (scanner->SkipSymbol('.')) {
if (!scanner->Peek().IsNumber() ||
(hour_is_24 && scanner->Peek().number() > 0)) {
return DateToken::Invalid();
}
// Allow more or less than the mandated three digits.
time->Add(ReadMilliseconds(scanner->Next()));
}
}
// Check for optional timezone designation: 'Z' | ('+'|'-')hh':'mm
if (scanner->Peek().IsKeywordZ()) {
scanner->Next();
tz->Set(0);
} else if (scanner->Peek().IsSymbol('+') ||
scanner->Peek().IsSymbol('-')) {
tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
if (scanner->Peek().IsFixedLengthNumber(4)) {
// hhmm extension syntax.
int hourmin = scanner->Next().number();
int hour = hourmin / 100;
int min = hourmin % 100;
if (!TimeComposer::IsHour(hour) || !TimeComposer::IsMinute(min)) {
return DateToken::Invalid();
}
tz->SetAbsoluteHour(hour);
tz->SetAbsoluteMinute(min);
} else {
// hh:mm standard syntax.
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!TimeComposer::IsHour(scanner->Peek().number())) {
return DateToken::Invalid();
}
tz->SetAbsoluteHour(scanner->Next().number());
if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!TimeComposer::IsMinute(scanner->Peek().number())) {
return DateToken::Invalid();
}
tz->SetAbsoluteMinute(scanner->Next().number());
}
}
if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
}
// Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
if (tz->IsEmpty()) tz->Set(0);
day->set_iso_date();
return DateToken::EndOfInput();
}
} } // namespace v8::internal
#endif // V8_DATEPARSER_INL_H_
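Taken together, ParseES5DateTime and the legacy fallback should yield behavior along these lines (a sketch; legacy strings resolve in the host's local time zone, so only the ISO cases have fixed values):

Date.parse("1970-01-01");            // 0: matches both grammars; ES5 wins, so UTC.
Date.parse("1970-01-01T24:00");      // 86400000: 24:00 is midnight ending the day.
Date.parse("1970-01-01T24:01");      // NaN: only 24:00[:00[.000]] may start with 24.
Date.parse("1970-01-01T00:00+0100"); // -3600000: the hhmm time-zone extension.
Date.parse("0050-01-01");            // Year 50 exactly: ISO dates skip windowing.
Date.parse("1/1/50");                // Year 1950, local time: legacy windowing.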

42
deps/v8/src/dateparser.cc

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -44,7 +44,7 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
int day = kNone;
if (named_month_ == kNone) {
if (index_ == 3 && !IsDay(comp_[0])) {
if (is_iso_date_ || (index_ == 3 && !IsDay(comp_[0]))) {
// YMD
year = comp_[0];
month = comp_[1];
@ -71,8 +71,10 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
}
}
if (Between(year, 0, 49)) year += 2000;
else if (Between(year, 50, 99)) year += 1900;
if (!is_iso_date_) {
if (Between(year, 0, 49)) year += 2000;
else if (Between(year, 50, 99)) year += 1900;
}
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
@ -151,6 +153,7 @@ const int8_t DateParser::KeywordTable::
{'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
{'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
{'\0', '\0', '\0', DateParser::INVALID, 0},
};
@ -175,4 +178,35 @@ int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
}
int DateParser::ReadMilliseconds(DateToken token) {
// Read first three significant digits of the original numeral,
// as inferred from the value and the number of digits.
// I.e., use the number of digits to see if there were
// leading zeros.
int number = token.number();
int length = token.length();
if (length < 3) {
// Less than three digits. Multiply to put most significant digit
// in hundreds position.
if (length == 1) {
number *= 100;
} else if (length == 2) {
number *= 10;
}
} else if (length > 3) {
if (length > kMaxSignificantDigits) length = kMaxSignificantDigits;
// More than three digits. Divide by 10^(length - 3) to get three
// most significant digits.
int factor = 1;
do {
ASSERT(factor <= 100000000); // factor won't overflow.
factor *= 10;
length--;
} while (length > 3);
number /= factor;
}
return number;
}
} } // namespace v8::internal
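ReadMilliseconds turns however many fraction digits were written into a 0..999 millisecond count: short numerals are scaled up to put their first digit in the hundreds place, and digits past the first three significant ones are dropped. For instance (a sketch):

new Date("1970-01-01T00:00:00.5Z").getTime();     // 500: one digit scales by 100.
new Date("1970-01-01T00:00:00.050Z").getTime();   // 50: the leading zero counts.
new Date("1970-01-01T00:00:00.12345Z").getTime(); // 123: extra digits are ignored.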

198
deps/v8/src/dateparser.h

@ -61,9 +61,14 @@ class DateParser : public AllStatic {
static inline bool Between(int x, int lo, int hi) {
return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
}
// Indicates a missing value.
static const int kNone = kMaxInt;
// Maximal number of digits used to build the value of a numeral.
// Remaining digits are ignored.
static const int kMaxSignificantDigits = 9;
// InputReader provides basic string parsing and character classification.
template <typename Char>
class InputReader BASE_EMBEDDED {
@ -71,32 +76,28 @@ class DateParser : public AllStatic {
InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
: index_(0),
buffer_(s),
has_read_number_(false),
unicode_cache_(unicode_cache) {
Next();
}
int position() { return index_; }
// Advance to the next character of the string.
void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
// Read a string of digits as an unsigned number (cap just below kMaxInt).
int ReadUnsignedNumber() {
has_read_number_ = true;
int n;
for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
n = n * 10 + ch_ - '0';
}
return n;
void Next() {
ch_ = (index_ < buffer_.length()) ? buffer_[index_] : 0;
index_++;
}
// Read a string of digits, take the first three or fewer as an unsigned
// number of milliseconds, and ignore any digits after the first three.
int ReadMilliseconds() {
has_read_number_ = true;
// Read a string of digits as an unsigned number. Cap value at
// kMaxSignificantDigits, but skip remaining digits if the numeral
// is longer.
int ReadUnsignedNumeral() {
int n = 0;
int power;
for (power = 100; IsAsciiDigit(); Next(), power = power / 10) {
n = n + power * (ch_ - '0');
int i = 0;
while (IsAsciiDigit()) {
if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
i++;
Next();
}
return n;
}
@ -151,18 +152,138 @@ class DateParser : public AllStatic {
// Return 1 for '+' and -1 for '-'.
int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
// Indicates whether any (possibly empty!) numbers have been read.
bool HasReadNumber() const { return has_read_number_; }
private:
int index_;
Vector<Char> buffer_;
bool has_read_number_;
uint32_t ch_;
UnicodeCache* unicode_cache_;
};
enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
enum KeywordType {
INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
};
struct DateToken {
public:
bool IsInvalid() { return tag_ == kInvalidTokenTag; }
bool IsUnknown() { return tag_ == kUnknownTokenTag; }
bool IsNumber() { return tag_ == kNumberTag; }
bool IsSymbol() { return tag_ == kSymbolTag; }
bool IsWhiteSpace() { return tag_ == kWhiteSpaceTag; }
bool IsEndOfInput() { return tag_ == kEndOfInputTag; }
bool IsKeyword() { return tag_ >= kKeywordTagStart; }
int length() { return length_; }
int number() {
ASSERT(IsNumber());
return value_;
}
KeywordType keyword_type() {
ASSERT(IsKeyword());
return static_cast<KeywordType>(tag_);
}
int keyword_value() {
ASSERT(IsKeyword());
return value_;
}
char symbol() {
ASSERT(IsSymbol());
return static_cast<char>(value_);
}
bool IsSymbol(char symbol) {
return IsSymbol() && this->symbol() == symbol;
}
bool IsKeywordType(KeywordType tag) {
return tag_ == tag;
}
bool IsFixedLengthNumber(int length) {
return IsNumber() && length_ == length;
}
bool IsAsciiSign() {
return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
}
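// ASCII '+' is 43 and '-' is 45, so 44 - value_ yields +1 for '+' and -1 for '-'.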
int ascii_sign() {
ASSERT(IsAsciiSign());
return 44 - value_;
}
bool IsKeywordZ() {
return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
}
bool IsUnknown(int character) {
return IsUnknown() && value_ == character;
}
// Factory functions.
static DateToken Keyword(KeywordType tag, int value, int length) {
return DateToken(tag, length, value);
}
static DateToken Number(int value, int length) {
return DateToken(kNumberTag, length, value);
}
static DateToken Symbol(char symbol) {
return DateToken(kSymbolTag, 1, symbol);
}
static DateToken EndOfInput() {
return DateToken(kEndOfInputTag, 0, -1);
}
static DateToken WhiteSpace(int length) {
return DateToken(kWhiteSpaceTag, length, -1);
}
static DateToken Unknown() {
return DateToken(kUnknownTokenTag, 1, -1);
}
static DateToken Invalid() {
return DateToken(kInvalidTokenTag, 0, -1);
}
private:
enum TagType {
kInvalidTokenTag = -6,
kUnknownTokenTag = -5,
kWhiteSpaceTag = -4,
kNumberTag = -3,
kSymbolTag = -2,
kEndOfInputTag = -1,
kKeywordTagStart = 0
};
DateToken(int tag, int length, int value)
: tag_(tag),
length_(length),
value_(value) { }
int tag_;
int length_; // Number of characters.
int value_;
};
template <typename Char>
class DateStringTokenizer {
public:
explicit DateStringTokenizer(InputReader<Char>* in)
: in_(in), next_(Scan()) { }
DateToken Next() {
DateToken result = next_;
next_ = Scan();
return result;
}
DateToken Peek() {
return next_;
}
bool SkipSymbol(char symbol) {
if (next_.IsSymbol(symbol)) {
next_ = Scan();
return true;
}
return false;
}
private:
DateToken Scan();
InputReader<Char>* in_;
DateToken next_;
};
static int ReadMilliseconds(DateToken number);
// KeywordTable maps names of months, time zones, am/pm to numbers.
class KeywordTable : public AllStatic {
@ -201,6 +322,7 @@ class DateParser : public AllStatic {
}
bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
bool Write(FixedArray* output);
bool IsEmpty() { return hour_ == kNone; }
private:
int sign_;
int hour_;
@ -228,10 +350,10 @@ class DateParser : public AllStatic {
bool Write(FixedArray* output);
static bool IsMinute(int x) { return Between(x, 0, 59); }
private:
static bool IsHour(int x) { return Between(x, 0, 23); }
static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsSecond(int x) { return Between(x, 0, 59); }
private:
static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsMillisecond(int x) { return Between(x, 0, 999); }
static const int kSize = 4;
@ -242,22 +364,42 @@ class DateParser : public AllStatic {
class DayComposer BASE_EMBEDDED {
public:
DayComposer() : index_(0), named_month_(kNone) {}
DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
bool IsEmpty() const { return index_ == 0; }
bool Add(int n) {
return index_ < kSize ? (comp_[index_++] = n, true) : false;
if (index_ < kSize) {
comp_[index_] = n;
index_++;
return true;
}
return false;
}
void SetNamedMonth(int n) { named_month_ = n; }
bool Write(FixedArray* output);
private:
void set_iso_date() { is_iso_date_ = true; }
static bool IsMonth(int x) { return Between(x, 1, 12); }
static bool IsDay(int x) { return Between(x, 1, 31); }
private:
static const int kSize = 3;
int comp_[kSize];
int index_;
int named_month_;
// If set, ensures that data is always parsed in year-month-date order.
bool is_iso_date_;
};
// Tries to parse an ES5 Date Time String. Returns the next token
// to continue with in the legacy date string parser. If parsing is
// complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
// returns DateToken::Invalid(). Otherwise parsing continues in the
// legacy parser.
template <typename Char>
static DateParser::DateToken ParseES5DateTime(
DateStringTokenizer<Char>* scanner,
DayComposer* day,
TimeComposer* time,
TimeZoneComposer* tz);
};
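DateStringTokenizer is a one-token-lookahead scanner: Peek() returns the pre-scanned token, Next() hands it out and scans the following one, and SkipSymbol() consumes only on a match. The same shape as a JavaScript sketch (hypothetical, for illustration only):

function makeTokenizer(scan) {  // scan() produces one token per call.
  var next = scan();            // Always hold one token of lookahead.
  return {
    peek: function () { return next; },
    next: function () {         // Return the lookahead, then refill it.
      var result = next;
      next = scan();
      return result;
    },
    skipSymbol: function (symbol) {  // Consume the token only on a match.
      if (next.type === "symbol" && next.value === symbol) {
        next = scan();
        return true;
      }
      return false;
    }
  };
}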

15
deps/v8/src/debug-debugger.js

@ -2311,21 +2311,10 @@ DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
}
var modules = parseInt(request.arguments.modules);
if (isNaN(modules)) {
return response.failed('Modules is not an integer');
}
var tag = parseInt(request.arguments.tag);
if (isNaN(tag)) {
tag = 0;
}
if (request.arguments.command == 'resume') {
%ProfilerResume(modules, tag);
%ProfilerResume();
} else if (request.arguments.command == 'pause') {
%ProfilerPause(modules, tag);
%ProfilerPause();
} else {
return response.failed('Unknown command');
}
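After this simplification a profile request carries only its sub-command; the old modules and tag arguments are neither required nor read. Hypothetical request payloads (the envelope fields besides arguments.command are assumed from the usual debugger protocol):

{ "seq": 1, "type": "request", "command": "profile",
  "arguments": { "command": "pause" } }
{ "seq": 2, "type": "request", "command": "profile",
  "arguments": { "command": "resume" } }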

190
deps/v8/src/deoptimizer.cc

@ -44,6 +44,9 @@ DeoptimizerData::DeoptimizerData() {
lazy_deoptimization_entry_code_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_ = NULL;
#endif
}
@ -58,6 +61,16 @@ DeoptimizerData::~DeoptimizerData() {
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void DeoptimizerData::Iterate(ObjectVisitor* v) {
if (deoptimized_frame_info_ != NULL) {
deoptimized_frame_info_->Iterate(v);
}
}
#endif
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@ -70,7 +83,8 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
type,
bailout_id,
from,
fp_to_sp_delta);
fp_to_sp_delta,
NULL);
ASSERT(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
@ -86,6 +100,92 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
return result;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int frame_index,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
ASSERT(frame->is_optimized());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
// Get the function and code from the frame.
JSFunction* function = JSFunction::cast(frame->function());
Code* code = frame->LookupCode();
Address code_start_address = code->instruction_start();
// Locate the deoptimization point in the code. As we are at a call, the
// return address must be at a place in the code with deoptimization support.
int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
// Scope this, as the SafepointTable constructor will disallow allocation.
{
SafepointTable table(code);
for (unsigned i = 0; i < table.length(); ++i) {
Address address = code_start_address + table.GetPcOffset(i);
if (address == frame->pc()) {
SafepointEntry safepoint_entry = table.GetEntry(i);
ASSERT(safepoint_entry.deoptimization_index() !=
Safepoint::kNoDeoptimizationIndex);
deoptimization_index = safepoint_entry.deoptimization_index();
break;
}
}
}
ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
// Always use the actual stack slots when calculating the fp-to-sp
// delta, adding two for the function and context.
unsigned stack_slots = code->stack_slots();
unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
Deoptimizer::DEBUGGER,
deoptimization_index,
frame->pc(),
fp_to_sp_delta,
code);
Address tos = frame->fp() - fp_to_sp_delta;
deoptimizer->FillInputFrame(tos, frame);
// Calculate the output frames.
Deoptimizer::ComputeOutputFrames(deoptimizer);
// Create the GC safe output frame information and register it for GC
// handling.
ASSERT_LT(frame_index, deoptimizer->output_count());
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(deoptimizer, frame_index);
isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
// Get the "simulated" top and size for the requested frame.
Address top =
reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
unsigned size =
deoptimizer->output_[frame_index]->GetFrameSize() / kPointerSize;
// Done with the GC-unsafe frame descriptions. This re-enables allocation.
deoptimizer->DeleteFrameDescriptions();
// Allocate heap numbers for the doubles belonging to this frame.
deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
top, size, info);
// Finished using the deoptimizer instance.
delete deoptimizer;
return info;
}
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
delete info;
isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
}
#endif
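The two entry points above are meant to be paired. A hedged sketch of the intended call pattern, using only the API added in this change (the surrounding frame/isolate plumbing is assumed, not shown in this diff):

void InspectOptimizedFrame(JavaScriptFrame* frame,
                           int frame_index,
                           Isolate* isolate) {
  // Materializes a GC-safe copy of one output frame of the optimized frame.
  DeoptimizedFrameInfo* info =
      Deoptimizer::DebuggerInspectableFrame(frame, frame_index, isolate);
  for (int i = 0; i < info->expression_count(); i++) {
    Object* value = info->GetExpression(i);
    USE(value);  // hand off to the debugger's mirror layer, not shown
  }
  // Only one inspectable frame may exist at a time; free it explicitly.
  Deoptimizer::DeleteDebuggerInspectableFrame(info, isolate);
}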
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
@ -209,18 +309,24 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta)
int fp_to_sp_delta,
Code* optimized_code)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
input_(NULL),
output_count_(0),
output_(NULL),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
PrintF("**** DEOPT: ");
if (type == DEBUGGER) {
PrintF("**** DEOPT FOR DEBUGGER: ");
} else {
PrintF("**** DEOPT: ");
}
function->PrintName();
PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
bailout_id,
@ -248,10 +354,16 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
optimized_code_ = function_->code();
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
} else if (type == DEBUGGER) {
optimized_code_ = optimized_code;
ASSERT(optimized_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
#ifdef DEBUG
input_->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
}
@ -417,6 +529,7 @@ void Deoptimizer::DoComputeOutputFrames() {
void Deoptimizer::MaterializeHeapNumbers() {
ASSERT_NE(DEBUGGER, bailout_type_);
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@ -432,6 +545,35 @@ void Deoptimizer::MaterializeHeapNumbers() {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, intptr_t size, DeoptimizedFrameInfo* info) {
ASSERT_EQ(DEBUGGER, bailout_type_);
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
// Check whether the heap number to materialize actually belongs to the
// frame being extracted.
Address slot = d.slot_address();
if (top <= slot && slot < top + size) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
int expression_index = static_cast<int>(
info->expression_count_ - (slot - top) / kPointerSize - 1);
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for expression stack index %d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
expression_index);
}
info->SetExpression(expression_index, *num);
}
}
}
#endif
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset) {
@ -972,18 +1114,32 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
if (slot_index >= 0) {
// Local or spill slots. Skip the fixed part of the frame
// including all arguments.
unsigned base = static_cast<unsigned>(
GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
unsigned base =
GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
unsigned base = static_cast<unsigned>(GetFrameSize() -
deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
unsigned base = GetFrameSize() -
deoptimizer->ComputeIncomingArgumentSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
}
}
unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
ASSERT_EQ(Code::FUNCTION, kind_);
unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
return size / kPointerSize;
}
Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
ASSERT_EQ(Code::FUNCTION, kind_);
unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
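The offset arithmetic in GetOffsetFromSlotIndex is easier to see with concrete numbers. A worked example with illustrative values (a 4-byte pointer target; the frame and fixed sizes are made up):

// For a local slot: base   = GetFrameSize() - ComputeFixedSize(...)
//                   offset = base - (slot_index + 1) * kPointerSize
// With GetFrameSize() == 48, ComputeFixedSize() == 24, kPointerSize == 4:
//   slot_index 0 -> 24 - 4  == 20
//   slot_index 2 -> 24 - 12 == 12
// Negative slot indices select incoming parameters via the second branch.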
void TranslationBuffer::Add(int32_t value) {
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
@ -1256,4 +1412,24 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
}
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
Deoptimizer* deoptimizer, int frame_index) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
expression_count_ = output_frame->GetExpressionCount(deoptimizer);
expression_stack_ = new Object*[expression_count_];
for (int i = 0; i < expression_count_; i++) {
SetExpression(i, output_frame->GetExpression(deoptimizer, i));
}
}
DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
delete[] expression_stack_;
}
void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
} } // namespace v8::internal

96
deps/v8/src/deoptimizer.h

@ -41,7 +41,7 @@ namespace internal {
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
@ -81,11 +81,19 @@ class DeoptimizerData {
DeoptimizerData();
~DeoptimizerData();
#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
#endif
private:
LargeObjectChunk* eager_deoptimization_entry_code_;
LargeObjectChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif
// List of deoptimized code which still have references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function has been
@ -103,7 +111,10 @@ class Deoptimizer : public Malloced {
enum BailoutType {
EAGER,
LAZY,
OSR
OSR,
// This last bailout type is not really a bailout, but is used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
int output_count() const { return output_count_; }
@ -116,6 +127,16 @@ class Deoptimizer : public Malloced {
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
// The returned object with information on the optimized frame needs to be
// freed before another one can be generated.
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
int frame_index,
Isolate* isolate);
static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate);
#endif
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
// patching. If there is not enough room a new relocation
@ -171,6 +192,10 @@ class Deoptimizer : public Malloced {
~Deoptimizer();
void MaterializeHeapNumbers();
#ifdef ENABLE_DEBUGGER_SUPPORT
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, intptr_t size, DeoptimizedFrameInfo* info);
#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@ -233,7 +258,8 @@ class Deoptimizer : public Malloced {
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta);
int fp_to_sp_delta,
Code* optimized_code);
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@ -269,6 +295,11 @@ class Deoptimizer : public Malloced {
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
// Fill the input frame from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
void FillInputFrame(Address tos, JavaScriptFrame* frame);
Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
@ -290,6 +321,7 @@ class Deoptimizer : public Malloced {
friend class FrameDescription;
friend class DeoptimizingCodeListNode;
friend class DeoptimizedFrameInfo;
};
@ -308,7 +340,10 @@ class FrameDescription {
free(description);
}
intptr_t GetFrameSize() const { return frame_size_; }
uint32_t GetFrameSize() const {
ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
return static_cast<uint32_t>(frame_size_);
}
JSFunction* GetFunction() const { return function_; }
@ -360,6 +395,17 @@ class FrameDescription {
void SetContinuation(intptr_t pc) { continuation_ = pc; }
#ifdef DEBUG
Code::Kind GetKind() const { return kind_; }
void SetKind(Code::Kind kind) { kind_ = kind; }
#endif
// Get the expression stack height for an unoptimized frame.
unsigned GetExpressionCount(Deoptimizer* deoptimizer);
// Get the expression stack value for an unoptimized frame.
Object* GetExpression(Deoptimizer* deoptimizer, int index);
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
}
@ -391,6 +437,9 @@ class FrameDescription {
private:
static const uint32_t kZapUint32 = 0xbeeddead;
// frame_size_ must hold a uint32_t value. It is only a uintptr_t to
// keep the variable-size array frame_content_ of type intptr_t at
// the end of the structure aligned.
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
@ -399,6 +448,9 @@ class FrameDescription {
intptr_t pc_;
intptr_t fp_;
Smi* state_;
#ifdef DEBUG
Code::Kind kind_;
#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@ -597,6 +649,42 @@ class SlotRef BASE_EMBEDDED {
};
#ifdef ENABLE_DEBUGGER_SUPPORT
// Class used to represent an unoptimized frame when the debugger
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe, so for use
// by the debugger the frame information is copied to an object of this type.
class DeoptimizedFrameInfo : public Malloced {
public:
DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
virtual ~DeoptimizedFrameInfo();
// GC support.
void Iterate(ObjectVisitor* v);
// Return the height of the expression stack.
int expression_count() { return expression_count_; }
// Get an expression from the expression stack.
Object* GetExpression(int index) {
ASSERT(0 <= index && index < expression_count());
return expression_stack_[index];
}
private:
// Set an expression on the expression stack.
void SetExpression(int index, Object* obj) {
ASSERT(0 <= index && index < expression_count());
expression_stack_[index] = obj;
}
int expression_count_;
Object** expression_stack_;
friend class Deoptimizer;
};
#endif
} } // namespace v8::internal
#endif // V8_DEOPTIMIZER_H_

3
deps/v8/src/flag-definitions.h

@ -203,7 +203,6 @@ DEFINE_bool(deopt, true, "support deoptimization")
DEFINE_bool(trace_deopt, false, "trace deoptimization")
// compiler.cc
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(always_full_compiler, false,
@ -372,6 +371,8 @@ DEFINE_bool(debug_script_collected_events, true,
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
DEFINE_string(gdbjit_dump_filter, "",
"dump only objects containing this substring")
//
// Debug only flags

37
deps/v8/src/frames.cc

@ -528,6 +528,17 @@ Address StandardFrame::GetExpressionAddress(int n) const {
}
Object* StandardFrame::GetExpression(Address fp, int index) {
return Memory::Object_at(GetExpressionAddress(fp, index));
}
Address StandardFrame::GetExpressionAddress(Address fp, int n) {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp + offset - n * kPointerSize;
}
int StandardFrame::ComputeExpressionsCount() const {
const int offset =
StandardFrameConstants::kExpressionsOffset + kPointerSize;
@ -646,6 +657,16 @@ bool JavaScriptFrame::IsConstructor() const {
}
int JavaScriptFrame::GetArgumentsLength() const {
// If there is an arguments adaptor frame get the arguments length from it.
if (has_adapted_arguments()) {
return Smi::cast(GetExpression(caller_fp(), 0))->value();
} else {
return GetNumberOfIncomingArguments();
}
}
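What the adaptor check covers, in a hedged comment-style example (the JavaScript is illustrative):

// function f(a, b) { }   // GetNumberOfIncomingArguments() == 2
// f(1, 2, 3);            // the call site passes 3 arguments
// While f is on the stack, an arguments adaptor frame sits between f's
// frame and its caller; expression slot 0 of that caller frame holds the
// actual count as a Smi, so GetArgumentsLength() returns 3.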
Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
return function->unchecked_code();
@ -812,6 +833,22 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
}
int OptimizedFrame::GetInlineCount() {
ASSERT(is_optimized());
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
USE(opcode);
int frame_count = it.Next();
return frame_count;
}
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
ASSERT(is_optimized());

8
deps/v8/src/frames.h

@ -383,6 +383,7 @@ class StandardFrame: public StackFrame {
inline Object* GetExpression(int index) const;
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
static Object* GetExpression(Address fp, int index);
virtual void SetCallerFp(Address caller_fp);
@ -411,6 +412,7 @@ class StandardFrame: public StackFrame {
// Returns the address of the n'th expression stack element.
Address GetExpressionAddress(int n) const;
static Address GetExpressionAddress(Address fp, int n);
// Determines if the n'th expression stack element is in a stack
// handler or not. Requires traversing all handlers in this frame.
@ -483,6 +485,7 @@ class JavaScriptFrame: public StandardFrame {
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
inline bool has_adapted_arguments() const;
int GetArgumentsLength() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@ -495,6 +498,9 @@ class JavaScriptFrame: public StandardFrame {
// Determine the code for the frame.
virtual Code* unchecked_code() const;
// Returns the levels of inlining for this frame.
virtual int GetInlineCount() { return 1; }
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions);
@ -533,6 +539,8 @@ class OptimizedFrame : public JavaScriptFrame {
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
virtual int GetInlineCount();
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)

8
deps/v8/src/full-codegen.cc

@ -401,7 +401,7 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
// Adjust by a (parameter or local) base offset.
switch (slot->type()) {
case Slot::PARAMETER:
offset += (scope()->num_parameters() + 1) * kPointerSize;
offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
break;
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
@ -1106,7 +1106,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
__ Push(stmt->name());
__ Push(stmt->variable()->name());
__ push(result_register());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushCatchContext, 3);
@ -1114,7 +1114,11 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
context_register());
}
Scope* saved_scope = scope();
scope_ = stmt->scope();
ASSERT(scope_->declarations()->is_empty());
Visit(stmt->catch_block());
scope_ = saved_scope;
__ jmp(&done);
// Try block code. Sets up the exception handler chain.
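The manual save/restore of scope_ around the catch block is the pattern this change relies on. For readers, an equivalent RAII helper (ours, purely illustrative; V8 3.4 writes it inline as shown above):

// Restores *slot to its previous value when this object goes out of scope.
template <typename T>
class SavedValue {
 public:
  SavedValue(T* slot, T new_value) : slot_(slot), saved_(*slot) {
    *slot_ = new_value;
  }
  ~SavedValue() { *slot_ = saved_; }
 private:
  T* slot_;
  T saved_;
};

// Usage sketch: SavedValue<Scope*> save(&scope_, stmt->scope());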

16
deps/v8/src/full-codegen.h

@ -80,6 +80,7 @@ class FullCodeGenerator: public AstVisitor {
explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
info_(NULL),
scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
context_(NULL),
@ -531,23 +532,11 @@ class FullCodeGenerator: public AstVisitor {
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
Scope* scope() { return scope_; }
static Register result_register();
static Register context_register();
// Helper for calling an IC stub.
void EmitCallIC(Handle<Code> ic,
RelocInfo::Mode mode,
unsigned ast_id);
// Calling an IC stub with a patch site. Passing NULL for patch_site
// or non NULL patch_site which is not activated indicates no inlined smi code
// and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic,
JumpPatchSite* patch_site,
unsigned ast_id);
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
@ -758,6 +747,7 @@ class FullCodeGenerator: public AstVisitor {
MacroAssembler* masm_;
CompilationInfo* info_;
Scope* scope_;
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;

710
deps/v8/src/gdb-jit.cc

@ -34,16 +34,29 @@
#include "global-handles.h"
#include "messages.h"
#include "natives.h"
#include "scopeinfo.h"
namespace v8 {
namespace internal {
#ifdef __APPLE__
#define __MACH_O
class MachO;
class MachOSection;
typedef MachO DebugObject;
typedef MachOSection DebugSection;
#else
#define __ELF
class ELF;
class ELFSection;
typedef ELF DebugObject;
typedef ELFSection DebugSection;
#endif
class Writer BASE_EMBEDDED {
public:
explicit Writer(ELF* elf)
: elf_(elf),
explicit Writer(DebugObject* debug_object)
: debug_object_(debug_object),
position_(0),
capacity_(1024),
buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
@ -112,7 +125,7 @@ class Writer BASE_EMBEDDED {
}
}
ELF* elf() { return elf_; }
DebugObject* debug_object() { return debug_object_; }
byte* buffer() { return buffer_; }
@ -165,7 +178,7 @@ class Writer BASE_EMBEDDED {
return reinterpret_cast<T*>(&buffer_[offset]);
}
ELF* elf_;
DebugObject* debug_object_;
uintptr_t position_;
uintptr_t capacity_;
byte* buffer_;
@ -173,21 +186,120 @@ class Writer BASE_EMBEDDED {
class StringTable;
class ELFSection : public ZoneObject {
template<typename THeader>
class DebugSectionBase : public ZoneObject {
public:
struct Header {
uint32_t name;
uint32_t type;
uintptr_t flags;
uintptr_t address;
uintptr_t offset;
uintptr_t size;
uint32_t link;
uint32_t info;
uintptr_t alignment;
uintptr_t entry_size;
virtual ~DebugSectionBase() { }
virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
uintptr_t start = writer->position();
if (WriteBody(writer)) {
uintptr_t end = writer->position();
header->offset = start;
#if defined(__MACH_O)
header->addr = 0;
#endif
header->size = end - start;
}
}
virtual bool WriteBody(Writer* writer) {
return false;
}
typedef THeader Header;
};
struct MachOSectionHeader {
char sectname[16];
char segname[16];
#if defined(V8_TARGET_ARCH_IA32)
uint32_t addr;
uint32_t size;
#else
uint64_t addr;
uint64_t size;
#endif
uint32_t offset;
uint32_t align;
uint32_t reloff;
uint32_t nreloc;
uint32_t flags;
uint32_t reserved1;
uint32_t reserved2;
};
class MachOSection : public DebugSectionBase<MachOSectionHeader> {
public:
enum Type {
S_REGULAR = 0x0u,
S_ATTR_COALESCED = 0xbu,
S_ATTR_SOME_INSTRUCTIONS = 0x400u,
S_ATTR_DEBUG = 0x02000000u,
S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
};
MachOSection(const char* name,
const char* segment,
uintptr_t align,
uint32_t flags)
: name_(name),
segment_(segment),
align_(align),
flags_(flags) {
ASSERT(IsPowerOf2(align));
if (align_ != 0) {
align_ = WhichPowerOf2(align_);
}
}
virtual ~MachOSection() { }
virtual void PopulateHeader(Writer::Slot<Header> header) {
header->addr = 0;
header->size = 0;
header->offset = 0;
header->align = align_;
header->reloff = 0;
header->nreloc = 0;
header->flags = flags_;
header->reserved1 = 0;
header->reserved2 = 0;
memset(header->sectname, 0, sizeof(header->sectname));
memset(header->segname, 0, sizeof(header->segname));
ASSERT(strlen(name_) < sizeof(header->sectname));
ASSERT(strlen(segment_) < sizeof(header->segname));
strncpy(header->sectname, name_, sizeof(header->sectname));
strncpy(header->segname, segment_, sizeof(header->segname));
}
private:
const char* name_;
const char* segment_;
uintptr_t align_;
uint32_t flags_;
};
struct ELFSectionHeader {
uint32_t name;
uint32_t type;
uintptr_t flags;
uintptr_t address;
uintptr_t offset;
uintptr_t size;
uint32_t link;
uint32_t info;
uintptr_t alignment;
uintptr_t entry_size;
};
#if defined(__ELF)
class ELFSection : public DebugSectionBase<ELFSectionHeader> {
public:
enum Type {
TYPE_NULL = 0,
TYPE_PROGBITS = 1,
@ -252,15 +364,45 @@ class ELFSection : public ZoneObject {
header->entry_size = 0;
}
private:
const char* name_;
Type type_;
uintptr_t align_;
uint16_t index_;
};
#endif // defined(__ELF)
#if defined(__MACH_O)
class MachOTextSection : public MachOSection {
public:
MachOTextSection(uintptr_t align,
uintptr_t addr,
uintptr_t size)
: MachOSection("__text",
"__TEXT",
align,
MachOSection::S_REGULAR |
MachOSection::S_ATTR_SOME_INSTRUCTIONS |
MachOSection::S_ATTR_PURE_INSTRUCTIONS),
addr_(addr),
size_(size) { }
protected:
virtual void PopulateHeader(Writer::Slot<Header> header) {
MachOSection::PopulateHeader(header);
header->addr = addr_;
header->size = size_;
}
private:
uintptr_t addr_;
uintptr_t size_;
};
#endif // defined(__MACH_O)
#if defined(__ELF)
class FullHeaderELFSection : public ELFSection {
public:
FullHeaderELFSection(const char* name,
@ -349,8 +491,139 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
header->alignment = align_;
PopulateHeader(header);
}
#endif // defined(__ELF)
#if defined(__MACH_O)
class MachO BASE_EMBEDDED {
public:
MachO() : sections_(6) { }
uint32_t AddSection(MachOSection* section) {
sections_.Add(section);
return sections_.length() - 1;
}
void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
Writer::Slot<MachOHeader> header = WriteHeader(w);
uintptr_t load_command_start = w->position();
Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
code_start,
code_size);
WriteSections(w, cmd, header, load_command_start);
}
private:
struct MachOHeader {
uint32_t magic;
uint32_t cputype;
uint32_t cpusubtype;
uint32_t filetype;
uint32_t ncmds;
uint32_t sizeofcmds;
uint32_t flags;
#if defined(V8_TARGET_ARCH_X64)
uint32_t reserved;
#endif
};
struct MachOSegmentCommand {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
#if defined(V8_TARGET_ARCH_IA32)
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
uint32_t filesize;
#else
uint64_t vmaddr;
uint64_t vmsize;
uint64_t fileoff;
uint64_t filesize;
#endif
uint32_t maxprot;
uint32_t initprot;
uint32_t nsects;
uint32_t flags;
};
enum MachOLoadCommandCmd {
LC_SEGMENT_32 = 0x00000001u,
LC_SEGMENT_64 = 0x00000019u
};
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if defined(V8_TARGET_ARCH_IA32)
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
#elif defined(V8_TARGET_ARCH_X64)
header->magic = 0xFEEDFACFu;
header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
header->reserved = 0;
#else
#error Unsupported target architecture.
#endif
header->filetype = 0x1; // MH_OBJECT
header->ncmds = 1;
header->sizeofcmds = 0;
header->flags = 0;
return header;
}
Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
uintptr_t code_start,
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
#if defined(V8_TARGET_ARCH_IA32)
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
#endif
cmd->vmaddr = code_start;
cmd->vmsize = code_size;
cmd->fileoff = 0;
cmd->filesize = 0;
cmd->maxprot = 7;
cmd->initprot = 7;
cmd->flags = 0;
cmd->nsects = sections_.length();
memset(cmd->segname, 0, 16);
cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
cmd->nsects;
return cmd;
}
void WriteSections(Writer* w,
Writer::Slot<MachOSegmentCommand> cmd,
Writer::Slot<MachOHeader> header,
uintptr_t load_command_start) {
Writer::Slot<MachOSection::Header> headers =
w->CreateSlotsHere<MachOSection::Header>(sections_.length());
cmd->fileoff = w->position();
header->sizeofcmds = w->position() - load_command_start;
for (int section = 0; section < sections_.length(); ++section) {
sections_[section]->PopulateHeader(headers.at(section));
sections_[section]->WriteBody(headers.at(section), w);
}
cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
}
ZoneList<MachOSection*> sections_;
};
#endif // defined(__MACH_O)
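As we read WriteHeader, WriteSegmentCommand, and WriteSections, the Mach-O object the writer emits has this layout (a sketch of the byte stream, not an authoritative spec reference):

// offset 0:  MachOHeader           (MH_OBJECT, ncmds == 1)
// then:      MachOSegmentCommand   (LC_SEGMENT_32 or LC_SEGMENT_64)
// then:      MachOSection::Header  x nsects (one per AddSection call)
// then:      section bodies        (cmd->fileoff .. fileoff + filesize)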
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
ELF() : sections_(6) {
@ -596,7 +869,7 @@ class ELFSymbolTable : public ELFSection {
// String table for this symbol table should follow it in the section table.
StringTable* strtab =
static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
symbols.at(0).set(ELFSymbol::SerializedLayout(0,
0,
@ -640,6 +913,7 @@ class ELFSymbolTable : public ELFSection {
ZoneList<ELFSymbol> locals_;
ZoneList<ELFSymbol> globals_;
};
#endif // defined(__ELF)
class CodeDescription BASE_EMBEDDED {
@ -657,12 +931,14 @@ class CodeDescription BASE_EMBEDDED {
Code* code,
Handle<Script> script,
GDBJITLineInfo* lineinfo,
GDBJITInterface::CodeTag tag)
GDBJITInterface::CodeTag tag,
CompilationInfo* info)
: name_(name),
code_(code),
script_(script),
lineinfo_(lineinfo),
tag_(tag) {
tag_(tag),
info_(info) {
}
const char* name() const {
@ -677,6 +953,14 @@ class CodeDescription BASE_EMBEDDED {
return tag_;
}
CompilationInfo* info() const {
return info_;
}
bool IsInfoAvailable() const {
return info_ != NULL;
}
uintptr_t CodeStart() const {
return reinterpret_cast<uintptr_t>(code_->instruction_start());
}
@ -724,12 +1008,13 @@ class CodeDescription BASE_EMBEDDED {
Handle<Script> script_;
GDBJITLineInfo* lineinfo_;
GDBJITInterface::CodeTag tag_;
CompilationInfo* info_;
#ifdef V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};
#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
ELF* elf,
int text_section_index) {
@ -754,14 +1039,42 @@ static void CreateSymbolsTable(CodeDescription* desc,
ELFSymbol::TYPE_FUNC,
text_section_index));
}
#endif // defined(__ELF)
class DebugInfoSection : public ELFSection {
class DebugInfoSection : public DebugSection {
public:
explicit DebugInfoSection(CodeDescription* desc)
: ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
#if defined(__ELF)
: ELFSection(".debug_info", TYPE_PROGBITS, 1),
#else
: MachOSection("__debug_info",
"__DWARF",
1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
desc_(desc) { }
// DWARF2 standard
enum DWARF2LocationOp {
DW_OP_reg0 = 0x50,
DW_OP_reg1 = 0x51,
DW_OP_reg2 = 0x52,
DW_OP_reg3 = 0x53,
DW_OP_reg4 = 0x54,
DW_OP_reg5 = 0x55,
DW_OP_reg6 = 0x56,
DW_OP_reg7 = 0x57,
DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
};
enum DWARF2Encoding {
DW_ATE_ADDRESS = 0x1,
DW_ATE_SIGNED = 0x5
};
bool WriteBody(Writer* w) {
uintptr_t cu_start = w->position();
Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
w->Write<uint16_t>(2); // DWARF version.
@ -773,6 +1086,123 @@ class DebugInfoSection : public ELFSection {
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
w->WriteULEB128(3);
w->Write<uint8_t>(kPointerSize);
w->WriteString("v8value");
if (desc_->IsInfoAvailable()) {
CompilationInfo* info = desc_->info();
ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
w->WriteULEB128(2);
w->WriteString(desc_->name());
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
#if defined(V8_TARGET_ARCH_IA32)
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif defined(V8_TARGET_ARCH_X64)
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
#else
#error Unsupported target architecture.
#endif
fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
int params = scope_info.number_of_parameters();
int slots = scope_info.number_of_stack_slots();
int context_slots = scope_info.number_of_context_slots();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int locals = scope_info.NumberOfLocals();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
*scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
w->WriteSLEB128(
JavaScriptFrameConstants::kLastParameterOffset +
kPointerSize * (params - param - 1));
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
EmbeddedVector<char, 256> buffer;
StringBuilder builder(buffer.start(), buffer.length());
for (int slot = 0; slot < slots; ++slot) {
w->WriteULEB128(current_abbreviation++);
builder.Reset();
builder.AddFormatted("slot%d", slot);
w->WriteString(builder.Finalize());
}
// See contexts.h for more information.
ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
ASSERT(Context::CLOSURE_INDEX == 0);
ASSERT(Context::PREVIOUS_INDEX == 1);
ASSERT(Context::EXTENSION_INDEX == 2);
ASSERT(Context::GLOBAL_INDEX == 3);
w->WriteULEB128(current_abbreviation++);
w->WriteString(".closure");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".previous");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".extension");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".global");
for (int context_slot = 0;
context_slot < context_slots;
++context_slot) {
w->WriteULEB128(current_abbreviation++);
builder.Reset();
builder.AddFormatted("context_slot%d", context_slot + internal_slots);
w->WriteString(builder.Finalize());
}
for (int local = 0; local < locals; ++local) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
*scope_info.LocalName(local)->ToCString(DISALLOW_NULLS));
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
w->WriteSLEB128(
JavaScriptFrameConstants::kLocal0Offset -
kPointerSize * local);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
{
w->WriteULEB128(current_abbreviation++);
w->WriteString("__function");
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
{
w->WriteULEB128(current_abbreviation++);
w->WriteString("__context");
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
w->WriteSLEB128(StandardFrameConstants::kContextOffset);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
}
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
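A worked example of the parameter location expression above, with illustrative constants (kPointerSize == 4 and kLastParameterOffset == 8 are assumptions made for the arithmetic):

// For params == 2, parameter 0 encodes:
//   DW_OP_fbreg, SLEB128(8 + 4 * (2 - 0 - 1))  ==  DW_OP_fbreg +12
// and parameter 1 encodes DW_OP_fbreg +8, i.e. each parameter sits one
// pointer-size step above the frame base (ebp on ia32, rbp on x64, per
// the DW_OP_reg5 / DW_OP_reg6 frame-base block written earlier).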
@ -782,13 +1212,28 @@ class DebugInfoSection : public ELFSection {
};
class DebugAbbrevSection : public ELFSection {
class DebugAbbrevSection : public DebugSection {
public:
DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
explicit DebugAbbrevSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
#else
: MachOSection("__debug_abbrev",
"__DWARF",
1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
desc_(desc) { }
// DWARF2 standard, figure 14.
enum DWARF2Tags {
DW_TAG_COMPILE_UNIT = 0x11
DW_TAG_FORMAL_PARAMETER = 0x05,
DW_TAG_POINTER_TYPE = 0xf,
DW_TAG_COMPILE_UNIT = 0x11,
DW_TAG_STRUCTURE_TYPE = 0x13,
DW_TAG_BASE_TYPE = 0x24,
DW_TAG_SUBPROGRAM = 0x2e,
DW_TAG_VARIABLE = 0x34
};
// DWARF2 standard, figure 16.
@ -799,23 +1244,55 @@ class DebugAbbrevSection : public ELFSection {
// DWARF standard, figure 17.
enum DWARF2Attribute {
DW_AT_LOCATION = 0x2,
DW_AT_NAME = 0x3,
DW_AT_BYTE_SIZE = 0xb,
DW_AT_STMT_LIST = 0x10,
DW_AT_LOW_PC = 0x11,
DW_AT_HIGH_PC = 0x12
DW_AT_HIGH_PC = 0x12,
DW_AT_ENCODING = 0x3e,
DW_AT_FRAME_BASE = 0x40,
DW_AT_TYPE = 0x49
};
// DWARF2 standard, figure 19.
enum DWARF2AttributeForm {
DW_FORM_ADDR = 0x1,
DW_FORM_BLOCK4 = 0x4,
DW_FORM_STRING = 0x8,
DW_FORM_DATA4 = 0x6
DW_FORM_DATA4 = 0x6,
DW_FORM_BLOCK = 0x9,
DW_FORM_DATA1 = 0xb,
DW_FORM_FLAG = 0xc,
DW_FORM_REF4 = 0x13
};
void WriteVariableAbbreviation(Writer* w,
int abbreviation_code,
bool has_value,
bool is_parameter) {
w->WriteULEB128(abbreviation_code);
w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
w->Write<uint8_t>(DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
if (has_value) {
w->WriteULEB128(DW_AT_TYPE);
w->WriteULEB128(DW_FORM_REF4);
w->WriteULEB128(DW_AT_LOCATION);
w->WriteULEB128(DW_FORM_BLOCK4);
}
w->WriteULEB128(0);
w->WriteULEB128(0);
}
bool WriteBody(Writer* w) {
w->WriteULEB128(1);
int current_abbreviation = 1;
bool extra_info = desc_->IsInfoAvailable();
ASSERT(desc_->IsLineInfoAvailable());
w->WriteULEB128(current_abbreviation++);
w->WriteULEB128(DW_TAG_COMPILE_UNIT);
w->Write<uint8_t>(DW_CHILDREN_NO);
w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
@ -826,16 +1303,101 @@ class DebugAbbrevSection : public ELFSection {
w->WriteULEB128(DW_FORM_DATA4);
w->WriteULEB128(0);
w->WriteULEB128(0);
w->WriteULEB128(0);
if (extra_info) {
CompilationInfo* info = desc_->info();
ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
int params = scope_info.number_of_parameters();
int slots = scope_info.number_of_stack_slots();
int context_slots = scope_info.number_of_context_slots();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int locals = scope_info.NumberOfLocals();
int total_children =
params + slots + context_slots + internal_slots + locals + 2;
// The extra duplication below seems to be necessary to keep
// gdb from getting upset on OSX.
w->WriteULEB128(current_abbreviation++); // Abbreviation code.
w->WriteULEB128(DW_TAG_SUBPROGRAM);
w->Write<uint8_t>(
total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
w->WriteULEB128(DW_FORM_ADDR);
w->WriteULEB128(DW_AT_HIGH_PC);
w->WriteULEB128(DW_FORM_ADDR);
w->WriteULEB128(DW_AT_FRAME_BASE);
w->WriteULEB128(DW_FORM_BLOCK4);
w->WriteULEB128(0);
w->WriteULEB128(0);
w->WriteULEB128(current_abbreviation++);
w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
w->Write<uint8_t>(DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_BYTE_SIZE);
w->WriteULEB128(DW_FORM_DATA1);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(0);
w->WriteULEB128(0);
for (int param = 0; param < params; ++param) {
WriteVariableAbbreviation(w, current_abbreviation++, true, true);
}
for (int slot = 0; slot < slots; ++slot) {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
for (int internal_slot = 0;
internal_slot < internal_slots;
++internal_slot) {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
for (int context_slot = 0;
context_slot < context_slots;
++context_slot) {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
for (int local = 0; local < locals; ++local) {
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
}
// The function.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
// The context.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
if (total_children != 0) {
w->WriteULEB128(0); // Terminate the sibling list.
}
}
w->WriteULEB128(0); // Terminate the table.
return true;
}
private:
CodeDescription* desc_;
};
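How the two sections pair up, as we read WriteBody above (a sketch of the DWARF2 encoding, not a spec quotation): every DIE in .debug_info begins with a ULEB128 abbreviation code; the entry with that code in .debug_abbrev supplies the tag and the (attribute, form) list that tells a consumer how to decode the bytes that follow.

// .debug_abbrev (this section)       .debug_info (DebugInfoSection)
// ULEB128 code, e.g. 1               ULEB128 1           // pick abbrev #1
// ULEB128 DW_TAG_COMPILE_UNIT
// uint8   DW_CHILDREN_YES/NO
// (DW_AT_NAME,   DW_FORM_STRING)     "file.js\0"         // attribute values
// (DW_AT_LOW_PC, DW_FORM_ADDR)      CodeStart()          // in declared order
// (0, 0) terminator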
class DebugLineSection : public ELFSection {
class DebugLineSection : public DebugSection {
public:
explicit DebugLineSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".debug_line", TYPE_PROGBITS, 1),
#else
: MachOSection("__debug_line",
"__DWARF",
1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
desc_(desc) { }
// DWARF2 standard, figure 34.
@ -992,8 +1554,7 @@ class DebugLineSection : public ELFSection {
#ifdef V8_TARGET_ARCH_X64
class UnwindInfoSection : public ELFSection {
class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription *desc);
virtual bool WriteBody(Writer *w);
@ -1079,8 +1640,13 @@ void UnwindInfoSection::WriteLength(Writer *w,
UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
: ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
{ }
#ifdef __ELF
: ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
#else
: MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
MachOSection::S_REGULAR),
#endif
desc_(desc) { }
int UnwindInfoSection::WriteCIE(Writer *w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
@ -1212,15 +1778,14 @@ bool UnwindInfoSection::WriteBody(Writer *w) {
#endif // V8_TARGET_ARCH_X64
static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
elf->AddSection(new DebugInfoSection(desc));
elf->AddSection(new DebugAbbrevSection);
elf->AddSection(new DebugLineSection(desc));
obj->AddSection(new DebugInfoSection(desc));
obj->AddSection(new DebugAbbrevSection(desc));
obj->AddSection(new DebugLineSection(desc));
}
#ifdef V8_TARGET_ARCH_X64
elf->AddSection(new UnwindInfoSection(desc));
obj->AddSection(new UnwindInfoSection(desc));
#endif
}
@ -1260,6 +1825,13 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing an
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(MaybeObject* object) {
object->Print();
fprintf(stdout, "\n");
}
#endif
}
@ -1283,17 +1855,23 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
}
static void RegisterCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry,
bool dump_if_enabled,
const char* name_hint) {
#if defined(DEBUG) && !defined(WIN32)
static int file_num = 0;
if (FLAG_gdbjit_dump) {
if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
static const char* kElfFilePrefix = "/tmp/elfdump";
static const char* kObjFileExt = ".o";
char file_name[64];
OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
kElfFilePrefix, file_num++, kObjFileExt);
OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
"%s%s%d%s",
kElfFilePrefix,
(name_hint != NULL) ? name_hint : "",
file_num++,
kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
@ -1327,7 +1905,18 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
#ifdef __MACH_O
MachO mach_o;
Writer w(&mach_o);
mach_o.AddSection(new MachOTextSection(kCodeAlignment,
desc->CodeStart(),
desc->CodeSize()));
CreateDWARFSections(desc, &mach_o);
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
ELF elf;
Writer w(&elf);
@ -1345,6 +1934,7 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
CreateDWARFSections(desc, &elf);
elf.Write(&w);
#endif
return CreateCodeEntry(w.buffer(), w.position());
}
@ -1393,7 +1983,8 @@ static GDBJITLineInfo* UntagLineInfo(void* ptr) {
void GDBJITInterface::AddCode(Handle<String> name,
Handle<Script> script,
Handle<Code> code) {
Handle<Code> code,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
// Force initialization of line_ends array.
@ -1401,9 +1992,9 @@ void GDBJITInterface::AddCode(Handle<String> name,
if (!name.is_null()) {
SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
} else {
AddCode("", *code, GDBJITInterface::FUNCTION, *script);
AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
}
@ -1450,7 +2041,8 @@ Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
void GDBJITInterface::AddCode(const char* name,
Code* code,
GDBJITInterface::CodeTag tag,
Script* script) {
Script* script,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex_);
@ -1465,7 +2057,8 @@ void GDBJITInterface::AddCode(const char* name,
script != NULL ? Handle<Script>(script)
: Handle<Script>(),
lineinfo,
tag);
tag,
info);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@ -1480,7 +2073,18 @@ void GDBJITInterface::AddCode(const char* name,
delete lineinfo;
e->value = entry;
RegisterCodeEntry(entry);
const char* name_hint = NULL;
bool should_dump = false;
if (FLAG_gdbjit_dump) {
if (strlen(FLAG_gdbjit_dump_filter) == 0) {
name_hint = name;
should_dump = true;
} else if (name != NULL) {
name_hint = strstr(name, FLAG_gdbjit_dump_filter);
should_dump = (name_hint != NULL);
}
}
RegisterCodeEntry(entry, should_dump, name_hint);
}
@ -1500,7 +2104,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
builder.AddFormatted(": code object %p", static_cast<void*>(code));
}
AddCode(builder.Finalize(), code, tag);
AddCode(builder.Finalize(), code, tag, NULL, NULL);
}

8
deps/v8/src/gdb-jit.h

@ -43,6 +43,8 @@
namespace v8 {
namespace internal {
class CompilationInfo;
#define CODE_TAGS_LIST(V) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@ -113,11 +115,13 @@ class GDBJITInterface: public AllStatic {
static void AddCode(const char* name,
Code* code,
CodeTag tag,
Script* script = NULL);
Script* script,
CompilationInfo* info);
static void AddCode(Handle<String> name,
Handle<Script> script,
Handle<Code> code);
Handle<Code> code,
CompilationInfo* info);
static void AddCode(CodeTag tag, String* name, Code* code);

7
deps/v8/src/handles.cc

@ -214,9 +214,10 @@ void NormalizeProperties(Handle<JSObject> object,
}
void NormalizeElements(Handle<JSObject> object) {
CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
object->NormalizeElements());
Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->NormalizeElements(),
NumberDictionary);
}
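The new return value saves callers a re-fetch and cast of object->elements(). A hedged usage sketch:

// Sketch: consume the element dictionary produced by normalization.
void TouchElements(Handle<JSObject> object) {
  Handle<NumberDictionary> dict = NormalizeElements(object);
  USE(dict);  // e.g. update entry details without re-reading the elements
}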

2
deps/v8/src/handles.h

@ -170,7 +170,7 @@ class HandleScope {
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties);
void NormalizeElements(Handle<JSObject> object);
Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(

4
deps/v8/src/heap.cc

@ -33,6 +33,7 @@
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
@ -4664,6 +4665,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
if (isolate_->deoptimizer_data() != NULL) {
isolate_->deoptimizer_data()->Iterate(v);
}
#endif
v->Synchronize("debug");
isolate_->compilation_cache()->Iterate(v);

41
deps/v8/src/hydrogen-instructions.cc

@ -669,7 +669,7 @@ void HCallRuntime::PrintDataTo(StringStream* stream) {
}
void HClassOfTest::PrintDataTo(StringStream* stream) {
void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("class_of_test(");
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
@ -747,7 +747,7 @@ void HUnaryOperation::PrintDataTo(StringStream* stream) {
}
void HHasInstanceType::PrintDataTo(StringStream* stream) {
void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
case FIRST_JS_RECEIVER_TYPE:
@ -768,7 +768,7 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) {
}
void HTypeofIs::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->ToAsciiVector());
@ -1231,25 +1231,28 @@ Range* HShl::InferRange() {
void HCompare::PrintDataTo(StringStream* stream) {
void HCompareGeneric::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
HBinaryOperation::PrintDataTo(stream);
}
void HCompare::SetInputRepresentation(Representation r) {
void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
}
void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
input_representation_ = r;
if (r.IsTagged()) {
SetAllSideEffects();
ClearFlag(kUseGVN);
} else if (r.IsDouble()) {
if (r.IsDouble()) {
SetFlag(kDeoptimizeOnUndefined);
ClearAllSideEffects();
SetFlag(kUseGVN);
} else {
ClearAllSideEffects();
SetFlag(kUseGVN);
ASSERT(r.IsInteger32());
}
}
@ -1566,17 +1569,7 @@ HType HConstant::CalculateInferredType() {
}
HType HCompare::CalculateInferredType() {
return HType::Boolean();
}
HType HCompareObjectEq::CalculateInferredType() {
return HType::Boolean();
}
HType HUnaryPredicate::CalculateInferredType() {
HType HCompareGeneric::CalculateInferredType() {
return HType::Boolean();
}

287
deps/v8/src/hydrogen-instructions.h

@ -72,6 +72,7 @@ class LChunkBuilder;
V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
@ -89,11 +90,12 @@ class LChunkBuilder;
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampToUint8) \
V(ClassOfTest) \
V(Compare) \
V(CompareObjectEq) \
V(ClassOfTestAndBranch) \
V(CompareIDAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(CompareConstantEq) \
V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
V(DeleteProperty) \
@ -109,17 +111,17 @@ class LChunkBuilder;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndex) \
V(HasInstanceType) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
V(IsConstructCall) \
V(IsNull) \
V(IsObject) \
V(IsSmi) \
V(IsUndetectable) \
V(IsConstructCallAndBranch) \
V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@ -163,13 +165,12 @@ class LChunkBuilder;
V(StringCharFromCode) \
V(StringLength) \
V(Sub) \
V(Test) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(ToInt32) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(UseConst) \
@ -781,6 +782,7 @@ class HControlInstruction: public HInstruction {
public:
virtual HBasicBlock* SuccessorAt(int i) = 0;
virtual int SuccessorCount() = 0;
virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
virtual void PrintDataTo(StringStream* stream);
@ -815,12 +817,13 @@ class HTemplateControlInstruction: public HControlInstruction {
public:
int SuccessorCount() { return S; }
HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
int OperandCount() { return V; }
HValue* OperandAt(int i) { return inputs_[i]; }
protected:
void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
@ -869,6 +872,9 @@ class HDeoptimize: public HControlInstruction {
UNREACHABLE();
return NULL;
}
virtual void SetSuccessorAt(int i, HBasicBlock* block) {
UNREACHABLE();
}
void AddEnvironmentValue(HValue* value) {
values_.Add(NULL);
@ -922,18 +928,21 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
};
class HTest: public HUnaryControlInstruction {
class HBranch: public HUnaryControlInstruction {
public:
HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
HBranch(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
: HUnaryControlInstruction(value, true_target, false_target) {
ASSERT(true_target != NULL && false_target != NULL);
}
explicit HBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Test)
DECLARE_CONCRETE_INSTRUCTION(Branch)
};
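The HTest to HBranch rename is part of a broader shift visible throughout this file: boolean-producing predicates become *AndBranch control instructions that own their successors directly. A hedged before/after sketch of graph construction (constructor signatures per this header; the builder plumbing is assumed):

// Before: a value-producing compare plus a separate test terminator.
//   HCompare* compare = new HCompare(left, right, token);
//   AddInstruction(compare);
//   current_block->Finish(new HTest(compare, true_block, false_block));
//
// After: the fused instruction is itself the block terminator.
//   HCompareIDAndBranch* compare =
//       new HCompareIDAndBranch(left, right, token);
//   compare->SetSuccessorAt(0, true_block);
//   compare->SetSuccessorAt(1, false_block);
//   current_block->Finish(compare);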
@ -2520,43 +2529,58 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
};
class HCompare: public HBinaryOperation {
class HCompareGeneric: public HBinaryOperation {
public:
HCompare(HValue* left, HValue* right, Token::Value token)
HCompareGeneric(HValue* left, HValue* right, Token::Value token)
: HBinaryOperation(left, right), token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
SetAllSideEffects();
}
void SetInputRepresentation(Representation r);
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
return Representation::Tagged();
}
Representation GetInputRepresentation() const {
return input_representation_;
return Representation::Tagged();
}
Token::Value token() const { return token_; }
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
virtual intptr_t Hashcode() {
return HValue::Hashcode() * 7 + token_;
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
private:
Token::Value token_;
};
class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
public:
HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
}
DECLARE_CONCRETE_INSTRUCTION(Compare)
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
protected:
virtual bool DataEquals(HValue* other) {
HCompare* comp = HCompare::cast(other);
return token_ == comp->token();
void SetInputRepresentation(Representation r);
Representation GetInputRepresentation() const {
return input_representation_;
}
virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
Representation input_representation_;
@ -2564,61 +2588,39 @@ class HCompare: public HBinaryOperation {
};
class HCompareObjectEq: public HBinaryOperation {
class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
public:
HCompareObjectEq(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
HCompareObjectEqAndBranch(HValue* left, HValue* right) {
SetOperandAt(0, left);
SetOperandAt(1, right);
}
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(CompareObjectEq)
protected:
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
};
class HCompareConstantEq: public HUnaryOperation {
class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
public:
HCompareConstantEq(HValue* left, int right, Token::Value op)
: HUnaryOperation(left), op_(op), right_(right) {
HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
: HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
ASSERT(op == Token::EQ_STRICT);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
Token::Value op() const { return op_; }
int right() const { return right_; }
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
virtual HType CalculateInferredType() { return HType::Boolean(); }
DECLARE_CONCRETE_INSTRUCTION(CompareConstantEq);
protected:
virtual bool DataEquals(HValue* other) {
HCompareConstantEq* other_instr = HCompareConstantEq::cast(other);
return (op_ == other_instr->op_ &&
right_ == other_instr->right_);
}
DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch);
private:
const Token::Value op_;
@ -2626,139 +2628,112 @@ class HCompareConstantEq: public HUnaryOperation {
};
class HUnaryPredicate: public HUnaryOperation {
class HIsNullAndBranch: public HUnaryControlInstruction {
public:
explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
HIsNullAndBranch(HValue* value, bool is_strict)
: HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
bool is_strict() const { return is_strict_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
};
class HIsNull: public HUnaryPredicate {
public:
HIsNull(HValue* value, bool is_strict)
: HUnaryPredicate(value), is_strict_(is_strict) { }
bool is_strict() const { return is_strict_; }
DECLARE_CONCRETE_INSTRUCTION(IsNull)
protected:
virtual bool DataEquals(HValue* other) {
HIsNull* b = HIsNull::cast(other);
return is_strict_ == b->is_strict();
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
private:
bool is_strict_;
};
class HIsObject: public HUnaryPredicate {
class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
explicit HIsObjectAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(IsObject)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
protected:
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
};
class HIsSmi: public HUnaryPredicate {
class HIsSmiAndBranch: public HUnaryControlInstruction {
public:
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
explicit HIsSmiAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmi)
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
class HIsUndetectable: public HUnaryPredicate {
class HIsUndetectableAndBranch: public HUnaryControlInstruction {
public:
explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
explicit HIsUndetectableAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
protected:
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
};
class HIsConstructCall: public HTemplateInstruction<0> {
class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
public:
HIsConstructCall() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall)
protected:
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
};
class HHasInstanceType: public HUnaryPredicate {
class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
public:
HHasInstanceType(HValue* value, InstanceType type)
: HUnaryPredicate(value), from_(type), to_(type) { }
HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
: HUnaryPredicate(value), from_(from), to_(to) {
HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
: HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
: HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
}
InstanceType from() { return from_; }
InstanceType to() { return to_; }
virtual bool EmitAtUses() {
return !HasSideEffects() && !HasMultipleUses();
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType)
protected:
virtual bool DataEquals(HValue* other) {
HHasInstanceType* b = HHasInstanceType::cast(other);
return (from_ == b->from()) && (to_ == b->to());
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
class HHasCachedArrayIndex: public HUnaryPredicate {
class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
public:
explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
explicit HHasCachedArrayIndexAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
protected:
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
};
@@ -2780,42 +2755,40 @@ class HGetCachedArrayIndex: public HUnaryOperation {
};
class HClassOfTest: public HUnaryPredicate {
class HClassOfTestAndBranch: public HUnaryControlInstruction {
public:
HClassOfTest(HValue* value, Handle<String> class_name)
: HUnaryPredicate(value), class_name_(class_name) { }
HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
: HUnaryControlInstruction(value, NULL, NULL),
class_name_(class_name) { }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
Handle<String> class_name() const { return class_name_; }
protected:
virtual bool DataEquals(HValue* other) {
HClassOfTest* b = HClassOfTest::cast(other);
return class_name_.is_identical_to(b->class_name_);
}
private:
Handle<String> class_name_;
};
class HTypeofIs: public HUnaryPredicate {
class HTypeofIsAndBranch: public HUnaryControlInstruction {
public:
HTypeofIs(HValue* value, Handle<String> type_literal)
: HUnaryPredicate(value), type_literal_(type_literal) { }
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(TypeofIs)
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
protected:
virtual bool DataEquals(HValue* other) {
HTypeofIs* b = HTypeofIs::cast(other);
return type_literal_.is_identical_to(b->type_literal_);
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
private:

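Every hydrogen-instructions.h change above is the same refactoring: predicates that used to be value instructions producing a boolean for a separate HTest (HIsNull, HIsSmi, HHasInstanceType, and the rest) become *AndBranch control instructions that end their basic block and carry their two successors directly. A standalone model of the new shape, with simplified stand-in types rather than the actual V8 classes:

#include <cstdio>

struct Block { int id; };

// The predicate is now a block-ending control instruction that owns two
// successor slots instead of producing a boolean for a separate HTest.
class UnaryControlModel {
 public:
  UnaryControlModel(int value, Block* if_true, Block* if_false)
      : value_(value) { succ_[0] = if_true; succ_[1] = if_false; }
  int value() const { return value_; }
  Block* SuccessorAt(int i) const { return succ_[i]; }
  void SetSuccessorAt(int i, Block* b) { succ_[i] = b; }  // builder fills these in
 private:
  int value_;
  Block* succ_[2];
};

int main() {
  Block t = {1}, f = {2};
  UnaryControlModel is_smi(42, nullptr, nullptr);  // like the (value, NULL, NULL) ctors above
  is_smi.SetSuccessorAt(0, &t);
  is_smi.SetSuccessorAt(1, &f);
  std::printf("true->B%d false->B%d\n",
              is_smi.SuccessorAt(0)->id, is_smi.SuccessorAt(1)->id);
}

The NULL, NULL successor arguments in the constructors above play the same role as the unset slots here: the graph builder wires them up when the instruction is returned to a test context.
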
423
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

17
deps/v8/src/hydrogen.h

@@ -498,6 +498,12 @@ class AstContext {
// the instruction as value.
virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
// Finishes the current basic block and materialize a boolean for
// value context, nothing for effect, generate a branch for test context.
// Call this function in tail position in the Visit functions for
// expressions.
virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
@@ -532,6 +538,7 @@ class EffectContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
virtual void ReturnControl(HControlInstruction* instr, int ast_id);
};
@@ -544,6 +551,7 @@ class ValueContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
virtual void ReturnControl(HControlInstruction* instr, int ast_id);
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -566,6 +574,7 @@ class TestContext: public AstContext {
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
virtual void ReturnControl(HControlInstruction* instr, int ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -706,6 +715,10 @@ class HGraphBuilder: public AstVisitor {
void Bailout(const char* reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
int join_id);
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -779,10 +792,6 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* loop_entry,
BreakAndContinueInfo* break_info);
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
int join_id);
// Create a back edge in the flow graph. body_exit is the predecessor
// block and loop_entry is the successor block. loop_successor is the
// block where control flow exits the loop normally (e.g., via failure of

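The new ReturnControl hook lets each AstContext decide what to do with a control instruction: a test context can wire the instruction's successors straight to its own branch targets without ever materializing a boolean. A minimal model of that contract (hypothetical names, not the real builder API):

#include <cstdio>

// An effect context would discard the result, a value context would
// materialize a boolean; the test context shown here just redirects.
struct ControlModel { int true_target; int false_target; };

struct ContextModel {
  virtual ~ContextModel() {}
  virtual void ReturnControl(ControlModel* instr) = 0;
};

struct TestContextModel : ContextModel {
  int if_true, if_false;
  TestContextModel(int t, int f) : if_true(t), if_false(f) {}
  void ReturnControl(ControlModel* instr) override {
    instr->true_target = if_true;    // branch straight to the test's targets,
    instr->false_target = if_false;  // no boolean ever materialized
  }
};

int main() {
  ControlModel cmp = { -1, -1 };
  TestContextModel ctx(7, 9);
  ctx.ReturnControl(&cmp);
  std::printf("B%d / B%d\n", cmp.true_target, cmp.false_target);
}
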
6
deps/v8/src/ia32/assembler-ia32.h

@@ -835,7 +835,7 @@ class Assembler : public AssemblerBase {
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
RelocInfo::Mode rmode,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Jumps
@@ -990,7 +990,9 @@ class Assembler : public AssemblerBase {
void Print();
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
}
// Mark address of the ExitJSFrame code.
void RecordJSReturn();

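Defaulting rmode to RelocInfo::CODE_TARGET is what allows the bare __ call(ic) call sites in full-codegen-ia32.cc further down. The same API pattern in a self-contained example (stand-in types, not the assembler itself):

#include <cstdio>

enum Mode { CODE_TARGET, CODE_TARGET_CONTEXT };
const unsigned kNoASTIdModel = 0;

// Giving the mode parameter a default lets the common call sites drop
// boilerplate arguments, as the full-codegen rewrite below does.
void call(const char* code, Mode mode = CODE_TARGET,
          unsigned ast_id = kNoASTIdModel) {
  std::printf("call %s mode=%d ast_id=%u\n", code, mode, ast_id);
}

int main() {
  call("LoadIC_Initialize");             // common case, both defaults
  call("StoreIC", CODE_TARGET_CONTEXT);  // override only the mode
  call("CompareIC", CODE_TARGET, 42);    // full form with an AST id
}
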
11
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -554,12 +554,10 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(eax);
// the argument is now on top.
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
__ push(eax); // the operand
__ push(Immediate(Smi::FromInt(op_)));
__ push(Immediate(Smi::FromInt(mode_)));
__ push(Immediate(Smi::FromInt(operand_type_)));
__ push(ecx); // Push return address.
@@ -567,8 +565,7 @@ void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
masm->isolate()), 4, 1);
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}

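The rewritten GenerateTypeTransition pushes the stub's operation directly instead of hiding it inside the opaque MinorKey alongside the other fields. A small model of the difference; the bit layout here is invented purely for illustration:

#include <cstdio>

// Old style: one opaque key packing several fields, which the runtime
// would have to decode. New style: push the components separately.
struct KeyModel {
  static int Encode(int op, int mode, int type) {
    return op | (mode << 8) | (type << 12);
  }
  static int Op(int key)   { return key & 0xff; }
  static int Mode(int key) { return (key >> 8) & 0xf; }
  static int Type(int key) { return (key >> 12) & 0xf; }
};

int main() {
  int key = KeyModel::Encode(/*op=*/3, /*mode=*/1, /*type=*/2);
  std::printf("decoded: op=%d mode=%d type=%d\n",
              KeyModel::Op(key), KeyModel::Mode(key), KeyModel::Type(key));
}
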
14
deps/v8/src/ia32/code-stubs-ia32.h

@@ -62,16 +62,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
}
UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@@ -89,8 +84,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("TypeRecordingUnaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),

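The two UnaryOpStub constructors collapse into one by defaulting operand_type to UnaryOpIC::UNINITIALIZED. A self-contained model of the same merge, with simplified types:

#include <cassert>

enum TypeInfoModel { UNINITIALIZED, SMI, HEAP_NUMBER };

// One constructor with a defaulted operand_type replaces the separate
// "fresh stub" and "rebuilt from key" constructors deleted above.
class UnaryOpStubModel {
 public:
  UnaryOpStubModel(int op, int mode, TypeInfoModel operand_type = UNINITIALIZED)
      : op_(op), mode_(mode), operand_type_(operand_type) {}
  int op() const { return op_; }
  TypeInfoModel operand_type() const { return operand_type_; }
 private:
  int op_;
  int mode_;
  TypeInfoModel operand_type_;
};

int main() {
  UnaryOpStubModel fresh(1, 0);                 // old two-argument form
  UnaryOpStubModel patched(1, 0, HEAP_NUMBER);  // old "from key" form
  assert(fresh.operand_type() == UNINITIALIZED);
  assert(patched.operand_type() == HEAP_NUMBER);
  return fresh.op() == patched.op() ? 0 : 1;    // both sites share one ctor
}
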
29
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -348,6 +348,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -461,6 +464,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -587,7 +593,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost) {
if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -600,6 +606,27 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers ebp and esp are set to the correct values though.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
}
}
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {

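The new FillInputFrame snapshots a live JavaScript frame into the deoptimizer's input description: register values are mostly placeholders (JavaScript frames have no callee-saved registers; only esp and ebp matter), and every pointer-sized stack slot is copied from the top of stack down. A runnable model of the copy loop, with stand-in types:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

const unsigned kPointerSizeModel = 4;  // ia32

// Copy every pointer-sized slot of a frame, starting at the top of stack,
// into a side table that the deoptimizer can translate later; this is the
// shape of the FillInputFrame loop above.
void FillInputFrameModel(const uint8_t* tos, unsigned frame_size,
                         std::vector<uint32_t>* slots) {
  for (unsigned i = 0; i < frame_size; i += kPointerSizeModel) {
    uint32_t value;
    std::memcpy(&value, tos + i, kPointerSizeModel);  // Memory::uint32_at(tos + i)
    slots->push_back(value);
  }
}

int main() {
  uint8_t frame[8] = {1, 0, 0, 0, 2, 0, 0, 0};  // two little-endian slots
  std::vector<uint32_t> slots;
  FillInputFrameModel(frame, sizeof(frame), &slots);
  std::printf("%u %u\n", slots[0], slots[1]);  // prints: 1 2
}
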
208
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -78,16 +78,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
ASSERT(is_int8(delta_to_patch_site));
__ test(eax, Immediate(delta_to_patch_site));
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
ASSERT(is_int8(delta_to_patch_site));
__ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
info_emitted_ = true;
#endif
} else {
__ nop(); // Signals no inlined code.
}
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance distance) {
@@ -121,6 +123,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -140,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
@@ -152,7 +155,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = scope()->num_stack_slots();
int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -183,7 +186,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -213,11 +216,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
__ SafePush(Immediate(Smi::FromInt(num_parameters)));
// Arguments to ArgumentsAccessStub and/or New...:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -342,7 +346,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ mov(esp, ebp);
__ pop(ebp);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning is large enough
@@ -754,7 +758,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
}
}
}
@@ -827,7 +831,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site, clause->CompareId());
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1120,7 +1125,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
EmitCallIC(ic, mode, AstNode::kNoNumber);
__ call(ic, mode);
}
@@ -1200,7 +1205,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ SafeSet(eax, Immediate(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1222,7 +1227,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
} else if (slot->type() == Slot::LOOKUP) {
@@ -1368,7 +1373,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
__ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1601,14 +1606,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1629,7 +1634,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site, expr->id());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
// Smi case.
@@ -1712,8 +1718,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(edx);
BinaryOpStub stub(op, mode);
// NULL signals no inlined smi code.
EmitCallIC(stub.GetCode(), NULL, expr->id());
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -1753,7 +1760,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1776,7 +1783,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
break;
}
}
@@ -1800,7 +1807,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1893,7 +1900,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1933,7 +1940,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1984,7 +1991,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2017,7 +2024,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2056,7 +2063,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
@@ -2193,7 +2200,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
// for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2211,7 +2218,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -2599,7 +2606,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
__ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
__ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2611,7 +2618,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
__ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
__ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3527,6 +3534,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Load the function into eax.
VisitForAccumulatorValue(args->at(0));
// Prepare for the test.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// Test for strict mode function.
__ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, if_true);
// Test for native function.
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, if_true);
// Not native or strict-mode function.
__ jmp(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3557,7 +3597,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ call(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3696,7 +3736,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(eax);
}
@@ -3816,7 +3856,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in eax.
@@ -3849,7 +3890,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3866,7 +3907,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -3894,7 +3935,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL &&
@@ -4089,7 +4130,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4148,58 +4190,6 @@ Register FullCodeGenerator::context_register() {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
RelocInfo::Mode mode,
unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(isolate()->counters()->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
break;
case Code::STORE_IC:
__ IncrementCounter(isolate()->counters()->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
default:
break;
}
__ call(ic, mode, ast_id);
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
JumpPatchSite* patch_site,
unsigned ast_id) {
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
__ call(ic, RelocInfo::CODE_TARGET, ast_id);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
@@ -4212,19 +4202,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
if (scope()->is_global_scope()) {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ push(Immediate(Smi::FromInt(0)));
} else if (scope()->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
} else if (declaration_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
// Fetch it from the context.
__ push(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
ASSERT(scope()->is_function_scope());
ASSERT(declaration_scope->is_function_scope());
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4236,12 +4227,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
__ pop(edx);
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
__ add(edx, Operand(edx)); // Convert to smi.
__ mov(Operand(esp, 0), edx);
__ SmiTag(edx);
__ push(edx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4249,15 +4240,12 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ mov(edx, Operand(esp, 0));
__ sar(edx, 1); // Convert smi to int.
__ pop(edx);
__ SmiUntag(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
__ mov(Operand(esp, 0), edx);
// And return.
__ ret(0);
__ jmp(Operand(edx));
}

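EnterFinallyBlock and ExitFinallyBlock now pop and push the return address rather than rewriting it in place on the stack, but the cooking scheme itself is unchanged: the address is stored as a smi-tagged offset from the code object, so the GC can relocate the code while the finally block runs. The arithmetic, modeled standalone:

#include <cassert>
#include <cstdint>

typedef intptr_t AddressModel;

// Cook: subtract the code object base, then smi-tag (the "add edx, edx"
// above is exactly a left shift by one with tag bit 0).
AddressModel Cook(AddressModel ret, AddressModel code_object) {
  AddressModel delta = ret - code_object;
  return delta + delta;
}

// Uncook: smi-untag (arithmetic shift right by one), then rebase onto the
// possibly-moved code object before jumping.
AddressModel Uncook(AddressModel cooked, AddressModel code_object) {
  return (cooked >> 1) + code_object;
}

int main() {
  AddressModel code = 0x08040000;  // hypothetical code object start
  AddressModel ret = code + 0x1a4; // hypothetical return site inside it
  AddressModel cooked = Cook(ret, code);
  assert(cooked % 2 == 0);         // looks like a smi to the GC
  assert(Uncook(cooked, code) == ret);
}
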
2
deps/v8/src/ia32/ic-ia32.cc

@@ -528,6 +528,8 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch));
__ j(greater_equal, slow_case);

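The added CheckMap closes a hole in GenerateUnmappedArgumentsLookup: the backing store's length field is only meaningful if the object really is a FixedArray, so the map is verified before the bounds check. The guard pattern, as a standalone model:

#include <cstdio>

struct MapModel { int instance_type; };
struct HeapObjectModel { const MapModel* map; int length; };

const int kFixedArrayTypeModel = 1;

// Only trust 'length' after proving the backing store is a FixedArray;
// on any mismatch, fall back to the slow path.
bool LookupBackingStore(const HeapObjectModel* backing_store, int key) {
  if (backing_store->map->instance_type != kFixedArrayTypeModel) return false;
  if (key >= backing_store->length) return false;
  return true;  // stand-in for the fast element load
}

int main() {
  MapModel fixed_array = { kFixedArrayTypeModel };
  HeapObjectModel store = { &fixed_array, 4 };
  std::printf("%d %d\n", LookupBackingStore(&store, 2),
              LookupBackingStore(&store, 9));  // prints: 1 0
}
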
257
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -1367,7 +1367,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->representation();
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
@@ -1380,7 +1380,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
if (instr->hydrogen()->value()->type().IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
@@ -1474,32 +1474,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
Label unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
Label done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ mov(ToRegister(result), factory()->true_value());
__ j(cc, &done, Label::kNear);
__ bind(&unordered);
__ mov(ToRegister(result), factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1520,23 +1494,9 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
__ mov(result, factory()->true_value());
Label done;
__ j(equal, &done, Label::kNear);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Operand right = ToOperand(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1545,19 +1505,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label done;
__ cmp(left, instr->hydrogen()->right());
__ mov(result, factory()->true_value());
__ j(equal, &done, Label::kNear);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1568,43 +1515,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
__ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
__ mov(result, factory()->true_value());
Label done;
__ j(equal, &done, Label::kNear);
__ mov(result, factory()->false_value());
__ bind(&done);
} else {
Label true_value, false_value, done;
__ j(equal, &true_value, Label::kNear);
__ cmp(reg, factory()->undefined_value());
__ j(equal, &true_value, Label::kNear);
__ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value, Label::kNear);
__ bind(&false_value);
__ mov(result, factory()->false_value());
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ mov(result, factory()->true_value());
__ bind(&done);
}
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1658,25 +1568,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
__ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&is_true);
__ mov(result, factory()->true_value());
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1692,19 +1583,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Operand input = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label done;
__ mov(result, factory()->true_value());
__ JumpIfSmi(input, &done, Label::kNear);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->InputAt(0));
@@ -1716,26 +1594,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label false_label, done;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(input, &false_label, Label::kNear);
__ mov(result, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(result, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(zero, &false_label, Label::kNear);
__ mov(result, factory()->true_value());
__ jmp(&done);
__ bind(&false_label);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1752,7 +1610,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
static InstanceType TestType(HHasInstanceType* instr) {
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1761,7 +1619,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
static Condition BranchCondition(HHasInstanceType* instr) {
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1772,24 +1630,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label done, is_false;
__ JumpIfSmi(input, &is_false, Label::kNear);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())),
&is_false, Label::kNear);
__ mov(result, factory()->true_value());
__ jmp(&done, Label::kNear);
__ bind(&is_false);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1819,21 +1659,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ mov(result, factory()->true_value());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
Label done;
__ j(zero, &done, Label::kNear);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1904,29 +1729,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
Label done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
__ j(not_equal, &is_false, Label::kNear);
__ bind(&is_true);
__ mov(result, factory()->true_value());
__ jmp(&done, Label::kNear);
__ bind(&is_false);
__ mov(result, factory()->false_value());
__ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -3882,14 +3684,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
__ test(ToRegister(input), Immediate(kSmiTagMask));
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->InputAt(0);
__ test(ToRegister(input), Immediate(kSmiTagMask));
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
@@ -3941,8 +3743,8 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, instr->hydrogen()->target());
Operand operand = ToOperand(instr->InputAt(0));
__ cmp(operand, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4189,29 +3991,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
input,
instr->type_literal());
__ j(final_branch_condition, &true_label, Label::kNear);
__ bind(&false_label);
__ mov(result, factory()->false_value());
__ jmp(&done, Label::kNear);
__ bind(&true_label);
__ mov(result, factory()->true_value());
__ bind(&done);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4292,24 +4071,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label done;
EmitIsConstructCall(result);
__ j(equal, &true_label, Label::kNear);
__ mov(result, factory()->false_value());
__ jmp(&done, Label::kNear);
__ bind(&true_label);
__ mov(result, factory()->true_value());
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());

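All the deletions in lithium-codegen-ia32.cc above are value-materializing twins of branch instructions (DoIsNull beside DoIsNullAndBranch, and so on). With predicates now reaching the backend only as control instructions, the materialize-then-test path is dead code. The difference, modeled on the smi check in plain C++:

#include <cstdio>

bool is_smi(int tagged) { return (tagged & 1) == 0; }  // ia32 smi tag is 0

// Old path: DoIsSmi wrote a true/false value into a register, and a later
// branch consumed it.
int MaterializeThenBranch(int v) {
  bool result = is_smi(v);
  if (result) return 1;
  return 0;
}

// New path: DoIsSmiAndBranch emits one test and jumps straight to the
// successor blocks, with no intermediate boolean.
int BranchDirectly(int v) {
  if (is_smi(v)) return 1;
  return 0;
}

int main() {
  std::printf("%d %d\n", MaterializeThenBranch(6), BranchDirectly(7));  // 1 0
}
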
222
deps/v8/src/ia32/lithium-ia32.cc

@@ -267,12 +267,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
void LTypeofIs::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -344,13 +338,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -985,18 +972,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
HTest* test = HTest::cast(current);
instr->set_hydrogen_value(test->value());
HBasicBlock* first = test->FirstSuccessor();
HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1041,84 +1017,17 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
ASSERT(!v->HasSideEffects());
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister(),
TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
HValue* left = compare->left();
HValue* right = compare->right();
Representation r = compare->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right));
} else {
ASSERT(r.IsDouble());
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right));
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsIsUndetectable()) {
HIsUndetectable* compare = HIsUndetectable::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
TempRegister());
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
} else if (v->IsCompareObjectEq()) {
HCompareObjectEq* compare = HCompareObjectEq::cast(v);
return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else if (v->IsConstant()) {
if (v->EmitAtUses()) {
ASSERT(v->IsConstant());
ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
} else {
Abort("Undefined compare before branch");
return NULL;
}
return new LBranch(UseRegisterAtStart(v));
}
@@ -1489,85 +1398,85 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
return new LCmpIDAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
LOperand* right = UseAtStart(instr->right());
return new LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEq(
HCompareConstantEq* instr) {
LOperand* left = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LCmpConstantEq(left));
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value));
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = instr->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LIsObject(value));
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
return DefineAsRegister(new LIsSmi(value));
return new LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsUndetectable(value));
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LHasInstanceType(value));
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
@@ -1580,20 +1489,20 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LHasCachedArrayIndex(value));
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseTempRegister(instr->value());
return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
}
@@ -1631,7 +1540,7 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
Use(instr->length())));
UseAtStart(instr->length())));
}
@@ -1724,7 +1633,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1745,13 +1654,13 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@@ -2207,13 +2116,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall);
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
return new LIsConstructCallAndBranch(TempRegister());
}
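The pattern running through the hunks above: every Do<Predicate> builder that used to materialize a boolean into a register (DefineAsRegister(new LIsSmi(value))) now has only a Do<Predicate>AndBranch form returning a bare control instruction, so the boolean never exists as a value. A minimal sketch of the idea in plain C++ — Instr, IsSmiAndBranch, and BuildIsSmi are invented stand-ins, not V8 types:

#include <cstdio>

// Hypothetical mini-IR: a fused test-and-branch node replaces the pair
// (materialize boolean, generic branch), so no register ever holds the bool.
struct Instr {
  virtual ~Instr() {}
  virtual bool IsControl() const { return false; }
};

struct IsSmiAndBranch : Instr {
  int value_reg;                 // input operand
  int true_block, false_block;   // successors, filled in by the graph builder
  explicit IsSmiAndBranch(int v) : value_reg(v), true_block(-1), false_block(-1) {}
  virtual bool IsControl() const { return true; }
};

Instr* BuildIsSmi(int value_reg) {
  // Old scheme: materialize a boolean and branch on it later.
  // New scheme: emit the fused control instruction directly.
  return new IsSmiAndBranch(value_reg);
}

int main() {
  Instr* i = BuildIsSmi(3);
  std::printf("control: %d\n", i->IsControl() ? 1 : 0);  // prints 1
  delete i;
}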

187
deps/v8/src/ia32/lithium-ia32.h

@ -71,15 +71,11 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
@ -97,9 +93,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@ -107,15 +101,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@ -167,7 +156,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@ -226,7 +214,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@ -456,16 +443,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
void SetBranchTargets(int true_block_id, int false_block_id) {
true_block_id_ = true_block_id;
false_block_id_ = false_block_id;
}
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
int true_block_id_;
int false_block_id_;
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
};
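The LControlInstruction change above replaces stored true/false block ids (set through SetBranchTargets) with lookups into the corresponding hydrogen instruction's successor list, so the lithium node can never get out of sync with the graph. A sketch of the new shape, using invented stand-in types:

#include <cassert>

// Invented stand-in for HControlInstruction: it owns the successor list.
struct HControl {
  int successors[2];
  int SuccessorAt(int i) const { return successors[i]; }
};

// After the refactor the lithium node stores no ids of its own; it simply
// forwards to the hydrogen node that produced it.
struct LControl {
  HControl* hydrogen;
  int true_block_id() const { return hydrogen->SuccessorAt(0); }
  int false_block_id() const { return hydrogen->SuccessorAt(1); }
};

int main() {
  HControl h = { {7, 9} };
  LControl l = { &h };
  assert(l.true_block_id() == 7 && l.false_block_id() == 9);
}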
@ -567,23 +553,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@ -592,7 +561,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@ -617,17 +586,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@ -640,17 +598,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCmpConstantEq(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
@ -659,20 +606,7 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
bool is_strict() const { return hydrogen()->is_strict(); }
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@ -684,7 +618,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@ -692,16 +626,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@ -715,17 +639,6 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@ -733,22 +646,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsUndetectable(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
};
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@ -763,17 +666,6 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
};
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@ -783,7 +675,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@ -800,17 +692,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@ -823,13 +704,6 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@ -841,20 +715,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
public:
LClassOfTest(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
virtual void PrintDataTo(StringStream* stream);
};
class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@ -865,7 +725,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@ -879,7 +739,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@ -1015,7 +875,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@ -2034,21 +1894,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream);
};
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@ -2056,7 +1901,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }

11
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -1766,17 +1766,14 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
mov(dst, esi);
}
// We should not have found a with or catch context by walking the context
// chain (i.e., the static scope chain and runtime context chain do not
// agree). A variable occurring in such a scope should have slot type
// LOOKUP and not CONTEXT.
// We should not have found a with context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
if (emit_debug_code()) {
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
Check(not_equal, "Variable resolved to with context.");
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
Check(not_equal, "Variable resolved to catch context.");
}
}
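The debug check above compares the resolved context's map against the with-context map and, in the new code, the catch-context map as well, aborting on a match. The shape of the check, reduced to standalone C++ with made-up map constants:

#include <cstdio>
#include <cstdlib>

enum Map { FUNCTION_CONTEXT_MAP, WITH_CONTEXT_MAP, CATCH_CONTEXT_MAP };  // invented

static void Check(bool ok, const char* msg) {
  if (!ok) { std::fprintf(stderr, "abort: %s\n", msg); std::abort(); }
}

// Debug-only sanity check: a variable with slot type CONTEXT must never
// resolve into a with or catch context.
void CheckResolvedContext(Map map) {
  Check(map != WITH_CONTEXT_MAP, "Variable resolved to with context.");
  Check(map != CATCH_CONTEXT_MAP, "Variable resolved to catch context.");
}

int main() { CheckResolvedContext(FUNCTION_CONTEXT_MAP); }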

79
deps/v8/src/ic.cc

@ -956,7 +956,7 @@ MaybeObject* LoadIC::Load(State state,
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) {
if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
LOG(isolate(), SuspectReadEvent(*name, *object));
@ -1097,16 +1097,6 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
String* KeyedLoadIC::GetStubNameForCache(IC::State ic_state) {
if (ic_state == MONOMORPHIC) {
return isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
} else {
ASSERT(ic_state == MEGAMORPHIC);
return isolate()->heap()->KeyedLoadElementPolymorphic_symbol();
}
}
MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedLoadFastElementStub().TryGetCode();
@ -1230,10 +1220,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
LookupForRead(*object, *name, &lookup);
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
if (!lookup.IsProperty() && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
if (FLAG_use_ic) {
@ -1636,18 +1624,14 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
StrictModeFlag strict_mode,
Code* generic_stub) {
State ic_state = target()->ic_state();
Code* monomorphic_stub;
// Always compute the MONOMORPHIC stub, even if the MEGAMORPHIC stub ends up
// being used. This is necessary because the megamorphic stub needs to have
// access to more information than what is stored in the receiver map in some
// cases (external arrays need the array type from the MONOMORPHIC stub).
MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
is_store,
strict_mode,
generic_stub);
if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
Code* monomorphic_stub;
MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
is_store,
strict_mode,
generic_stub);
if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
return monomorphic_stub;
}
ASSERT(target() != generic_stub);
@ -1698,9 +1682,9 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
}
// Build the MEGAMORPHIC stub.
Code* stub;
maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
&handler_ics,
strict_mode);
MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
&handler_ics,
strict_mode);
if (!maybe_stub->To(&stub)) return maybe_stub;
MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
if (maybe_update->IsFailure()) return maybe_update;
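ComputeStub now builds the MONOMORPHIC stub only while the IC is still UNINITIALIZED or PREMONOMORPHIC, and otherwise collects receiver maps and builds one MEGAMORPHIC stub over the whole list. A toy model of that state progression — names invented, no relation to V8's real types:

#include <set>
#include <cstdio>

enum State { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

struct ToyKeyedIC {
  State state = UNINITIALIZED;
  std::set<int> maps_seen;  // stand-in for the receiver-map list

  // Record a receiver map and pick the stub tier, mirroring the new
  // control flow: monomorphic first, megamorphic once maps diverge.
  State ComputeStub(int receiver_map) {
    maps_seen.insert(receiver_map);
    state = (maps_seen.size() == 1) ? MONOMORPHIC : MEGAMORPHIC;
    return state;
  }
};

int main() {
  ToyKeyedIC ic;
  std::printf("%d\n", ic.ComputeStub(1));  // 1 -> MONOMORPHIC
  std::printf("%d\n", ic.ComputeStub(2));  // 2 -> MEGAMORPHIC
}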
@ -1716,22 +1700,7 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
ASSERT(string_stub() != NULL);
return string_stub();
} else if (receiver_map->has_external_array_elements()) {
// Determine the array type from the already-generated default MONOMORPHIC
// stub. There is no other way to determine the type of the external array
// directly from the receiver type.
Code::Kind kind = this->kind();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
NORMAL,
strict_mode);
String* monomorphic_name = GetStubNameForCache(MONOMORPHIC);
Object* maybe_default_stub = receiver_map->FindInCodeCache(monomorphic_name,
flags);
if (maybe_default_stub->IsUndefined()) {
return generic_stub;
}
Code* default_stub = Code::cast(maybe_default_stub);
Map* first_map = default_stub->FindFirstMap();
return GetExternalArrayStubWithoutMapCheck(first_map->elements_kind());
return GetExternalArrayStubWithoutMapCheck(receiver_map->elements_kind());
} else if (receiver_map->has_fast_elements()) {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
return GetFastElementStubWithoutMapCheck(is_js_array);
@ -1747,7 +1716,8 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
Code* generic_stub) {
Code* result = NULL;
if (receiver->HasFastElements() ||
receiver->HasExternalArrayElements()) {
receiver->HasExternalArrayElements() ||
receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
receiver, is_store, strict_mode);
@ -1759,16 +1729,6 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
}
String* KeyedStoreIC::GetStubNameForCache(IC::State ic_state) {
if (ic_state == MONOMORPHIC) {
return isolate()->heap()->KeyedStoreElementMonomorphic_symbol();
} else {
ASSERT(ic_state == MEGAMORPHIC);
return isolate()->heap()->KeyedStoreElementPolymorphic_symbol();
}
}
MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedStoreFastElementStub(is_js_array).TryGetCode();
@ -1856,6 +1816,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
stub = non_strict_arguments_stub();
} else if (!force_generic) {
if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
HandleScope scope(isolate());
MaybeObject* maybe_stub = ComputeStub(receiver,
true,
strict_mode,
@ -2333,15 +2294,15 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
HandleScope scope(isolate);
Handle<Object> operand = args.at<Object>(0);
int key = args.smi_at(1);
Token::Value op = static_cast<Token::Value>(args.smi_at(2));
Token::Value op = static_cast<Token::Value>(args.smi_at(1));
UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
UnaryOpIC::TypeInfo previous_type =
static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
type = UnaryOpIC::ComputeNewType(type, previous_type);
UnaryOpStub stub(key, type);
UnaryOpStub stub(op, mode, type);
Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
if (FLAG_trace_ic) {

6
deps/v8/src/ic.h

@ -358,8 +358,6 @@ class KeyedIC: public IC {
virtual Code::Kind kind() const = 0;
virtual String* GetStubNameForCache(IC::State ic_state) = 0;
MaybeObject* ComputeStub(JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode,
@ -426,8 +424,6 @@ class KeyedLoadIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
virtual String* GetStubNameForCache(IC::State ic_state);
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,
@ -581,8 +577,6 @@ class KeyedStoreIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
virtual String* GetStubNameForCache(IC::State ic_state);
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,

5
deps/v8/src/isolate.cc

@ -1855,11 +1855,6 @@ void Isolate::Exit() {
}
void Isolate::ResetEagerOptimizingData() {
compilation_cache_->ResetEagerOptimizingData();
}
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);

4
deps/v8/src/isolate.h

@ -332,6 +332,8 @@ class HashMap;
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, random_seed, 2) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
@ -978,8 +980,6 @@ class Isolate {
}
#endif
void ResetEagerOptimizingData();
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }

24
deps/v8/src/log.cc

@ -1362,18 +1362,14 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
int Logger::GetActiveProfilerModules() {
int result = PROFILER_MODULE_NONE;
if (profiler_ != NULL && !profiler_->paused()) {
result |= PROFILER_MODULE_CPU;
}
return result;
bool Logger::IsProfilerPaused() {
return profiler_ == NULL || profiler_->paused();
}
void Logger::PauseProfiler(int flags, int tag) {
void Logger::PauseProfiler() {
if (!log_->IsEnabled()) return;
if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
if (profiler_ != NULL) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
@ -1388,18 +1384,12 @@ void Logger::PauseProfiler(int flags, int tag) {
--logging_nesting_;
}
}
if (tag != 0) {
UncheckedIntEvent("close-tag", tag);
}
}
void Logger::ResumeProfiler(int flags, int tag) {
void Logger::ResumeProfiler() {
if (!log_->IsEnabled()) return;
if (tag != 0) {
UncheckedIntEvent("open-tag", tag);
}
if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
if (profiler_ != NULL) {
if (cpu_profiler_nesting_++ == 0) {
++logging_nesting_;
if (FLAG_prof_lazy) {
@ -1421,7 +1411,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
PauseProfiler(PROFILER_MODULE_CPU, 0);
PauseProfiler();
}
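The profiler keeps a nesting counter so that paired Resume/Pause calls compose: only the transition from zero actually starts sampling, and only the transition back to zero stops it (the code even tolerates negative nesting). A self-contained sketch of that counter discipline:

#include <cstdio>

struct ToyProfiler {
  int nesting = 0;
  bool paused = true;

  void Resume() {
    // The first resume really starts sampling; inner ones only count.
    if (nesting++ == 0) paused = false;
  }
  void Pause() {
    // The matching pause really stops sampling.
    if (--nesting == 0) paused = true;
  }
};

int main() {
  ToyProfiler p;
  p.Resume(); p.Resume();                 // nested
  p.Pause();                              // still running
  std::printf("paused=%d\n", p.paused);   // 0
  p.Pause();
  std::printf("paused=%d\n", p.paused);   // 1
}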

6
deps/v8/src/log.h

@ -280,9 +280,9 @@ class Logger {
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU tick events are discarded until
// data collection is resumed.
void PauseProfiler(int flags, int tag);
void ResumeProfiler(int flags, int tag);
int GetActiveProfilerModules();
void PauseProfiler();
void ResumeProfiler();
bool IsProfilerPaused();
// If logging is performed into a memory buffer, this allows
// retrieval of previously written messages. See v8.h.

57
deps/v8/src/mark-compact.cc

@ -1424,6 +1424,12 @@ void MarkCompactCollector::MarkLiveObjects() {
// reachable from the weak roots.
ProcessExternalMarking();
// Object literal map caches reference symbols (cache keys) and maps
// (cache values). At this point still useful maps have already been
// marked. Mark the keys for the alive values before we process the
// symbol table.
ProcessMapCaches();
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
@ -1452,6 +1458,57 @@ void MarkCompactCollector::MarkLiveObjects() {
}
void MarkCompactCollector::ProcessMapCaches() {
Object* raw_context = heap()->global_contexts_list_;
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
if (context->IsMarked()) {
HeapObject* raw_map_cache =
HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
// A map cache may be reachable from the stack. In this case
// it's already transitively marked and it's too late to clean
// up its parts.
if (!raw_map_cache->IsMarked() &&
raw_map_cache != heap()->undefined_value()) {
MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
int existing_elements = map_cache->NumberOfElements();
int used_elements = 0;
for (int i = MapCache::kElementsStartIndex;
i < map_cache->length();
i += MapCache::kEntrySize) {
Object* raw_key = map_cache->get(i);
if (raw_key == heap()->undefined_value() ||
raw_key == heap()->null_value()) continue;
STATIC_ASSERT(MapCache::kEntrySize == 2);
Object* raw_map = map_cache->get(i + 1);
if (raw_map->IsHeapObject() &&
HeapObject::cast(raw_map)->IsMarked()) {
++used_elements;
} else {
// Delete useless entries with unmarked maps.
ASSERT(raw_map->IsMap());
map_cache->set_null_unchecked(heap(), i);
map_cache->set_null_unchecked(heap(), i + 1);
}
}
if (used_elements == 0) {
context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
} else {
// Note: we don't actually shrink the cache here to avoid
// extra complexity during GC. We rely on subsequent cache
// usages (EnsureCapacity) to do this.
map_cache->ElementsRemoved(existing_elements - used_elements);
MarkObject(map_cache);
}
}
}
// Move to next element in the list.
raw_context = context->get(Context::NEXT_CONTEXT_LINK);
}
ProcessMarkingStack();
}
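ProcessMapCaches walks each context's literal-map cache, nulls out two-slot (key, map) entries whose map is no longer marked, and either drops the whole cache or records how many entries were removed. The core sweep, restated as standalone C++ with a plain vector in place of the MapCache:

#include <vector>
#include <cstdio>

const int kHole = -1;  // stand-in for the undefined/null hole sentinels

// Entries are (key, map) pairs; alive() plays the role of IsMarked().
int SweepMapCache(std::vector<int>& cache, bool (*alive)(int map)) {
  int used = 0;
  for (size_t i = 0; i + 1 < cache.size(); i += 2) {
    if (cache[i] == kHole) continue;      // skip already-deleted entries
    if (alive(cache[i + 1])) {
      ++used;                             // keep entry with a live map
    } else {
      cache[i] = cache[i + 1] = kHole;    // delete entry with a dead map
    }
  }
  return used;  // caller drops the cache entirely when this is zero
}

static bool IsEven(int map) { return map % 2 == 0; }

int main() {
  std::vector<int> cache = { 10, 2, 11, 3, 12, 4 };  // keys 10..12, maps 2..4
  std::printf("used=%d\n", SweepMapCache(cache, IsEven));  // used=2
}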
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();

4
deps/v8/src/mark-compact.h

@ -306,6 +306,10 @@ class MarkCompactCollector {
// flag on the marking stack.
void RefillMarkingStack();
// After reachable maps have been marked process per context object
// literal map caches removing unmarked entries.
void ProcessMapCaches();
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);

13
deps/v8/src/mips/code-stubs-mips.cc

@ -166,7 +166,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the fixed slots.
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
@ -1847,19 +1846,13 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Argument is in a0 and v0 at this point, so we can overwrite a0.
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ li(a2, Operand(Smi::FromInt(MinorKey())));
__ li(a1, Operand(Smi::FromInt(op_)));
__ li(a2, Operand(Smi::FromInt(op_)));
__ li(a1, Operand(Smi::FromInt(mode_)));
__ li(a0, Operand(Smi::FromInt(operand_type_)));
__ Push(v0, a2, a1, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
masm->isolate()),
4,
1);
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
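The old path pushed the stub's MinorKey — with op and mode packed into bitfields, as the OpBits/ModeBits decoders in the header suggest — alongside the raw values; the new path pushes op and mode directly because the key encoding is opaque to the runtime function. For reference, V8-style bitfield packing round-trips like this, sketched here with hand-rolled shifts and an illustrative layout rather than V8's BitField template:

#include <cassert>

// Hypothetical layout: op in bits 0..6, mode in bit 7 -- illustrative only.
int EncodeKey(int op, int mode) { return (op & 0x7f) | ((mode & 1) << 7); }
int DecodeOp(int key)   { return key & 0x7f; }
int DecodeMode(int key) { return (key >> 7) & 1; }

int main() {
  int key = EncodeKey(42, 1);
  // The encoding round-trips, but to a reader of the runtime function the
  // key is opaque -- which is why the patch passes op and mode explicitly.
  assert(DecodeOp(key) == 42 && DecodeMode(key) == 1);
}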

16
deps/v8/src/mips/code-stubs-mips.h

@ -61,18 +61,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
}
UnaryOpStub(
int key,
UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@ -90,8 +83,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),

5
deps/v8/src/mips/deoptimizer-mips.cc

@ -78,6 +78,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
UNIMPLEMENTED();
}
void Deoptimizer::EntryGenerator::Generate() {
UNIMPLEMENTED();
}

237
deps/v8/src/mips/full-codegen-mips.cc

@ -101,16 +101,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
__ andi(at, reg, delta_to_patch_site % kImm16Mask);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
__ andi(at, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
info_emitted_ = true;
#endif
} else {
__ nop(); // Signals no inlined code.
}
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
MacroAssembler* masm_;
Label patch_site_;
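EmitPatchInfo encodes the distance back to the patch site inside a single andi instruction: the quotient of the delta goes into the register field and the remainder into the 16-bit immediate, while an unbound site emits a plain nop as the "no inlined code" marker. The arithmetic in isolation:

#include <cassert>

const int kImm16Mask = 0xffff;  // as used in the MIPS encoding above

// Split a delta so it survives inside one instruction's fields.
void Encode(int delta, int* reg_field, int* imm_field) {
  *reg_field = delta / kImm16Mask;
  *imm_field = delta % kImm16Mask;
}

int Decode(int reg_field, int imm_field) {
  return reg_field * kImm16Mask + imm_field;
}

int main() {
  int r, imm;
  Encode(70000, &r, &imm);
  assert(Decode(r, imm) == 70000);  // round-trips
}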
@ -137,6 +139,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@ -154,13 +157,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = scope()->num_parameters() * kPointerSize;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
int locals_count = scope()->num_stack_slots();
int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
if (locals_count > 0) {
@ -180,7 +183,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in a1.
@ -196,7 +199,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// passed to us. It's saved in the stack and kept live in cp.
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@ -228,10 +231,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ mov(a3, a1);
}
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ Addu(a2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
__ li(a1, Operand(Smi::FromInt(num_parameters)));
__ Push(a3, a2, a1);
// Arguments to ArgumentsAccessStub:
@ -348,7 +352,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@ -716,10 +720,14 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a 'with'.
__ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ Check(eq, "Unexpected declaration in current context.",
a1, Operand(cp));
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
__ Check(ne, "Declaration in with context.",
a1, Operand(t0));
__ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
__ Check(ne, "Declaration in catch context.",
a1, Operand(t0));
}
if (mode == Variable::CONST) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@ -790,7 +798,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ CallWithAstId(ic);
// Value in v0 is ignored (declarations are statements).
}
}
@ -865,7 +873,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site, clause->CompareId());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
@ -1164,7 +1173,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, mode, AstNode::kNoNumber);
__ CallWithAstId(ic, mode);
}
@ -1244,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ li(a0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Branch(done);
}
}
@ -1266,7 +1275,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
} else if (slot->type() == Slot::LOOKUP) {
@ -1412,7 +1421,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@ -1656,7 +1665,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@ -1665,7 +1674,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@ -1693,7 +1702,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site, expr->id());
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@ -1774,7 +1784,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ mov(a0, result_register());
__ pop(a1);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), NULL, expr->id());
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@ -1814,7 +1826,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ CallWithAstId(ic);
break;
}
case KEYED_PROPERTY: {
@ -1827,7 +1839,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ CallWithAstId(ic);
break;
}
}
@ -1852,7 +1864,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@ -1873,17 +1885,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&skip, ne, a1, Operand(t0));
__ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
__ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ lw(a2, ContextOperand(a1, slot->index()));
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Branch(&skip, ne, a2, Operand(t0));
__ sw(result_register(), ContextOperand(a1, slot->index()));
int offset = Context::SlotOffset(slot->index());
__ mov(a3, result_register()); // Preserve the stored value in v0.
__ RecordWrite(a1, Operand(offset), a3, a2);
break;
}
case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(result_register());
__ li(a0, Operand(slot->var()->name()));
@ -1960,7 +1962,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2012,7 +2014,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2065,7 +2067,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ CallWithAstId(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2099,7 +2101,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2139,7 +2141,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(a1);
// Push the receiver of the enclosing function and do runtime call.
__ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(a1);
// Push the strict mode flag.
__ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
@ -2280,7 +2283,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
// for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@ -2298,7 +2301,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ lw(a1, GlobalObjectOperand());
__ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
__ Push(v0, a1); // Function, receiver.
@ -2685,7 +2688,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in a0.
VisitForAccumulatorValue(args->at(0));
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
__ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(v0);
@ -2697,7 +2700,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
__ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
__ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -3596,6 +3599,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Load the function into v0.
VisitForAccumulatorValue(args->at(0));
// Prepare for the test.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// Test for strict mode function.
__ lw(a1, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
__ And(at, a1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ Branch(if_true, ne, at, Operand(zero_reg));
// Test for native function.
__ And(at, a1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(if_true, ne, at, Operand(zero_reg));
// Not native or strict-mode function.
__ Branch(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@ -3628,7 +3664,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
EmitCallIC(ic, mode, expr->id());
__ CallWithAstId(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@ -3771,7 +3807,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@ -3882,7 +3918,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in v0.
@ -3914,7 +3951,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3932,7 +3969,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3956,7 +3993,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ CallWithAstId(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL &&
@ -4153,7 +4190,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site, expr->id());
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
@ -4212,70 +4250,6 @@ Register FullCodeGenerator::context_register() {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
RelocInfo::Mode mode,
unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1, a1, a2);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1, a1, a2);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
default:
break;
}
if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
__ Call(ic, mode);
} else {
ASSERT(mode == RelocInfo::CODE_TARGET);
mode = RelocInfo::CODE_TARGET_WITH_ID;
__ CallWithAstId(ic, mode, ast_id);
}
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
JumpPatchSite* patch_site,
unsigned ast_id) {
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1, a1, a2);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1, a1, a2);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
default:
break;
}
if (ast_id == kNoASTId) {
__ Call(ic, RelocInfo::CODE_TARGET);
} else {
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
}
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ sw(value, MemOperand(fp, frame_offset));
@ -4287,6 +4261,27 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ li(at, Operand(Smi::FromInt(0)));
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
__ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(at);
}
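The closure pushed for context allocation depends on the declaration scope: a smi sentinel for global scope, the calling context's closure for eval, and the current frame's function otherwise. The three-way selection, as a plain function over an invented scope enum:

#include <cstdio>

enum ScopeType { GLOBAL, EVAL, FUNCTION };  // invented stand-ins

// Returns a tag describing where the closure argument comes from.
const char* ClosureSource(ScopeType scope) {
  switch (scope) {
    case GLOBAL: return "smi sentinel (runtime looks up the empty function)";
    case EVAL:   return "CLOSURE_INDEX slot of the current context";
    default:     return "function slot of the current stack frame";
  }
}

int main() { std::printf("%s\n", ClosureSource(EVAL)); }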
// ----------------------------------------------------------------------------
// Non-local control flow support.

21
deps/v8/src/mips/macro-assembler-mips.cc

@ -2088,10 +2088,12 @@ void MacroAssembler::CallWithAstId(Handle<Code> code,
Condition cond,
Register r1,
const Operand& r2) {
ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
ASSERT(ast_id != kNoASTId);
ASSERT(ast_id_for_reloc_info_ == kNoASTId);
ast_id_for_reloc_info_ = ast_id;
ASSERT(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
ASSERT(ast_id_for_reloc_info_ == kNoASTId);
ast_id_for_reloc_info_ = ast_id;
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
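CallWithAstId now accepts any code-target mode and only promotes CODE_TARGET to CODE_TARGET_WITH_ID when a real AST id is supplied — which is what lets the header default both rmode and ast_id. The promotion logic on its own, with an illustrative sentinel value for kNoASTId:

#include <cassert>

enum Mode { CODE_TARGET, CODE_TARGET_CONTEXT, CODE_TARGET_WITH_ID };
const unsigned kNoASTId = ~0u;  // sentinel meaning "no id" (illustrative value)

Mode SelectMode(Mode rmode, unsigned ast_id) {
  // Only a plain code target carrying a real id gets the WITH_ID flavor;
  // contextual calls and id-less calls keep their mode unchanged.
  if (rmode == CODE_TARGET && ast_id != kNoASTId) return CODE_TARGET_WITH_ID;
  return rmode;
}

int main() {
  assert(SelectMode(CODE_TARGET, 17) == CODE_TARGET_WITH_ID);
  assert(SelectMode(CODE_TARGET, kNoASTId) == CODE_TARGET);
  assert(SelectMode(CODE_TARGET_CONTEXT, 17) == CODE_TARGET_CONTEXT);
}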
@ -3715,17 +3717,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
// cannot be allowed to destroy the context in esi).
Move(dst, cp);
}
// We should not have found a 'with' context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
if (emit_debug_code()) {
lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
Check(eq, "Yo dawg, I heard you liked function contexts "
"so I put function contexts in all your contexts",
dst, Operand(t9));
}
}

4
deps/v8/src/mips/macro-assembler-mips.h

@ -181,8 +181,8 @@ DECLARE_NOTARGET_PROTOTYPE(Ret)
#undef DECLARE_BRANCH_PROTOTYPES
void CallWithAstId(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg));

35
deps/v8/src/mirror-debugger.js

@ -1243,13 +1243,17 @@ const kFrameDetailsLocalCountIndex = 4;
const kFrameDetailsSourcePositionIndex = 5;
const kFrameDetailsConstructCallIndex = 6;
const kFrameDetailsAtReturnIndex = 7;
const kFrameDetailsDebuggerFrameIndex = 8;
const kFrameDetailsFlagsIndex = 8;
const kFrameDetailsFirstDynamicIndex = 9;
const kFrameDetailsNameIndex = 0;
const kFrameDetailsValueIndex = 1;
const kFrameDetailsNameValueSize = 2;
const kFrameDetailsFlagDebuggerFrame = 1;
const kFrameDetailsFlagOptimizedFrame = 2;
const kFrameDetailsFlagInlinedFrame = 4;
/**
* Wrapper for the frame details information retrieved from the VM. The frame
* details from the VM are an array with the following content. See runtime.cc
@ -1262,7 +1266,7 @@ const kFrameDetailsNameValueSize = 2;
* 5: Source position
* 6: Construct call
* 7: Is at return
* 8: Debugger frame
* 8: Flags (debugger frame, optimized frame, inlined frame)
* Arguments name, value
* Locals name, value
* Return value if any
@ -1308,7 +1312,22 @@ FrameDetails.prototype.isAtReturn = function() {
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsDebuggerFrameIndex];
var f = kFrameDetailsFlagDebuggerFrame;
return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
}
FrameDetails.prototype.isOptimizedFrame = function() {
%CheckExecutionState(this.break_id_);
var f = kFrameDetailsFlagOptimizedFrame;
return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
}
FrameDetails.prototype.isInlinedFrame = function() {
%CheckExecutionState(this.break_id_);
var f = kFrameDetailsFlagInlinedFrame;
return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
}
@ -1447,6 +1466,16 @@ FrameMirror.prototype.isDebuggerFrame = function() {
};
FrameMirror.prototype.isOptimizedFrame = function() {
return this.details_.isOptimizedFrame();
};
FrameMirror.prototype.isInlinedFrame = function() {
return this.details_.isInlinedFrame();
};
FrameMirror.prototype.argumentCount = function() {
return this.details_.argumentCount();
};
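The frame details now collapse three booleans into one flags word at index 8, and each accessor masks out its bit and compares against the full flag value. The same mask-and-compare test, written in C++ with constants mirroring the ones defined above:

#include <cassert>

const int kFlagDebuggerFrame  = 1;  // mirrors kFrameDetailsFlagDebuggerFrame
const int kFlagOptimizedFrame = 2;
const int kFlagInlinedFrame   = 4;

bool HasFlag(int flags_word, int f) { return (flags_word & f) == f; }

int main() {
  int flags = kFlagOptimizedFrame | kFlagInlinedFrame;
  assert(!HasFlag(flags, kFlagDebuggerFrame));
  assert(HasFlag(flags, kFlagOptimizedFrame) && HasFlag(flags, kFlagInlinedFrame));
}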

50
deps/v8/src/objects.cc

@ -3031,11 +3031,33 @@ MaybeObject* JSObject::DeleteFastElement(uint32_t index) {
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
}
int length = IsJSArray()
uint32_t length = static_cast<uint32_t>(
IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: backing_store->length();
if (index < static_cast<uint32_t>(length)) {
: backing_store->length());
if (index < length) {
backing_store->set_the_hole(index);
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
// To avoid doing the check on every delete, we require at least
// one hole adjacent to the value being deleted.
Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
((index > 0 && backing_store->get(index - 1) == hole) ||
(index + 1 < length && backing_store->get(index + 1) == hole))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
if (backing_store->get(i) != hole) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
if (4 * num_used <= backing_store->length()) {
MaybeObject* result = NormalizeElements();
if (result->IsFailure()) return result;
}
}
}
return heap->true_value();
}
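The deletion heuristic only pays for a sparseness scan when the store is large, lives in old space, and the deleted slot already borders a hole; it then counts used slots, bailing out early once more than a quarter are used, and normalizes to dictionary elements otherwise. The counting logic in isolation (the old-space and adjacent-hole preconditions are left to the caller):

#include <vector>
#include <cstdio>

const int kHole = -1;
const int kMinLengthForSparsenessCheck = 64;  // same threshold as the patch

// True when at most 1/4 of the backing store is in use, with the same
// early bailout the V8 loop performs once the store is known to be dense.
bool ShouldNormalize(const std::vector<int>& store) {
  int length = static_cast<int>(store.size());
  if (length < kMinLengthForSparsenessCheck) return false;
  int num_used = 0;
  for (int i = 0; i < length; ++i) {
    if (store[i] != kHole) ++num_used;
    if (4 * num_used > length) return false;  // dense enough, stop counting
  }
  return 4 * num_used <= length;
}

int main() {
  std::vector<int> sparse(128, kHole);
  sparse[0] = sparse[1] = 7;  // 2 of 128 slots used
  std::printf("normalize=%d\n", ShouldNormalize(sparse) ? 1 : 0);  // 1
}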
@ -6287,19 +6309,6 @@ void JSFunction::MarkForLazyRecompilation() {
}
uint32_t JSFunction::SourceHash() {
uint32_t hash = 0;
Object* script = shared()->script();
if (!script->IsUndefined()) {
Object* source = Script::cast(script)->source();
if (source->IsUndefined()) hash = String::cast(source)->Hash();
}
hash ^= ComputeIntegerHash(shared()->start_position_and_type());
hash += ComputeIntegerHash(shared()->end_position());
return hash;
}
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@ -6950,7 +6959,7 @@ Map* Code::FindFirstMap() {
}
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
disasm::NameConverter converter;
@ -7098,10 +7107,6 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
}
}
#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
// Identify kind of code.
const char* Code::Kind2String(Kind kind) {
@ -7192,6 +7197,9 @@ void Code::Disassemble(const char* name, FILE* out) {
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", PropertyType2String(type()));
}
if (is_call_stub() || is_keyed_call_stub()) {
PrintF(out, "argc = %d\n", arguments_count());
}
}
if ((name != NULL) && (name[0] != '\0')) {
PrintF(out, "name = %s\n", name);

9
deps/v8/src/objects.h

@ -3418,7 +3418,7 @@ class DeoptimizationInputData: public FixedArray {
// Casting.
static inline DeoptimizationInputData* cast(Object* obj);
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputDataPrint(FILE* out);
#endif
@ -3946,6 +3946,10 @@ class Map: public HeapObject {
kind <= JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
}
inline bool has_dictionary_elements() {
return elements_kind() == JSObject::DICTIONARY_ELEMENTS;
}
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@ -4924,9 +4928,6 @@ class JSFunction: public JSObject {
// recompilation.
inline bool IsMarkedForLazyRecompilation();
// Compute a hash code for the source code of this function.
uint32_t SourceHash();
// Check whether or not this function is inlineable.
bool IsInlineable();

126
deps/v8/src/parser.cc

@ -411,6 +411,7 @@ Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
return result;
}
// ----------------------------------------------------------------------------
// Target is a support class to facilitate manipulation of the
// Parser's target_stack_ (the stack of potential 'break' and
@ -1301,13 +1302,14 @@ VariableProxy* Parser::Declare(Handle<String> name,
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
if (top_scope_->is_function_scope() ||
top_scope_->is_strict_mode_eval_scope()) {
Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_function_scope() ||
declaration_scope->is_strict_mode_eval_scope()) {
// Declare the variable in the function scope.
var = top_scope_->LocalLookup(name);
var = declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
var = top_scope_->DeclareLocal(name, mode);
var = declaration_scope->DeclareLocal(name, mode);
} else {
// The name was declared before; check for conflicting
// re-declarations. If the previous declaration was a const or the
@ -1323,7 +1325,7 @@ VariableProxy* Parser::Declare(Handle<String> name,
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
top_scope_->SetIllegalRedeclaration(expression);
declaration_scope->SetIllegalRedeclaration(expression);
}
}
}
@ -1344,14 +1346,18 @@ VariableProxy* Parser::Declare(Handle<String> name,
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
// Runtime::DeclareContextSlot() calls.
VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
VariableProxy* proxy = declaration_scope->NewUnresolved(name, false);
declaration_scope->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
// For global const variables we bind the proxy to a variable.
if (mode == Variable::CONST && top_scope_->is_global_scope()) {
if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
var = new(zone()) Variable(declaration_scope,
name,
Variable::CONST,
true,
kind);
}
// If requested and we have a local variable, bind the proxy to the variable
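Throughout this hunk, top_scope_ is replaced by declaration_scope = top_scope_->DeclarationScope(), i.e. the nearest scope that can actually host a 'var' binding. A minimal standalone sketch of that walk, using invented stand-in types rather than V8's Scope class:

#include <cassert>
#include <cstddef>

enum ScopeType { GLOBAL_SCOPE, FUNCTION_SCOPE, EVAL_SCOPE, CATCH_SCOPE };

struct Scope {
  ScopeType type;
  Scope* outer;
  Scope(ScopeType t, Scope* o) : type(t), outer(o) {}

  bool is_declaration_scope() const {
    return type == GLOBAL_SCOPE || type == FUNCTION_SCOPE ||
           type == EVAL_SCOPE;
  }

  // Walk outward until a scope that can hold 'var' declarations is found.
  Scope* DeclarationScope() {
    Scope* scope = this;
    while (!scope->is_declaration_scope()) scope = scope->outer;
    return scope;
  }
};

int main() {
  Scope global(GLOBAL_SCOPE, NULL);
  Scope fn(FUNCTION_SCOPE, &global);
  Scope catch_scope(CATCH_SCOPE, &fn);
  // A 'var' declared inside a catch block still lands in the function scope.
  assert(catch_scope.DeclarationScope() == &fn);
  return 0;
}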
@ -1407,7 +1413,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time, not when reparsing
// because of lazy compilation.
top_scope_->ForceEagerCompilation();
top_scope_->DeclarationScope()->ForceEagerCompilation();
// Compute the function template for the native function.
v8::Handle<v8::FunctionTemplate> fun_template =
@ -1485,8 +1491,8 @@ Block* Parser::ParseVariableStatement(bool* ok) {
// VariableStatement ::
// VariableDeclarations ';'
Expression* dummy; // to satisfy the ParseVariableDeclarations() signature
Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
Handle<String> ignore;
Block* result = ParseVariableDeclarations(true, &ignore, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
}
@ -1504,18 +1510,19 @@ bool Parser::IsEvalOrArguments(Handle<String> string) {
// to initialize it properly. This mechanism is used for the parsing
// of 'for-in' loops.
Block* Parser::ParseVariableDeclarations(bool accept_IN,
Expression** var,
Handle<String>* out,
bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
Variable::Mode mode = Variable::VAR;
bool is_const = false;
Scope* declaration_scope = top_scope_->DeclarationScope();
if (peek() == Token::VAR) {
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
Consume(Token::CONST);
if (top_scope_->is_strict_mode()) {
if (declaration_scope->is_strict_mode()) {
ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
@ -1540,18 +1547,18 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
//
// Create new block with one expected declaration.
Block* block = new(zone()) Block(NULL, 1, true);
VariableProxy* last_var = NULL; // the last variable declared
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
if (fni_ != NULL) fni_->Enter();
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
Handle<String> name = ParseIdentifier(CHECK_OK);
name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@ -1569,11 +1576,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// If we have a const declaration, in an inner scope, the proxy is always
// bound to the declared variable (independent of possibly surrounding with
// statements).
last_var = Declare(name, mode, NULL,
is_const /* always bound for CONST! */,
CHECK_OK);
Declare(name, mode, NULL, is_const /* always bound for CONST! */,
CHECK_OK);
nvars++;
if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessageAt(scanner().location(), "too_many_variables",
Vector<const char*>::empty());
*ok = false;
@ -1589,10 +1595,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
//
// var v; v = x;
//
// In particular, we need to re-lookup 'v' as it may be a
// different 'v' than the 'v' in the declaration (if we are inside
// a 'with' statement that makes an object property with name 'v'
// visible).
// In particular, we need to re-lookup 'v' (in top_scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
//
// However, note that const declarations are different! A const
// declaration of the form:
@ -1607,6 +1613,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
int position = -1;
if (peek() == Token::ASSIGN) {
@ -1647,7 +1654,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
if (top_scope_->is_global_scope()) {
if (initialization_scope->is_global_scope()) {
// Compute the arguments for the runtime call.
ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
// We have at least 1 parameter.
@ -1670,8 +1677,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
} else {
// Add strict mode.
// We may want to pass a singleton to avoid Literal allocations.
arguments->Add(NewNumberLiteral(
top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
StrictModeFlag flag = initialization_scope->is_strict_mode()
? kStrictMode
: kNonStrictMode;
arguments->Add(NewNumberLiteral(flag));
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@ -1708,8 +1717,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// the top context for variables). Sigh...
if (value != NULL) {
Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
bool in_with = is_const ? false : inside_with();
VariableProxy* proxy =
initialization_scope->NewUnresolved(name, in_with);
Assignment* assignment =
new(zone()) Assignment(op, last_var, value, position);
new(zone()) Assignment(op, proxy, value, position);
if (block) {
block->AddStatement(new(zone()) ExpressionStatement(assignment));
}
@ -1718,10 +1730,10 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
if (fni_ != NULL) fni_->Leave();
} while (peek() == Token::COMMA);
if (!is_const && nvars == 1) {
// We have a single, non-const variable.
ASSERT(last_var != NULL);
*var = last_var;
// If there was a single non-const declaration, return it in the output
// parameter for possible use by for/in.
if (nvars == 1 && !is_const) {
*out = name;
}
return block;
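The comment block above is the heart of this change: the declaration lives in declaration_scope, but the initializer resolves the name through top_scope_ so that an enclosing 'with' object can intercept the write. A standalone sketch of that interception, using plain std::map stand-ins for real scopes:

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> function_scope;  // hoisted 'var' slots
  std::map<std::string, int> with_object;     // the object given to 'with'

  function_scope["v"] = 0;  // declaration: var v; (hoisted)
  with_object["v"] = 0;     // the with-object has an own property 'v'

  // The initialization 'v = 1' resolves through the with-object first.
  std::string name("v");
  if (with_object.count(name) > 0) {
    with_object[name] = 1;
  } else {
    function_scope[name] = 1;
  }

  assert(with_object["v"] == 1);
  assert(function_scope["v"] == 0);  // the hoisted slot is untouched
  return 0;
}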
@ -1895,7 +1907,9 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
if (!top_scope_->is_function_scope()) {
Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
Handle<String> type = isolate()->factory()->illegal_return_symbol();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new(zone()) ExpressionStatement(throw_error);
@ -1922,7 +1936,7 @@ Block* Parser::WithHelper(Expression* obj, ZoneStringList* labels, bool* ok) {
Statement* stat;
{ Target target(&this->target_stack_, &collector);
with_nesting_level_++;
top_scope_->RecordWithStatement();
top_scope_->DeclarationScope()->RecordWithStatement();
stat = ParseStatement(labels, CHECK_OK);
with_nesting_level_--;
}
@ -2082,6 +2096,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// block. Since we don't know yet if there will be a finally block, we
// always collect the targets.
TargetCollector catch_collector;
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
Handle<String> name;
if (tok == Token::CATCH) {
@ -2108,10 +2124,16 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TargetCollector inner_collector;
{ Target target(&this->target_stack_, &catch_collector);
{ Target target(&this->target_stack_, &inner_collector);
++with_nesting_level_;
top_scope_->RecordWithStatement();
catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
if (top_scope_->is_strict_mode()) {
catch_scope->EnableStrictMode();
}
catch_variable = catch_scope->DeclareLocal(name, Variable::VAR);
Scope* saved_scope = top_scope_;
top_scope_ = catch_scope;
inner_body = ParseBlock(NULL, CHECK_OK);
--with_nesting_level_;
top_scope_ = saved_scope;
}
}
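With the dedicated CATCH_SCOPE introduced here, the catch parameter is a real variable that shadows outer bindings only for the duration of the catch block. A hedged standalone sketch of that shadowing (invented types, not V8's scope machinery):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

struct SimpleScope {
  std::map<std::string, int> vars;
  SimpleScope* outer;
  explicit SimpleScope(SimpleScope* o) : outer(o) {}

  // Resolve a name through this scope and its outer chain.
  int* Lookup(const std::string& name) {
    for (SimpleScope* s = this; s != NULL; s = s->outer) {
      std::map<std::string, int>::iterator it = s->vars.find(name);
      if (it != s->vars.end()) return &it->second;
    }
    return NULL;
  }
};

int main() {
  SimpleScope function_scope(NULL);
  function_scope.vars["e"] = 1;  // var e = 1;

  {  // try { throw 42; } catch (e) { ... }
    SimpleScope catch_scope(&function_scope);
    catch_scope.vars["e"] = 42;  // the caught exception value
    assert(*catch_scope.Lookup("e") == 42);  // shadowed inside the catch
  }

  assert(*function_scope.Lookup("e") == 1);  // outer binding untouched
  return 0;
}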
@ -2145,19 +2167,28 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// 'try { try B0 catch B1 } finally B2'
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
TryCatchStatement* statement =
new(zone()) TryCatchStatement(try_block, name, catch_block);
new(zone()) TryCatchStatement(try_block,
catch_scope,
catch_variable,
catch_block);
statement->set_escaping_targets(try_collector.targets());
try_block = new(zone()) Block(NULL, 1, false);
try_block->AddStatement(statement);
catch_block = NULL;
catch_block = NULL; // Clear to indicate it's been handled.
}
TryStatement* result = NULL;
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
result =
new(zone()) TryCatchStatement(try_block, name, catch_block);
new(zone()) TryCatchStatement(try_block,
catch_scope,
catch_variable,
catch_block);
} else {
ASSERT(finally_block != NULL);
result = new(zone()) TryFinallyStatement(try_block, finally_block);
@ -2230,10 +2261,12 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
Expression* each = NULL;
Handle<String> name;
Block* variable_statement =
ParseVariableDeclarations(false, &each, CHECK_OK);
if (peek() == Token::IN && each != NULL) {
ParseVariableDeclarations(false, &name, CHECK_OK);
if (peek() == Token::IN && !name.is_null()) {
VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
ForInStatement* loop = new(zone()) ForInStatement(labels);
Target target(&this->target_stack_, loop);
@ -2901,8 +2934,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
switch (peek()) {
case Token::THIS: {
Consume(Token::THIS);
VariableProxy* recv = top_scope_->receiver();
result = recv;
result = new(zone()) VariableProxy(top_scope_->receiver());
break;
}
@ -3762,7 +3794,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time, not when reparsing because of lazy compilation.
top_scope_->ForceEagerCompilation();
top_scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForSymbol(name);

6
deps/v8/src/parser.h

@ -436,7 +436,7 @@ class Parser {
const char* message,
Vector<Handle<String> > args);
protected:
private:
// Limit on number of function parameters is chosen arbitrarily.
// Code::Flags uses only the low 17 bits of num-parameters to
// construct a hashable id, so if more than 2^17 are allowed, this
@ -484,7 +484,9 @@ class Parser {
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneStringList* labels, bool* ok);
Block* ParseVariableStatement(bool* ok);
Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
Block* ParseVariableDeclarations(bool accept_IN,
Handle<String>* out,
bool* ok);
Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
bool* ok);
IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);

232
deps/v8/src/platform-solaris.cc

@ -88,6 +88,7 @@ double ceiling(double x) {
}
static Mutex* limit_mutex = NULL;
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@ -96,6 +97,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
}
@ -145,6 +147,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
ScopedLock lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@ -407,7 +412,6 @@ static void* ThreadEntry(void* arg) {
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@ -587,78 +591,172 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
static Sampler* active_sampler_ = NULL;
static pthread_t vm_tid_ = 0;
static pthread_t GetThreadID() {
return pthread_self();
}
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
if (vm_tid_ != GetThreadID()) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent();
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = Top::current_vm_state();
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
active_sampler_->SampleStack(sample);
active_sampler_->Tick(sample);
sampler->SampleStack(sample);
sampler->Tick(sample);
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData() : vm_tid_(GetThreadID()) {}
pthread_t vm_tid() const { return vm_tid_; }
private:
pthread_t vm_tid_;
};
class SignalSender : public Thread {
public:
enum SleepInterval {
FULL_INTERVAL,
HALF_INTERVAL
HALF_INTERVAL,
FULL_INTERVAL
};
explicit PlatformData(Sampler* sampler)
: sampler_(sampler),
signal_handler_installed_(false),
vm_tgid_(getpid()),
signal_sender_launched_(false) {
explicit SignalSender(int interval)
: Thread("SignalSender"),
interval_(interval) {}
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send the SIGPROF signal to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->Start();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
instance_->Join();
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
void SignalSender() {
while (sampler_->IsActive()) {
if (rate_limiter_.SuspendIfNecessary()) continue;
if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
SendProfilingSignal();
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
if (cpu_profiling_enabled && !signal_handler_installed_) {
InstallSignalHandler();
} else if (!cpu_profiling_enabled && signal_handler_installed_) {
RestoreSignalHandler();
}
// When CPU profiling is enabled, both JavaScript and C++ code are
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
return;
}
Sleep(HALF_INTERVAL);
RuntimeProfiler::NotifyTick();
if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
return;
}
Sleep(HALF_INTERVAL);
} else {
if (sampler_->IsProfiling()) SendProfilingSignal();
if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
if (cpu_profiling_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
this)) {
return;
}
}
if (runtime_profiler_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
NULL)) {
return;
}
}
Sleep(FULL_INTERVAL);
}
}
}
void SendProfilingSignal() {
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
if (!sampler->isolate()->IsInitialized()) return;
sampler->isolate()->runtime_profiler()->NotifyTick();
}
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(vm_tid_, SIGPROF);
pthread_kill(tid, SIGPROF);
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = sampler_->interval_ * 1000 - 100;
useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
@ -673,22 +771,22 @@ class Sampler::PlatformData : public Malloced {
USE(result);
}
Sampler* sampler_;
bool signal_handler_installed_;
struct sigaction old_signal_handler_;
int vm_tgid_;
bool signal_sender_launched_;
pthread_t signal_sender_thread_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
};
// Protects the process-wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
static void* SenderEntry(void* arg) {
Sampler::PlatformData* data =
reinterpret_cast<Sampler::PlatformData*>(arg);
data->SignalSender();
return 0;
}
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = OS::CreateMutex();
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
Sampler::Sampler(Isolate* isolate, int interval)
@ -697,63 +795,27 @@ Sampler::Sampler(Isolate* isolate, int interval)
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData(this);
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!data_->signal_sender_launched_);
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
// There can only be one active sampler at a time on POSIX
// platforms.
ASSERT(!IsActive());
vm_tid_ = GetThreadID();
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
data_->signal_handler_installed_ =
sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
// Start a thread that sends SIGPROF signal to VM thread.
// Sending the signal ourselves instead of relying on itimer provides
// much better accuracy.
SetActive(true);
if (pthread_create(
&data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
data_->signal_sender_launched_ = true;
}
// Set this sampler as the active sampler.
active_sampler_ = this;
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
// Wait for signal sender termination (it will exit after setting
// active_ to false).
if (data_->signal_sender_launched_) {
Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
pthread_join(data_->signal_sender_thread_, NULL);
data_->signal_sender_launched_ = false;
}
// Restore old signal handler
if (data_->signal_handler_installed_) {
sigaction(SIGPROF, &data_->old_signal_handler_, 0);
data_->signal_handler_installed_ = false;
}
// This sampler is no longer the active sampler.
active_sampler_ = NULL;
}
#endif // ENABLE_LOGGING_AND_PROFILING
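The SignalSender rework above is a variation on a standard POSIX pattern: install a SIGPROF handler with sigaction(), then have a dedicated thread deliver the signal to the sampled thread via pthread_kill(). A minimal standalone sketch of just that pattern (V8's version adds the sampler registry, rate limiting, and isolate checks; compile with -lpthread):

#include <pthread.h>
#include <signal.h>
#include <unistd.h>
#include <cstdio>

static volatile sig_atomic_t samples_taken = 0;

// Handler runs on the sampled thread; do only async-signal-safe work here.
static void ProfilerSignalHandler(int sig, siginfo_t*, void*) {
  if (sig == SIGPROF) samples_taken = samples_taken + 1;
}

static void* SenderEntry(void* arg) {
  pthread_t target = *reinterpret_cast<pthread_t*>(arg);
  for (int i = 0; i < 5; i++) {
    pthread_kill(target, SIGPROF);  // deliver SIGPROF to one specific thread
    usleep(1000);                   // ~1 ms sampling interval
  }
  return NULL;
}

int main() {
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);

  pthread_t self = pthread_self();
  pthread_t sender;
  pthread_create(&sender, NULL, SenderEntry, &self);
  pthread_join(sender, NULL);
  std::printf("samples taken: %d\n", static_cast<int>(samples_taken));
  return 0;
}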

9
deps/v8/src/prettyprinter.cc

@ -203,7 +203,7 @@ void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Visit(node->try_block());
Print(" catch (");
const bool quote = false;
PrintLiteral(node->name(), quote);
PrintLiteral(node->variable()->name(), quote);
Print(") ");
Visit(node->catch_block());
}
@ -856,8 +856,9 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
const bool quote = false;
PrintLiteralIndented("CATCHVAR", node->name(), quote);
PrintLiteralWithModeIndented("CATCHVAR",
node->variable(),
node->variable()->name());
PrintIndentedVisit("CATCH", node->catch_block());
}
@ -1244,7 +1245,7 @@ void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TagScope tag(this, "TryCatchStatement");
{ AttributesScope attributes(this);
AddAttribute("variable", stmt->name());
AddAttribute("variable", stmt->variable()->name());
}
Visit(stmt->try_block());
Visit(stmt->catch_block());

108
deps/v8/src/profile-generator.cc

@ -1635,7 +1635,8 @@ HeapObject *const V8HeapExplorer::kGcRootsObject =
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
: snapshot_(snapshot),
: heap_(Isolate::Current()->heap()),
snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
filler_(NULL) {
@ -1725,10 +1726,14 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
: "",
children_count,
retainers_count);
} else if (object->IsFixedArray() || object->IsByteArray()) {
} else if (object->IsFixedArray() ||
object->IsFixedDoubleArray() ||
object->IsByteArray() ||
object->IsExternalArray()) {
const char* tag = objects_tags_.GetTag(object);
return AddEntry(object,
HeapEntry::kArray,
"",
tag != NULL ? tag : "",
children_count,
retainers_count);
} else if (object->IsHeapNumber()) {
@ -1836,15 +1841,13 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
HeapEntry* entry = GetEntry(obj);
if (entry == NULL) return; // No interest in this object.
bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
// We need to reference JS global objects from snapshot's root.
// We use JSGlobalProxy because this is what the embedder (e.g. a browser)
// uses for the global object.
JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
SetRootShortcutReference(proxy->map()->prototype());
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
ExtractClosureReferences(js_obj, entry);
@ -1852,7 +1855,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
SetPropertyReference(
obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
@ -1860,39 +1863,49 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
HEAP->prototype_symbol(), proto_or_map,
heap_->prototype_symbol(), proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(
obj, entry,
HEAP->prototype_symbol(), js_fun->prototype());
heap_->prototype_symbol(), js_fun->prototype());
}
}
SetInternalReference(js_fun, entry,
"shared", js_fun->shared(),
JSFunction::kSharedFunctionInfoOffset);
TagObject(js_fun->unchecked_context(), "(context)");
SetInternalReference(js_fun, entry,
"context", js_fun->unchecked_context(),
JSFunction::kContextOffset);
TagObject(js_fun->literals(), "(function literals)");
SetInternalReference(js_fun, entry,
"literals", js_fun->literals(),
JSFunction::kLiteralsOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
"properties", js_obj->properties(),
JSObject::kPropertiesOffset);
TagObject(js_obj->elements(), "(object elements)");
SetInternalReference(obj, entry,
"elements", js_obj->elements(),
JSObject::kElementsOffset);
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsString()) {
if (obj->IsConsString()) {
ConsString* cs = ConsString::cast(obj);
SetInternalReference(obj, entry, 1, cs->first());
SetInternalReference(obj, entry, 2, cs->second());
}
extract_indexed_refs = false;
} else if (obj->IsGlobalContext()) {
Context* context = Context::cast(obj);
TagObject(context->jsfunction_result_caches(),
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->map_cache(), "(context map cache)");
TagObject(context->data(), "(context data)");
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@ -1901,6 +1914,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"constructor", map->constructor(),
Map::kConstructorOffset);
if (!map->instance_descriptors()->IsEmpty()) {
TagObject(map->instance_descriptors(), "(map descriptors)");
SetInternalReference(obj, entry,
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
@ -1908,9 +1922,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
SetInternalReference(obj, entry,
@ -1919,16 +1930,61 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"code", shared->unchecked_code(),
SharedFunctionInfo::kCodeOffset);
TagObject(shared->scope_info(), "(function scope info)");
SetInternalReference(obj, entry,
"scope_info", shared->scope_info(),
SharedFunctionInfo::kScopeInfoOffset);
SetInternalReference(obj, entry,
"instance_class_name", shared->instance_class_name(),
SharedFunctionInfo::kInstanceClassNameOffset);
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else {
} else if (obj->IsScript()) {
Script* script = Script::cast(obj);
SetInternalReference(obj, entry,
"source", script->source(),
Script::kSourceOffset);
SetInternalReference(obj, entry,
"name", script->name(),
Script::kNameOffset);
SetInternalReference(obj, entry,
"data", script->data(),
Script::kDataOffset);
SetInternalReference(obj, entry,
"context_data", script->context_data(),
Script::kContextOffset);
TagObject(script->line_ends(), "(script line ends)");
SetInternalReference(obj, entry,
"line_ends", script->line_ends(),
Script::kLineEndsOffset);
} else if (obj->IsDescriptorArray()) {
DescriptorArray* desc_array = DescriptorArray::cast(obj);
if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
Object* content_array =
desc_array->get(DescriptorArray::kContentArrayIndex);
TagObject(content_array, "(map descriptor content)");
SetInternalReference(obj, entry,
"content", content_array,
FixedArray::OffsetOfElementAt(
DescriptorArray::kContentArrayIndex));
}
} else if (obj->IsCodeCache()) {
CodeCache* code_cache = CodeCache::cast(obj);
TagObject(code_cache->default_cache(), "(default code cache)");
SetInternalReference(obj, entry,
"default_cache", code_cache->default_cache(),
CodeCache::kDefaultCacheOffset);
TagObject(code_cache->normal_type_cache(), "(code type cache)");
SetInternalReference(obj, entry,
"type_cache", code_cache->normal_type_cache(),
CodeCache::kNormalTypeCacheOffset);
} else if (obj->IsCode()) {
Code* code = Code::cast(obj);
TagObject(code->unchecked_relocation_info(), "(code relocation info)");
TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
}
if (extract_indexed_refs) {
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
@ -2086,7 +2142,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
HEAP->IterateRoots(&extractor, VISIT_ALL);
heap_->IterateRoots(&extractor, VISIT_ALL);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@ -2241,6 +2297,18 @@ void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
}
void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
if (obj->IsHeapObject() &&
!obj->IsOddball() &&
obj != heap_->raw_unchecked_empty_byte_array() &&
obj != heap_->raw_unchecked_empty_fixed_array() &&
obj != heap_->raw_unchecked_empty_fixed_double_array() &&
obj != heap_->raw_unchecked_empty_descriptor_array()) {
objects_tags_.SetTag(obj, tag);
}
}
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {

2
deps/v8/src/profile-generator.h

@ -973,9 +973,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;

2
deps/v8/src/rewriter.cc

@ -218,7 +218,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ASSERT(function != NULL);
Scope* scope = function->scope();
ASSERT(scope != NULL);
if (scope->is_function_scope()) return true;
if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {

143
deps/v8/src/runtime-profiler.cc

@ -43,32 +43,6 @@ namespace v8 {
namespace internal {
class PendingListNode : public Malloced {
public:
explicit PendingListNode(JSFunction* function);
~PendingListNode() { Destroy(); }
PendingListNode* next() const { return next_; }
void set_next(PendingListNode* node) { next_ = node; }
Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
// If the function is garbage collected before we've had the chance
// to optimize it, the weak handle will be null.
bool IsValid() { return !function_.is_null(); }
// Returns the number of microseconds this node has been pending.
int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
private:
void Destroy();
static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
PendingListNode* next_;
Handle<Object> function_; // Weak handle.
int64_t start_;
};
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
@ -80,33 +54,10 @@ static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;
static const int kSamplerThresholdSizeFactorInit = 3;
static const int kSamplerThresholdSizeFactorMin = 1;
static const int kSamplerThresholdSizeFactorDelta = 1;
static const int kSizeLimit = 1500;
PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
GlobalHandles* global_handles = Isolate::Current()->global_handles();
function_ = global_handles->Create(function);
start_ = OS::Ticks();
global_handles->MakeWeak(function_.location(), this, &WeakCallback);
}
void PendingListNode::Destroy() {
if (!IsValid()) return;
GlobalHandles* global_handles = Isolate::Current()->global_handles();
global_handles->Destroy(function_.location());
function_= Handle<Object>::null();
}
void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
reinterpret_cast<PendingListNode*>(data)->Destroy();
}
Atomic32 RuntimeProfiler::state_ = 0;
// TODO(isolates): Create the semaphore lazily and clean it up when no
// longer required.
@ -125,16 +76,8 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
sampler_threshold_(kSamplerThresholdInit),
sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
sampler_ticks_until_threshold_adjustment_(
kSamplerTicksBetweenThresholdAdjustment),
js_ratio_(0),
sampler_window_position_(0),
optimize_soon_list_(NULL),
state_window_position_(0),
state_window_ticks_(0) {
state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
state_counts_[IN_JS_STATE] = 0;
STATIC_ASSERT(IN_NON_JS_STATE == 0);
memset(state_window_, 0, sizeof(state_window_));
kSamplerTicksBetweenThresholdAdjustment),
sampler_window_position_(0) {
ClearSampleBuffer();
}
@ -148,16 +91,13 @@ void RuntimeProfiler::GlobalSetup() {
}
void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
void RuntimeProfiler::Optimize(JSFunction* function) {
ASSERT(function->IsOptimizable());
if (FLAG_trace_opt) {
PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
PrintF("[marking ");
function->PrintName();
PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
PrintF(" for recompilation");
if (delay > 0) {
PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
}
PrintF("]\n");
}
@ -243,20 +183,6 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
PendingListNode* current = optimize_soon_list_;
while (current != NULL) {
PendingListNode* next = current->next();
if (current->IsValid()) {
Handle<JSFunction> function = current->function();
int delay = current->Delay();
if (function->IsOptimizable()) {
Optimize(*function, true, delay);
}
}
delete current;
current = next;
}
optimize_soon_list_ = NULL;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@ -303,24 +229,9 @@ void RuntimeProfiler::OptimizeNow() {
: 1;
int threshold = sampler_threshold_ * threshold_size_factor;
int current_js_ratio = NoBarrier_Load(&js_ratio_);
// Adjust threshold depending on the ratio of time spent
// in JS code.
if (current_js_ratio < 20) {
// If we spend less than 20% of the time in JS code,
// do not optimize.
continue;
} else if (current_js_ratio < 75) {
// Below 75% of time spent in JS code, only optimize very
// frequently used functions.
threshold *= 3;
}
if (LookupSample(function) >= threshold) {
Optimize(function, false, 0);
isolate_->compilation_cache()->MarkForEagerOptimizing(
Handle<JSFunction>(function));
Optimize(function);
}
}
@ -333,40 +244,8 @@ void RuntimeProfiler::OptimizeNow() {
}
void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
if (!function->IsOptimizable()) return;
PendingListNode* node = new PendingListNode(function);
node->set_next(optimize_soon_list_);
optimize_soon_list_ = node;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
SamplerState old_state = state_window_[state_window_position_];
state_counts_[old_state]--;
state_window_[state_window_position_] = current_state;
state_counts_[current_state]++;
ASSERT(IsPowerOf2(kStateWindowSize));
state_window_position_ = (state_window_position_ + 1) &
(kStateWindowSize - 1);
// Note: to calculate correct ratio we have to track how many valid
// ticks are actually in the state window, because on profiler
// startup this number can be less than the window size.
state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
state_window_ticks_);
}
#endif
void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Record state sample.
SamplerState state = IsSomeIsolateInJS()
? IN_JS_STATE
: IN_NON_JS_STATE;
UpdateStateRatio(state);
isolate_->stack_guard()->RequestRuntimeProfilerTick();
#endif
}
@ -424,7 +303,6 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
// to get the right count of active isolates.
NoBarrier_AtomicIncrement(&state_, 1);
semaphore_->Signal();
isolate->ResetEagerOptimizingData();
#endif
}
@ -471,15 +349,8 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int kNonJSTicksThreshold = 100;
if (RuntimeProfiler::IsSomeIsolateInJS()) {
non_js_ticks_ = 0;
} else {
if (non_js_ticks_ < kNonJSTicksThreshold) {
++non_js_ticks_;
} else {
return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
if (!RuntimeProfiler::IsSomeIsolateInJS()) {
return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
#endif
return false;

23
deps/v8/src/runtime-profiler.h

@ -37,7 +37,6 @@ namespace internal {
class Isolate;
class JSFunction;
class Object;
class PendingListNode;
class Semaphore;
class RuntimeProfiler {
@ -52,7 +51,6 @@ class RuntimeProfiler {
}
void OptimizeNow();
void OptimizeSoon(JSFunction* function);
void NotifyTick();
@ -106,7 +104,7 @@ class RuntimeProfiler {
static void HandleWakeUp(Isolate* isolate);
void Optimize(JSFunction* function, bool eager, int delay);
void Optimize(JSFunction* function);
void AttemptOnStackReplacement(JSFunction* function);
@ -118,31 +116,16 @@ class RuntimeProfiler {
void AddSample(JSFunction* function, int weight);
#ifdef ENABLE_LOGGING_AND_PROFILING
void UpdateStateRatio(SamplerState current_state);
#endif
Isolate* isolate_;
int sampler_threshold_;
int sampler_threshold_size_factor_;
int sampler_ticks_until_threshold_adjustment_;
// The ratio of ticks spent in JS code in percent.
Atomic32 js_ratio_;
Object* sampler_window_[kSamplerWindowSize];
int sampler_window_position_;
int sampler_window_weight_[kSamplerWindowSize];
// Support for pending 'optimize soon' requests.
PendingListNode* optimize_soon_list_;
SamplerState state_window_[kStateWindowSize];
int state_window_position_;
int state_window_ticks_;
int state_counts_[2];
// Possible state values:
// -1 => the profiler thread is waiting on the semaphore
// 0 or positive => the number of isolates running JavaScript code.
@ -159,7 +142,7 @@ class RuntimeProfiler {
// Rate limiter intended to be used in the profiler thread.
class RuntimeProfilerRateLimiter BASE_EMBEDDED {
public:
RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
RuntimeProfilerRateLimiter() {}
// Suspends the current thread (which must be the profiler thread)
// when not executing JavaScript to minimize CPU usage. Returns
@ -170,8 +153,6 @@ class RuntimeProfilerRateLimiter BASE_EMBEDDED {
bool SuspendIfNecessary();
private:
int non_js_ticks_;
DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
};

236
deps/v8/src/runtime.cc

@ -3918,15 +3918,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
if (proto->IsNull()) return *obj_value;
js_object = Handle<JSObject>::cast(proto);
}
NormalizeElements(js_object);
Handle<NumberDictionary> dictionary(js_object->element_dictionary());
Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
Handle<NumberDictionary> extended_dictionary =
NumberDictionarySet(dictionary, index, obj_value, details);
if (*extended_dictionary != *dictionary) {
js_object->set_elements(*extended_dictionary);
if (js_object->GetElementsKind() ==
JSObject::NON_STRICT_ARGUMENTS_ELEMENTS) {
FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
} else {
js_object->set_elements(*extended_dictionary);
}
}
return *obj_value;
}
@ -3981,8 +3985,7 @@ static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
Handle<Object> value,
PropertyAttributes attr) {
// Normalize the elements to enable attributes on the property.
NormalizeElements(js_object);
Handle<NumberDictionary> dictionary(js_object->element_dictionary());
Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
@ -5742,6 +5745,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
}
void FindAsciiStringIndices(Vector<const char> subject,
char pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
const char* subject_start = reinterpret_cast<const char*>(subject.start());
const char* subject_end = subject_start + subject.length();
const char* pos = subject_start;
while (limit > 0) {
pos = reinterpret_cast<const char*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
indices->Add(static_cast<int>(pos - subject_start));
pos++;
limit--;
}
}
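A quick standalone check of the memchr-based scan introduced above, substituting std::vector for V8's ZoneList:

#include <cstdio>
#include <cstring>
#include <vector>

// memchr-based scan, as in FindAsciiStringIndices(), but self-contained.
static void FindCharIndices(const char* subject, size_t length, char pattern,
                            std::vector<int>* indices, unsigned limit) {
  const char* start = subject;
  const char* end = subject + length;
  const char* pos = start;
  while (limit > 0) {
    pos = static_cast<const char*>(std::memchr(pos, pattern, end - pos));
    if (pos == NULL) return;
    indices->push_back(static_cast<int>(pos - start));
    pos++;
    limit--;
  }
}

int main() {
  const char* s = "a,b,,c";
  std::vector<int> indices;
  FindCharIndices(s, std::strlen(s), ',', &indices, 100);
  for (size_t i = 0; i < indices.size(); i++) {
    std::printf("%d ", indices[i]);  // prints: 1 3 4
  }
  std::printf("\n");
  return 0;
}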
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@ -5749,11 +5773,11 @@ void FindStringIndices(Isolate* isolate,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Collect indices of pattern in subject.
// Stop after finding at most limit values.
StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
int pattern_length = pattern.length();
int index = 0;
StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
@ -5796,11 +5820,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
FindStringIndices(isolate,
subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
Vector<const char> pattern_vector = pattern->ToAsciiVector();
if (pattern_vector.length() == 1) {
FindAsciiStringIndices(subject_vector,
pattern_vector[0],
&indices,
limit);
} else {
FindStringIndices(isolate,
subject_vector,
pattern_vector,
&indices,
limit);
}
} else {
FindStringIndices(isolate,
subject_vector,
@ -7821,7 +7853,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
}
}
isolate->compilation_cache()->MarkForLazyOptimizing(function);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
} else {
@ -9938,7 +9969,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
// If there is no JavaScript stack, the frame count is 0.
return Smi::FromInt(0);
}
for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
n += it.frame()->GetInlineCount();
}
return Smi::FromInt(n);
}
@ -9951,7 +9985,7 @@ static const int kFrameDetailsLocalCountIndex = 4;
static const int kFrameDetailsSourcePositionIndex = 5;
static const int kFrameDetailsConstructCallIndex = 6;
static const int kFrameDetailsAtReturnIndex = 7;
static const int kFrameDetailsDebuggerFrameIndex = 8;
static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
// Return an array with frame details
@ -9967,7 +10001,7 @@ static const int kFrameDetailsFirstDynamicIndex = 9;
// 5: Source position
// 6: Constructor call
// 7: Is at return
// 8: Debugger frame
// 8: Flags
// Arguments name, value
// Locals name, value
// Return value if any
@ -9990,16 +10024,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// If there are no JavaScript stack frames, return undefined.
return heap->undefined_value();
}
int deoptimized_frame_index = -1; // Frame index in optimized frame.
DeoptimizedFrameInfo* deoptimized_frame = NULL;
int count = 0;
JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
if (count == index) break;
count++;
if (index < count + it.frame()->GetInlineCount()) break;
count += it.frame()->GetInlineCount();
}
if (it.done()) return heap->undefined_value();
bool is_optimized_frame =
it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
if (it.frame()->is_optimized()) {
deoptimized_frame_index =
it.frame()->GetInlineCount() - (index - count) - 1;
deoptimized_frame = Deoptimizer::DebuggerInspectableFrame(
it.frame(),
deoptimized_frame_index,
isolate);
}
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@ -10022,6 +10066,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
ASSERT(*scope_info != SerializedScopeInfo::Empty());
ScopeInfo<> info(*scope_info);
// Get the locals names and values into a temporary array.
@ -10033,23 +10078,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
// Fill in the values of the locals.
if (is_optimized_frame) {
// If we are inspecting an optimized frame use undefined as the
// value for all locals.
//
// TODO(1140): We should be able to get the correct values
// for locals in optimized frames.
for (int i = 0; i < info.NumberOfLocals(); i++) {
locals->set(i * 2, *info.LocalName(i));
locals->set(i * 2 + 1, isolate->heap()->undefined_value());
}
} else {
int i = 0;
for (; i < info.number_of_stack_slots(); ++i) {
// Use the value from the stack.
locals->set(i * 2, *info.LocalName(i));
int i = 0;
for (; i < info.number_of_stack_slots(); ++i) {
// Use the value from the stack.
locals->set(i * 2, *info.LocalName(i));
if (it.frame()->is_optimized()) {
// Get the value from the deoptimized frame.
locals->set(i * 2 + 1,
deoptimized_frame->GetExpression(i));
} else {
// Get the value from the stack.
locals->set(i * 2 + 1, it.frame()->GetExpression(i));
}
}
if (i < info.NumberOfLocals()) {
// Get the context containing declarations.
Handle<Context> context(
Context::cast(it.frame()->context())->declaration_context());
@ -10064,7 +10106,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Check whether this frame is positioned at a return. If it is not the
// top frame or if the frame is optimized, it cannot be at a return.
bool at_return = false;
if (!is_optimized_frame && index == 0) {
if (!it.frame()->is_optimized() && index == 0) {
at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
@ -10145,10 +10187,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Add the at return information.
details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
// Add information on whether this frame is invoked in the debugger context.
details->set(kFrameDetailsDebuggerFrameIndex,
heap->ToBoolean(*save->context() ==
*isolate->debug()->debug_context()));
// Add flags that indicate whether this frame is:
// bit 0: invoked in the debugger context.
// bit 1: an optimized frame.
// bit 2: inlined in an optimized frame.
int flags = 0;
if (*save->context() == *isolate->debug()->debug_context()) {
flags |= 1 << 0;
}
if (it.frame()->is_optimized()) {
flags |= 1 << 1;
if (deoptimized_frame_index > 0) {
flags |= 1 << 2;
}
}
details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
// Fill the dynamic part.
int details_index = kFrameDetailsFirstDynamicIndex;
@ -10167,7 +10220,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
//
// TODO(3141533): We should be able to get the actual parameter
// value for optimized frames.
if (!is_optimized_frame &&
if (!it.frame()->is_optimized() &&
(i < it.frame()->ComputeParametersCount())) {
details->set(details_index++, it.frame()->GetParameter(i));
} else {
@ -10203,6 +10256,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
details->set(kFrameDetailsReceiverIndex, *receiver);
// Get rid of the calculated deoptimized frame if any.
if (deoptimized_frame != NULL) {
Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame,
isolate);
}
ASSERT_EQ(details_size, details_index);
return *isolate->factory()->NewJSArrayWithElements(details);
}
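The packed flags word written above replaces the single debugger-frame boolean. A small sketch of how a consumer might decode it (the real consumer is the debugger's JavaScript mirror code; names here are invented):

#include <cassert>

// Decode the packed frame-details flags word.
struct FrameFlags {
  bool debugger_frame;  // bit 0
  bool optimized;       // bit 1
  bool inlined;         // bit 2
};

static FrameFlags DecodeFrameFlags(int flags) {
  FrameFlags result;
  result.debugger_frame = (flags & (1 << 0)) != 0;
  result.optimized = (flags & (1 << 1)) != 0;
  result.inlined = (flags & (1 << 2)) != 0;
  return result;
}

int main() {
  FrameFlags f = DecodeFrameFlags((1 << 1) | (1 << 2));
  assert(!f.debugger_frame && f.optimized && f.inlined);
  return 0;
}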
@ -10263,7 +10322,7 @@ static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
}
// Second fill all stack locals.
for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
@ -10274,37 +10333,40 @@ static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
Handle<JSObject>());
}
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
if (!CopyContextLocalsToScopeObject(isolate,
serialized_scope_info, scope_info,
function_context, local_scope)) {
return Handle<JSObject>();
}
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
key,
GetProperty(ext, key),
NONE,
kNonStrictMode),
Handle<JSObject>());
if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
if (!CopyContextLocalsToScopeObject(isolate,
serialized_scope_info, scope_info,
function_context, local_scope)) {
return Handle<JSObject>();
}
// Finally copy any properties from the function context extension.
// These will be variables introduced by eval.
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
key,
GetProperty(ext, key),
NONE,
kNonStrictMode),
Handle<JSObject>());
}
}
}
}
return local_scope;
}
@ -12074,22 +12136,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
#ifdef ENABLE_LOGGING_AND_PROFILING
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
@ -12451,6 +12505,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
}
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \
CONVERT_CHECKED(JSObject, obj, args[0]); \
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
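The block above uses the stamp-out-and-#undef X-macro idiom: one macro generates a family of near-identical runtime functions, and #undef keeps it from leaking past its use site. A self-contained illustration of the same pattern with hypothetical predicates:

#include <cstdio>

static bool HasFastElements() { return true; }
static bool HasDictionaryElements() { return false; }

// One macro stamps out one reporting function per predicate.
#define ELEMENTS_KIND_CHECK_FUNCTION(Name)      \
  static void Report##Name() {                  \
    std::printf(#Name ": %d\n", Has##Name());   \
  }

ELEMENTS_KIND_CHECK_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_FUNCTION(DictionaryElements)

#undef ELEMENTS_KIND_CHECK_FUNCTION

int main() {
  ReportFastElements();        // FastElements: 1
  ReportDictionaryElements();  // DictionaryElements: 0
  return 0;
}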
// ----------------------------------------------------------------------------
// Implementation of Runtime

25
deps/v8/src/runtime.h

@ -334,7 +334,23 @@ namespace internal {
F(MessageGetScript, 1, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
F(IS_VAR, 1, 1) \
\
/* expose boolean functions from objects-inl.h */ \
F(HasFastElements, 1, 1) \
F(HasFastDoubleElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasExternalPixelElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
F(HasExternalByteElements, 1, 1) \
F(HasExternalUnsignedByteElements, 1, 1) \
F(HasExternalShortElements, 1, 1) \
F(HasExternalUnsignedShortElements, 1, 1) \
F(HasExternalIntElements, 1, 1) \
F(HasExternalUnsignedIntElements, 1, 1) \
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
@ -413,8 +429,8 @@ namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
F(ProfilerResume, 2, 1) \
F(ProfilerPause, 2, 1)
F(ProfilerResume, 0, 1) \
F(ProfilerPause, 0, 1)
#else
#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
#endif
@ -470,7 +486,8 @@ namespace internal {
F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
F(FastAsciiArrayJoin, 2, 1)
F(FastAsciiArrayJoin, 2, 1) \
F(IsNativeOrStrictMode, 1, 1)
// ----------------------------------------------------------------------------

150
deps/v8/src/scopes.cc

@ -119,9 +119,9 @@ Scope::Scope(Type type)
temps_(0),
params_(0),
unresolved_(0),
decls_(0) {
decls_(0),
already_resolved_(false) {
SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
ASSERT(!resolved());
}
@ -131,14 +131,14 @@ Scope::Scope(Scope* outer_scope, Type type)
temps_(4),
params_(4),
unresolved_(16),
decls_(4) {
decls_(4),
already_resolved_(false) {
SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
ASSERT(!HasIllegalRedeclaration());
ASSERT(!resolved());
}
@ -148,15 +148,34 @@ Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
temps_(4),
params_(4),
unresolved_(16),
decls_(4) {
decls_(4),
already_resolved_(true) {
ASSERT(!scope_info.is_null());
SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
ASSERT(resolved());
if (scope_info->HasHeapAllocatedLocals()) {
num_heap_slots_ = scope_info_->NumberOfContextSlots();
}
AddInnerScope(inner_scope);
}
Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
: inner_scopes_(1),
variables_(),
temps_(0),
params_(0),
unresolved_(0),
decls_(0),
already_resolved_(true) {
SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
AddInnerScope(inner_scope);
++num_var_or_const_;
Variable* variable = variables_.Declare(this,
catch_variable_name,
Variable::VAR,
true, // Valid left-hand side.
Variable::NORMAL);
AllocateHeapSlot(variable);
}
@ -190,30 +209,43 @@ void Scope::SetDefaults(Type type,
Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
Scope* global_scope) {
// Reconstruct the outer scope chain from a closure's context chain.
ASSERT(!info->closure().is_null());
// If we have a serialized scope info, reuse it.
Context* context = info->closure()->context();
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
Scope* scope = NULL;
SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
if (scope_info != SerializedScopeInfo::Empty()) {
JSFunction* current = *info->closure();
do {
current = current->context()->closure();
Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
if (*scope_info != SerializedScopeInfo::Empty()) {
scope = new Scope(scope, scope_info);
if (innermost_scope == NULL) innermost_scope = scope;
bool contains_with = false;
while (!context->IsGlobalContext()) {
if (context->IsWithContext()) {
// All the inner scopes are inside a with.
contains_with = true;
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
} else {
if (context->IsFunctionContext()) {
SerializedScopeInfo* scope_info =
context->closure()->shared()->scope_info();
current_scope =
new Scope(current_scope, Handle<SerializedScopeInfo>(scope_info));
} else {
ASSERT(current->context()->IsGlobalContext());
ASSERT(context->IsCatchContext());
String* name = String::cast(context->extension());
current_scope = new Scope(current_scope, Handle<String>(name));
}
} while (!current->context()->IsGlobalContext());
}
if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
}
global_scope->AddInnerScope(scope);
if (innermost_scope == NULL) innermost_scope = global_scope;
// Forget about a with when we move to a context for a different function.
if (context->previous()->closure() != context->closure()) {
contains_with = false;
}
context = context->previous();
}
return innermost_scope;
global_scope->AddInnerScope(current_scope);
return (innermost_scope == NULL) ? global_scope : innermost_scope;
}
@ -238,7 +270,7 @@ bool Scope::Analyze(CompilationInfo* info) {
void Scope::Initialize(bool inside_with) {
ASSERT(!resolved());
ASSERT(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
@ -256,11 +288,16 @@ void Scope::Initialize(bool inside_with) {
// instead load them directly from the stack. Currently, the only
// such parameter is 'this' which is passed on the stack when
// invoking scripts.
Variable* var =
variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
false, Variable::THIS);
var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
if (is_catch_scope()) {
ASSERT(outer_scope() != NULL);
receiver_ = outer_scope()->receiver();
} else {
Variable* var =
variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
false, Variable::THIS);
var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
}
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
@ -274,11 +311,10 @@ void Scope::Initialize(bool inside_with) {
Variable* Scope::LocalLookup(Handle<String> name) {
Variable* result = variables_.Lookup(name);
if (result != NULL || !resolved()) {
if (result != NULL || scope_info_.is_null()) {
return result;
}
// If the scope is resolved, we can find a variable in serialized scope
// info.
// If we have a serialized scope info, we might find the variable there.
//
// We should never lookup 'arguments' in this scope as it is implicitly
// present in every scope.
@ -326,7 +362,7 @@ Variable* Scope::DeclareFunctionVar(Handle<String> name) {
void Scope::DeclareParameter(Handle<String> name) {
ASSERT(!resolved());
ASSERT(!already_resolved());
ASSERT(is_function_scope());
Variable* var =
variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
@ -335,7 +371,7 @@ void Scope::DeclareParameter(Handle<String> name) {
Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
ASSERT(!resolved());
ASSERT(!already_resolved());
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
@ -358,7 +394,7 @@ VariableProxy* Scope::NewUnresolved(Handle<String> name,
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
ASSERT(!resolved());
ASSERT(!already_resolved());
VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
unresolved_.Add(proxy);
return proxy;
@ -378,7 +414,7 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!resolved());
ASSERT(!already_resolved());
Variable* var =
new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
temps_.Add(var);
@ -508,12 +544,22 @@ int Scope::ContextChainLength(Scope* scope) {
}
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (scope->is_catch_scope()) {
scope = scope->outer_scope();
}
return scope;
}
#ifdef DEBUG
static const char* Header(Scope::Type type) {
switch (type) {
case Scope::EVAL_SCOPE: return "eval";
case Scope::FUNCTION_SCOPE: return "function";
case Scope::GLOBAL_SCOPE: return "global";
case Scope::CATCH_SCOPE: return "catch";
}
UNREACHABLE();
return NULL;
@ -864,8 +910,10 @@ bool Scope::MustAllocate(Variable* var) {
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_)) {
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
scope_contains_with_ ||
is_catch_scope())) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
@ -874,16 +922,20 @@ bool Scope::MustAllocate(Variable* var) {
bool Scope::MustAllocateInContext(Variable* var) {
// If var is accessed from an inner scope, or if there is a
// possibility that it might be accessed from the current or an inner
// scope (through an eval() call), it must be allocated in the
// context. Exception: temporary variables are not allocated in the
// If var is accessed from an inner scope, or if there is a possibility
// that it might be accessed from the current or an inner scope (through
// an eval() call or a runtime with lookup), it must be allocated in the
// context.
return
var->mode() != Variable::TEMPORARY &&
(var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_ || var->is_global());
//
// Exceptions: temporary variables are never allocated in a context;
// catch-bound variables are always allocated in a context.
if (var->mode() == Variable::TEMPORARY) return false;
if (is_catch_scope()) return true;
return var->is_accessed_from_inner_scope() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
scope_contains_with_ ||
var->is_global();
}
@ -1010,7 +1062,7 @@ void Scope::AllocateVariablesRecursively() {
// If scope is already resolved, we still need to allocate
// variables in inner scopes which might not have been resolved yet.
if (resolved()) return;
if (already_resolved()) return;
// The number of slots required for variables.
num_stack_slots_ = 0;
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
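
A catch scope binds only the catch variable, so 'var' declarations must hoist past it; that is what the new Scope::DeclarationScope() walk above implements. A minimal sketch of the walk, with a hypothetical DemoScope standing in for V8's Scope:

#include <cassert>

enum DemoScopeType { FUNCTION_SCOPE, GLOBAL_SCOPE, EVAL_SCOPE, CATCH_SCOPE };

struct DemoScope {
  DemoScopeType type;
  DemoScope* outer;
  // 'var' hoists to the nearest function/global/eval scope.
  DemoScope* DeclarationScope() {
    DemoScope* s = this;
    while (s->type == CATCH_SCOPE) s = s->outer;  // skip catch bindings
    return s;
  }
};

int main() {
  DemoScope fn = {FUNCTION_SCOPE, nullptr};
  DemoScope catch_scope = {CATCH_SCOPE, &fn};  // from catch (e) { ... }
  assert(catch_scope.DeclarationScope() == &fn);
  return 0;
}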

31
deps/v8/src/scopes.h

@ -90,9 +90,10 @@ class Scope: public ZoneObject {
// Construction
enum Type {
EVAL_SCOPE, // the top-level scope for an 'eval' source
FUNCTION_SCOPE, // the top-level scope for a function
GLOBAL_SCOPE // the top-level scope for a program or a top-level eval
EVAL_SCOPE, // The top-level scope for an eval source.
FUNCTION_SCOPE, // The top-level scope for a function.
GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
CATCH_SCOPE // The scope introduced by catch.
};
Scope(Scope* outer_scope, Type type);
@ -202,6 +203,7 @@ class Scope: public ZoneObject {
bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
bool is_strict_mode() const { return strict_mode_; }
bool is_strict_mode_eval_scope() const {
return is_eval_scope() && is_strict_mode();
@ -225,13 +227,8 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Accessors.
// A new variable proxy corresponding to the (function) receiver.
VariableProxy* receiver() const {
VariableProxy* proxy =
new VariableProxy(FACTORY->this_symbol(), true, false);
proxy->BindTo(receiver_);
return proxy;
}
// The variable corresponding the 'this' value.
Variable* receiver() { return receiver_; }
// The variable holding the function literal for named function
// literals, or NULL.
@ -293,6 +290,10 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
// Find the first function, global, or eval scope. This is the scope
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(Handle<String> name) {
@ -367,6 +368,10 @@ class Scope: public ZoneObject {
bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
// True if it doesn't need scope resolution (e.g., if the scope was
// constructed based on a serialized scope info or a catch context).
bool already_resolved_;
// Computed as variables are declared.
int num_var_or_const_;
@ -376,7 +381,7 @@ class Scope: public ZoneObject {
// Serialized scopes support.
Handle<SerializedScopeInfo> scope_info_;
bool resolved() { return !scope_info_.is_null(); }
bool already_resolved() { return already_resolved_; }
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
@ -412,8 +417,12 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively();
private:
// Construct a function scope based on the scope info.
Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
Scope(Scope* inner_scope, Handle<String> catch_variable_name);
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
inner_scopes_.Add(inner_scope);

11
deps/v8/src/string.js

@ -251,7 +251,9 @@ function StringReplace(search, replace) {
// Compute the string to replace with.
if (IS_FUNCTION(replace)) {
builder.add(%_CallFunction(%GetGlobalReceiver(),
var receiver =
%_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
builder.add(%_CallFunction(receiver,
search,
start,
subject,
@ -418,7 +420,8 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
var override = new InternalArray(null, 0, subject);
var receiver = %GetGlobalReceiver();
var receiver =
%_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
while (i < len) {
var elem = res[i];
if (%_IsSmi(elem)) {
@ -475,8 +478,10 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// No captures, only the match, which is always valid.
var s = SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
var receiver =
%_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
replacement =
%_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
%_CallFunction(receiver, s, index, subject, replace);
} else {
var parameters = new InternalArray(m + 2);
for (var j = 0; j < m; j++) {

4
deps/v8/src/stub-cache.cc

@ -1694,6 +1694,8 @@ MaybeObject* KeyedLoadStubCompiler::ComputeSharedKeyedLoadElementStub(
} else if (receiver_map->has_external_array_elements()) {
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
maybe_stub = KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
} else if (receiver_map->has_dictionary_elements()) {
maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Slow);
} else {
UNREACHABLE();
}
@ -1746,6 +1748,8 @@ MaybeObject* KeyedStoreStubCompiler::ComputeSharedKeyedStoreElementStub(
} else if (receiver_map->has_external_array_elements()) {
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
maybe_stub = KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
} else if (receiver_map->has_dictionary_elements()) {
maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Slow);
} else {
UNREACHABLE();
}
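
The two hunks above route dictionary-mode elements to a shared slow stub instead of falling through to UNREACHABLE(). A rough sketch of the dispatch shape (names here are illustrative, not V8's stub-compiler API):

#include <cstdio>

enum class DemoElementsKind { kFast, kFastDouble, kExternal, kDictionary };

const char* SelectKeyedLoadStub(DemoElementsKind kind) {
  switch (kind) {
    case DemoElementsKind::kFast:       return "KeyedLoadFastElementStub";
    case DemoElementsKind::kFastDouble: return "KeyedLoadFastDoubleElementStub";
    case DemoElementsKind::kExternal:   return "KeyedLoadExternalArrayStub";
    case DemoElementsKind::kDictionary: return "KeyedLoadIC_Slow";  // new fallback
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", SelectKeyedLoadStub(DemoElementsKind::kDictionary));
  return 0;
}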

1
deps/v8/src/type-info.cc

@ -122,6 +122,7 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
Builtins* builtins = Isolate::Current()->builtins();
return code->is_keyed_store_stub() &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
code->ic_state() == MEGAMORPHIC;
}
return false;

4
deps/v8/src/v8-counters.h

@ -170,14 +170,10 @@ namespace internal {
SC(named_load_inline_field, V8.NamedLoadInlineFast) \
SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
SC(named_load_full, V8.NamedLoadFull) \
SC(keyed_load_full, V8.KeyedLoadFull) \
SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
SC(keyed_store_full, V8.KeyedStoreFull) \
SC(named_store_full, V8.NamedStoreFull) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \

44
deps/v8/src/v8.cc

@ -100,42 +100,34 @@ void V8::TearDown() {
}
static uint32_t random_seed() {
if (FLAG_random_seed == 0) {
return random();
static void seed_random(uint32_t* state) {
for (int i = 0; i < 2; ++i) {
state[i] = FLAG_random_seed;
while (state[i] == 0) {
state[i] = random();
}
}
return FLAG_random_seed;
}
typedef struct {
uint32_t hi;
uint32_t lo;
} random_state;
// Random number generator using George Marsaglia's MWC algorithm.
static uint32_t random_base(uint32_t* state) {
// Initialize seed using the system random().
// No non-zero seed will ever become zero again.
if (state[0] == 0) seed_random(state);
// Mix the bits. Never replaces state[i] with 0 if it is nonzero.
state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
// Random number generator using George Marsaglia's MWC algorithm.
static uint32_t random_base(random_state *state) {
// Initialize seed using the system random(). If one of the seeds
// should ever become zero again, or if random() returns zero, we
// avoid getting stuck with zero bits in hi or lo by re-initializing
// them on demand.
if (state->hi == 0) state->hi = random_seed();
if (state->lo == 0) state->lo = random_seed();
// Mix the bits.
state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
return (state->hi << 16) + (state->lo & 0xFFFF);
return (state[0] << 14) + (state[1] & 0x3FFFF);
}
// Used by JavaScript APIs
uint32_t V8::Random(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
// TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
return random_base(isolate->random_seed());
}
@ -144,9 +136,7 @@ uint32_t V8::Random(Isolate* isolate) {
// leaks that could be used in an exploit.
uint32_t V8::RandomPrivate(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
// TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
return random_base(isolate->private_random_seed());
}
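
The rewritten generator keeps two 16-bit multiply-with-carry lanes per isolate and mixes them as (state[0] << 14) + (state[1] & 0x3FFFF). A standalone sketch using the same constants; DemoRandomBase is a hypothetical name and seeding is reduced to fixed nonzero values:

#include <cstdint>
#include <cstdio>

// George Marsaglia's MWC step, as in the new random_base() above.
// A nonzero lane can never become zero again.
static uint32_t DemoRandomBase(uint32_t* state) {
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  return (state[0] << 14) + (state[1] & 0x3FFFF);
}

int main() {
  uint32_t state[2] = {1, 2};  // stands in for seed_random()
  for (int i = 0; i < 3; ++i) std::printf("%u\n", DemoRandomBase(state));
  return 0;
}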

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 4
#define BUILD_NUMBER 8
#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

6
deps/v8/src/x64/assembler-x64.h

@ -1168,7 +1168,7 @@ class Assembler : public AssemblerBase {
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Handle<Code> target,
RelocInfo::Mode rmode,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Calls directly to the given address using a relative offset.
@ -1350,7 +1350,9 @@ class Assembler : public AssemblerBase {
void Print();
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
}
// Mark address of the ExitJSFrame code.
void RecordJSReturn();

13
deps/v8/src/x64/code-stubs-x64.cc

@ -424,12 +424,10 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rax);
// Left and right arguments are now on top.
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
__ push(rax); // the operand
__ Push(Smi::FromInt(op_));
__ Push(Smi::FromInt(mode_));
__ Push(Smi::FromInt(operand_type_));
__ push(rcx); // Push return address.
@ -437,10 +435,7 @@ void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
masm->isolate()),
4,
1);
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}

16
deps/v8/src/x64/code-stubs-x64.h

@ -61,18 +61,11 @@ class TranscendentalCacheStub: public CodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
}
UnaryOpStub(
int key,
UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operand_type_(operand_type),
name_(NULL) {
}
@ -90,8 +83,7 @@ class UnaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
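
The second UnaryOpStub constructor above rebuilds op_ and mode_ from a packed key via OpBits::decode and ModeBits::decode. A sketch of that encode/decode round trip; DemoOpBits and DemoModeBits are hypothetical stand-ins for V8's BitField<> helpers, and the field widths here are invented:

#include <cassert>

struct DemoOpBits {  // low 7 bits of the key
  static int encode(int v) { return v & 0x7F; }
  static int decode(int key) { return key & 0x7F; }
};
struct DemoModeBits {  // 1 bit above DemoOpBits
  static int encode(int v) { return (v & 1) << 7; }
  static int decode(int key) { return (key >> 7) & 1; }
};

int main() {
  int key = DemoOpBits::encode(42) | DemoModeBits::encode(1);  // MinorKey()
  assert(DemoOpBits::decode(key) == 42);   // recovers op_
  assert(DemoModeBits::decode(key) == 1);  // recovers mode_
  return 0;
}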

32
deps/v8/src/x64/deoptimizer-x64.cc

@ -316,7 +316,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
@ -340,6 +340,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@ -448,12 +451,15 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@ -584,7 +590,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost) {
if (is_topmost && bailout_type_ != DEBUGGER) {
Code* continuation = (bailout_type_ == EAGER)
? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
: isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
@ -596,6 +602,26 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers rbp and rsp are set to the correct values though.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
}
}
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
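
The new FillInputFrame above snapshots the live stack into the input frame description one pointer-sized slot at a time. A host-side sketch of that copy loop, assuming kPointerSize == 8 on x64; DemoFrame and FillFromStack are hypothetical:

#include <cstdint>
#include <cstring>
#include <vector>

struct DemoFrame {
  std::vector<uint64_t> slots;
  void SetFrameSlot(unsigned offset, uint64_t v) { slots[offset / 8] = v; }
};

// Mirrors the loop over Memory::uint64_at(tos + i) in FillInputFrame.
void FillFromStack(DemoFrame* input, const uint8_t* tos, unsigned frame_size) {
  for (unsigned i = 0; i < frame_size; i += 8) {
    uint64_t slot;
    std::memcpy(&slot, tos + i, sizeof(slot));
    input->SetFrameSlot(i, slot);
  }
}

int main() {
  uint64_t fake_stack[2] = {0xDEAD, 0xBEEF};
  DemoFrame frame = {std::vector<uint64_t>(2)};
  FillFromStack(&frame, reinterpret_cast<const uint8_t*>(fake_stack), 16);
  return frame.slots[1] == 0xBEEF ? 0 : 1;
}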

199
deps/v8/src/x64/full-codegen-x64.cc

@ -78,16 +78,18 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
ASSERT(is_int8(delta_to_patch_site));
__ testl(rax, Immediate(delta_to_patch_site));
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
ASSERT(is_int8(delta_to_patch_site));
__ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
info_emitted_ = true;
#endif
} else {
__ nop(); // Signals no inlined code.
}
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
@ -121,6 +123,7 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@ -140,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
@ -152,7 +155,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(rdi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = scope()->num_stack_slots();
int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@ -166,7 +169,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@ -183,7 +186,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@ -215,11 +218,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ lea(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(rdx);
__ Push(Smi::FromInt(scope()->num_parameters()));
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@ -332,7 +336,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ movq(rsp, rbp);
__ pop(rbp);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -749,7 +753,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
}
}
}
@ -822,7 +826,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site, clause->CompareId());
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ testq(rax, rax);
__ j(not_equal, &next_test);
@ -1128,7 +1133,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
EmitCallIC(ic, mode, AstNode::kNoNumber);
__ call(ic, mode);
}
@ -1208,7 +1213,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ Move(rax, key_literal->handle());
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@ -1230,7 +1235,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@ -1378,7 +1383,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
__ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@ -1607,14 +1612,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@ -1636,7 +1641,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site, expr->id());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
__ bind(&smi_case);
@ -1683,8 +1689,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ pop(rdx);
BinaryOpStub stub(op, mode);
// NULL signals no inlined smi code.
EmitCallIC(stub.GetCode(), NULL, expr->id());
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@ -1724,7 +1731,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
break;
}
case KEYED_PROPERTY: {
@ -1737,7 +1744,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
break;
}
}
@ -1761,7 +1768,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@ -1854,7 +1861,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -1894,7 +1901,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -1946,7 +1953,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@ -1980,7 +1987,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@ -2020,7 +2027,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
__ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ Push(Smi::FromInt(strict_mode_flag()));
@ -2157,7 +2164,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
// for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@ -2175,7 +2182,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(rax);
// Push Global receiver.
@ -2562,7 +2569,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
@ -2574,7 +2581,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@ -3507,6 +3514,39 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
}
void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Load the function into rax.
VisitForAccumulatorValue(args->at(0));
// Prepare for the test.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// Test for strict mode function.
__ movq(rdx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, if_true);
// Test for native function.
__ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, if_true);
// Not native or strict-mode function.
__ jmp(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@ -3537,7 +3577,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
EmitCallIC(ic, mode, expr->id());
__ call(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@ -3674,7 +3714,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(rax);
}
@ -3795,7 +3835,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in rax.
@ -3828,7 +3869,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3845,7 +3886,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3872,7 +3913,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
__ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
@ -4067,7 +4108,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site, expr->id());
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@ -4126,59 +4168,6 @@ Register FullCodeGenerator::context_register() {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
RelocInfo::Mode mode,
unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
__ call(ic, mode, ast_id);
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
JumpPatchSite* patch_site,
unsigned ast_id) {
Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
__ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
__ call(ic, RelocInfo::CODE_TARGET, ast_id);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
@ -4191,19 +4180,20 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
if (scope()->is_global_scope()) {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ Push(Smi::FromInt(0));
} else if (scope()->is_eval_scope()) {
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(scope()->is_function_scope());
ASSERT(declaration_scope->is_function_scope());
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@ -4217,11 +4207,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
__ movq(rdx, Operand(rsp, 0));
__ pop(rdx);
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rsp, 0), rdx);
__ push(rdx);
// Store result register while executing finally block.
__ push(result_register());
}
@ -4230,16 +4220,13 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ movq(rdx, Operand(rsp, 0));
__ pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
__ addq(rdx, rcx);
__ movq(Operand(rsp, 0), rdx);
// And return.
__ ret(0);
__ jmp(rdx);
}
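
EnterFinallyBlock/ExitFinallyBlock now pop and re-push the "cooked" return address (a smi-encoded delta from the code object) and leave the finally block with an indirect jmp instead of ret. The cooking idea sketched on plain integers, smi tagging omitted; Cook/Uncook are hypothetical names:

#include <cassert>
#include <cstdint>

// A delta survives the GC moving the code object; a raw address would not.
static uintptr_t Cook(uintptr_t ret_address, uintptr_t code_start) {
  return ret_address - code_start;
}
static uintptr_t Uncook(uintptr_t delta, uintptr_t code_start) {
  return code_start + delta;  // rebase against the possibly-moved code
}

int main() {
  uintptr_t code = 0x1000, ret = 0x1042;
  uintptr_t delta = Cook(ret, code);
  code += 0x2000;  // pretend the collector moved the code object
  assert(Uncook(delta, code) == 0x3042);
  return 0;
}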

2
deps/v8/src/x64/ic-x64.cc

@ -1266,6 +1266,8 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ j(greater_equal, slow_case);

260
deps/v8/src/x64/lithium-codegen-x64.cc

@ -1363,7 +1363,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Representation r = instr->hydrogen()->representation();
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ testl(reg, reg);
@ -1376,7 +1376,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
HType type = instr->hydrogen()->type();
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
@ -1483,32 +1483,6 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
}
void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
Label unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
Label done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
__ j(cc, &done, Label::kNear);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@ -1529,22 +1503,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
Label different, done;
__ cmpq(left, right);
__ j(not_equal, &different, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&different);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
@ -1556,19 +1514,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label done;
__ cmpq(left, Immediate(instr->hydrogen()->right()));
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ j(equal, &done, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@ -1579,50 +1524,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
// Consider adding other type and representation tests too.
if (instr->hydrogen()->value()->type().IsSmi()) {
__ LoadRoot(result, Heap::kFalseValueRootIndex);
return;
}
__ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
ASSERT(Heap::kTrueValueRootIndex >= 0);
__ movl(result, Immediate(Heap::kTrueValueRootIndex));
Label load;
__ j(equal, &load, Label::kNear);
__ Set(result, Heap::kFalseValueRootIndex);
__ bind(&load);
__ LoadRootIndexed(result, result, 0);
} else {
Label false_value, true_value, done;
__ j(equal, &true_value, Label::kNear);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, &true_value, Label::kNear);
__ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value, Label::kNear);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@ -1685,25 +1586,6 @@ Condition LCodeGen::EmitIsObject(Register input,
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@ -1718,22 +1600,6 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
LOperand* input_operand = instr->InputAt(0);
Register result = ToRegister(instr->result());
if (input_operand->IsRegister()) {
Register input = ToRegister(input_operand);
__ CheckSmiToIndicator(result, input);
} else {
Operand input = ToOperand(instr->InputAt(0));
__ CheckSmiToIndicator(result, input);
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
__ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@ -1750,25 +1616,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Label false_label, done;
__ JumpIfSmi(input, &false_label);
__ movq(result, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(zero, &false_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@ -1784,7 +1631,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
static InstanceType TestType(HHasInstanceType* instr) {
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@ -1793,7 +1640,7 @@ static InstanceType TestType(HHasInstanceType* instr) {
}
static Condition BranchCondition(HHasInstanceType* instr) {
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@ -1804,25 +1651,6 @@ static Condition BranchCondition(HHasInstanceType* instr) {
}
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ testl(input, Immediate(kSmiTagMask));
Label done, is_false;
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())),
&is_false, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@ -1852,21 +1680,6 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
Label done;
__ j(zero, &done, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@ -1935,29 +1748,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
Label done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
__ j(not_equal, &is_false);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@ -4025,29 +3815,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
input,
instr->type_literal());
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@ -4139,25 +3906,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label done;
EmitIsConstructCall(result);
__ j(equal, &true_label, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
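
The deletions above remove the value-producing compare instructions (LCmpID, LCmpObjectEq, LIsNull, ...), leaving only the fused *AndBranch forms, so DoBranch no longer pattern-matches the operand of a test. The before/after shape, sketched in plain C++:

#include <cstdio>

// Old shape: a compare materializes a boolean, then a branch tests it.
static int MaterializeThenBranch(int a, int b) {
  bool flag = (a == b);  // LCmpID produced a true/false value
  return flag ? 1 : 0;   // LBranch consumed it
}

// New shape: the compare and the branch are one fused instruction.
static int FusedCompareAndBranch(int a, int b) {
  if (a == b) return 1;  // LCmpIDAndBranch
  return 0;
}

int main() {
  std::printf("%d %d\n", MaterializeThenBranch(1, 1), FusedCompareAndBranch(1, 2));
  return 0;
}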

199
deps/v8/src/x64/lithium-x64.cc

@ -267,12 +267,6 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
void LTypeofIs::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@ -342,13 +336,6 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@ -985,18 +972,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
HTest* test = HTest::cast(current);
instr->set_hydrogen_value(test->value());
HBasicBlock* first = test->FirstSuccessor();
HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@ -1041,81 +1017,17 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
ASSERT(!v->HasSideEffects());
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
HValue* left = compare->left();
HValue* right = compare->right();
Representation r = compare->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right));
} else {
ASSERT(r.IsDouble());
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right));
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsIsUndetectable()) {
HIsUndetectable* compare = HIsUndetectable::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsCompareObjectEq()) {
HCompareObjectEq* compare = HCompareObjectEq::cast(v);
return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else if (v->IsConstant()) {
if (v->EmitAtUses()) {
ASSERT(v->IsConstant());
ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
} else {
Abort("Undefined compare before branch");
return NULL;
}
return new LBranch(UseRegisterAtStart(v));
}
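// Editorial reconstruction (not an extra change in this diff): the new
// DoBranch pieced together from the interleaved old/new lines above. The
// long per-predicate dispatch is deleted; only the constant-folding path
// and the generic LBranch remain.
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
  HValue* v = instr->value();
  if (v->EmitAtUses()) {
    // Constants are folded to an unconditional jump to the taken successor.
    ASSERT(v->IsConstant());
    ASSERT(!v->representation().IsDouble());
    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
        ? instr->FirstSuccessor()
        : instr->SecondSuccessor();
    return new LGoto(successor->block_id());
  }
  return new LBranch(UseRegisterAtStart(v));
}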
@@ -1468,85 +1380,83 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
return new LCmpIDAndBranch(left, right);
}
}
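// Editorial reconstruction of the builder above with the deleted DoCompare
// lines filtered out; this is how DoCompareIDAndBranch reads after the
// change:
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
    HCompareIDAndBranch* instr) {
  Representation r = instr->GetInputRepresentation();
  if (r.IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseOrConstantAtStart(instr->right());
    return new LCmpIDAndBranch(left, right);
  } else {
    ASSERT(r.IsDouble());
    ASSERT(instr->left()->representation().IsDouble());
    ASSERT(instr->right()->representation().IsDouble());
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseRegisterAtStart(instr->right());
    return new LCmpIDAndBranch(left, right);
  }
}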
LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
return new LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEq(
HCompareConstantEq* instr) {
LOperand* left = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LCmpConstantEq(left));
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value));
LOperand* temp = instr->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LIsObject(value));
return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
return DefineAsRegister(new LIsSmi(value));
return new LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsUndetectable(value));
return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LHasInstanceType(value));
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1559,17 +1469,17 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LHasCachedArrayIndex(value));
return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
Abort("Unimplemented: %s", "DoClassOfTest");
return NULL;
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister());
}
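// Worth noting: the old value-producing DoClassOfTest was unimplemented on
// x64 (it bailed out with Abort("Unimplemented: DoClassOfTest")); the fused
// branch form above replaces it with a real implementation.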
@@ -2152,13 +2062,14 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall);
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
return new LIsConstructCallAndBranch(TempRegister());
}
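// The pattern throughout this file: builders that previously materialized a
// boolean via DefineAsRegister(...) or DefineSameAsFirst(...) now return a
// bare control instruction, since these predicates only ever feed a branch.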

190 deps/v8/src/x64/lithium-x64.h

@@ -77,13 +77,9 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -103,9 +99,7 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
@@ -113,15 +107,10 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -173,7 +162,6 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -233,7 +221,6 @@ class LInstruction: public ZoneObject {
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -457,16 +444,15 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
void SetBranchTargets(int true_block_id, int false_block_id) {
true_block_id_ = true_block_id;
false_block_id_ = false_block_id;
}
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
int true_block_id_;
int false_block_id_;
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
};
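// Where the old LControlInstruction stored true_block_id_ / false_block_id_
// ints that VisitInstruction filled in via SetBranchTargets, the new one
// derives both ids from the hydrogen control instruction's successors, so
// the branch targets can no longer drift out of sync with the graph.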
@@ -565,23 +551,6 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
};
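// From here down, the header deletes every value-producing twin (LCmpID,
// LCmpObjectEq, LCmpConstantEq, LIsNull, LIsObject, LIsSmi, LIsUndetectable,
// LHasInstanceType, LHasCachedArrayIndex, LClassOfTest, LTypeofIs,
// LIsConstructCall); only the *AndBranch control-instruction forms survive.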
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -590,7 +559,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -615,17 +584,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -638,17 +596,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCmpConstantEq(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
@@ -657,20 +604,7 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
bool is_strict() const { return hydrogen()->is_strict(); }
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -682,7 +616,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNull)
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -690,16 +624,6 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
@@ -707,22 +631,12 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -730,22 +644,12 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsUndetectable(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
};
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -755,22 +659,12 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
};
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -779,7 +673,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -796,17 +690,6 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -815,19 +698,7 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
virtual void PrintDataTo(StringStream* stream);
};
class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
public:
LClassOfTest(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -842,7 +713,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -856,7 +727,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1002,7 +873,7 @@ class LBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
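// The DECLARE_HYDROGEN_ACCESSOR switches in this file (Compare ->
// CompareIDAndBranch, Value -> Branch, CompareGeneric for LCmpT, and so on)
// simply track the corresponding renames on the hydrogen side.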
@@ -1972,21 +1843,6 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream);
};
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -1994,7 +1850,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2002,13 +1858,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -2017,6 +1866,7 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
};
