
Upgrade V8 to 2.2.4.2

v0.7.4-release
Ryan Dahl
commit e72b7b8002
  1. deps/v8/ChangeLog (9)
  2. deps/v8/SConstruct (30)
  3. deps/v8/include/v8.h (2)
  4. deps/v8/src/api.cc (16)
  5. deps/v8/src/arm/codegen-arm.cc (490)
  6. deps/v8/src/arm/codegen-arm.h (11)
  7. deps/v8/src/arm/full-codegen-arm.cc (3)
  8. deps/v8/src/arm/macro-assembler-arm.cc (92)
  9. deps/v8/src/arm/macro-assembler-arm.h (4)
  10. deps/v8/src/arm/regexp-macro-assembler-arm.cc (11)
  11. deps/v8/src/arm/regexp-macro-assembler-arm.h (6)
  12. deps/v8/src/arm/simulator-arm.cc (25)
  13. deps/v8/src/arm/stub-cache-arm.cc (88)
  14. deps/v8/src/arm/virtual-frame-arm.cc (3)
  15. deps/v8/src/arm/virtual-frame-arm.h (11)
  16. deps/v8/src/array.js (8)
  17. deps/v8/src/assembler.cc (8)
  18. deps/v8/src/assembler.h (2)
  19. deps/v8/src/bootstrapper.cc (41)
  20. deps/v8/src/codegen.h (4)
  21. deps/v8/src/compilation-cache.cc (2)
  22. deps/v8/src/compilation-cache.h (17)
  23. deps/v8/src/compiler.cc (2)
  24. deps/v8/src/contexts.h (2)
  25. deps/v8/src/cpu-profiler-inl.h (5)
  26. deps/v8/src/cpu-profiler.cc (26)
  27. deps/v8/src/cpu-profiler.h (8)
  28. deps/v8/src/d8-debug.cc (19)
  29. deps/v8/src/d8.cc (2)
  30. deps/v8/src/d8.h (1)
  31. deps/v8/src/d8.js (11)
  32. deps/v8/src/debug-debugger.js (6)
  33. deps/v8/src/execution.cc (3)
  34. deps/v8/src/factory.cc (7)
  35. deps/v8/src/factory.h (4)
  36. deps/v8/src/flag-definitions.h (4)
  37. deps/v8/src/globals.h (9)
  38. deps/v8/src/handles.cc (32)
  39. deps/v8/src/handles.h (5)
  40. deps/v8/src/heap-inl.h (4)
  41. deps/v8/src/heap.cc (64)
  42. deps/v8/src/heap.h (13)
  43. deps/v8/src/ia32/codegen-ia32.cc (523)
  44. deps/v8/src/ia32/codegen-ia32.h (103)
  45. deps/v8/src/ia32/full-codegen-ia32.cc (3)
  46. deps/v8/src/ia32/ic-ia32.cc (75)
  47. deps/v8/src/ia32/macro-assembler-ia32.cc (62)
  48. deps/v8/src/ia32/macro-assembler-ia32.h (3)
  49. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (4)
  50. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (6)
  51. deps/v8/src/ia32/stub-cache-ia32.cc (82)
  52. deps/v8/src/ic.cc (7)
  53. deps/v8/src/jsregexp.cc (34)
  54. deps/v8/src/jsregexp.h (6)
  55. deps/v8/src/liveedit-debugger.js (748)
  56. deps/v8/src/liveedit.cc (352)
  57. deps/v8/src/liveedit.h (38)
  58. deps/v8/src/log-inl.h (4)
  59. deps/v8/src/log.h (20)
  60. deps/v8/src/macros.py (3)
  61. deps/v8/src/mips/simulator-mips.cc (4)
  62. deps/v8/src/objects-debug.cc (1)
  63. deps/v8/src/objects-inl.h (26)
  64. deps/v8/src/objects.cc (8)
  65. deps/v8/src/objects.h (44)
  66. deps/v8/src/parser.cc (4)
  67. deps/v8/src/platform-linux.cc (7)
  68. deps/v8/src/platform-macos.cc (5)
  69. deps/v8/src/platform-win32.cc (5)
  70. deps/v8/src/profile-generator-inl.h (9)
  71. deps/v8/src/profile-generator.cc (86)
  72. deps/v8/src/profile-generator.h (69)
  73. deps/v8/src/regexp-macro-assembler-irregexp-inl.h (4)
  74. deps/v8/src/regexp-macro-assembler-irregexp.cc (4)
  75. deps/v8/src/regexp-macro-assembler-irregexp.h (4)
  76. deps/v8/src/regexp-macro-assembler.cc (5)
  77. deps/v8/src/regexp-macro-assembler.h (4)
  78. deps/v8/src/runtime.cc (136)
  79. deps/v8/src/runtime.h (3)
  80. deps/v8/src/serialize.cc (4)
  81. deps/v8/src/serialize.h (3)
  82. deps/v8/src/string.js (14)
  83. deps/v8/src/stub-cache.cc (32)
  84. deps/v8/src/stub-cache.h (13)
  85. deps/v8/src/version.cc (6)
  86. deps/v8/src/x64/assembler-x64.cc (10)
  87. deps/v8/src/x64/builtins-x64.cc (2)
  88. deps/v8/src/x64/codegen-x64.cc (121)
  89. deps/v8/src/x64/codegen-x64.h (3)
  90. deps/v8/src/x64/full-codegen-x64.cc (3)
  91. deps/v8/src/x64/macro-assembler-x64.cc (62)
  92. deps/v8/src/x64/macro-assembler-x64.h (3)
  93. deps/v8/src/x64/regexp-macro-assembler-x64.cc (4)
  94. deps/v8/src/x64/regexp-macro-assembler-x64.h (4)
  95. deps/v8/src/x64/stub-cache-x64.cc (85)
  96. deps/v8/test/cctest/SConscript (1)
  97. deps/v8/test/cctest/test-cpu-profiler.cc (6)
  98. deps/v8/test/cctest/test-debug.cc (4)
  99. deps/v8/test/cctest/test-liveedit.cc (174)
  100. deps/v8/test/cctest/test-profile-generator.cc (59)

deps/v8/ChangeLog (9)

@ -1,3 +1,12 @@
2010-04-21: Version 2.2.4
Fixed warnings on arm on newer GCC versions.
Fixed a number of minor bugs.
Performance improvements on all platforms.
2010-04-14: Version 2.2.3
Added stack command and mem command to ARM simulator debugger.

deps/v8/SConstruct (30)

@ -42,6 +42,18 @@ ANDROID_TOP = os.environ.get('TOP')
if ANDROID_TOP is None:
ANDROID_TOP=""
# ARM_TARGET_LIB is the path to the dynamic library to use on the target
# machine if cross-compiling to an arm machine. You will also need to set
# the additional cross-compiling environment variables to point to the cross
# compiler.
ARM_TARGET_LIB = os.environ.get('ARM_TARGET_LIB')
if ARM_TARGET_LIB:
ARM_LINK_FLAGS = ['-Wl,-rpath=' + ARM_TARGET_LIB + '/lib:' +
ARM_TARGET_LIB + '/usr/lib',
'-Wl,--dynamic-linker=' + ARM_TARGET_LIB +
'/lib/ld-linux.so.3']
else:
ARM_LINK_FLAGS = []
# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
@ -99,8 +111,8 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
'regexp:interpreted': {
'CPPDEFINES': ['V8_INTERPRETED_REGEXP']
},
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS']
@ -114,9 +126,6 @@ LIBRARY_FLAGS = {
'profilingsupport:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_LOGGING_AND_PROFILING'],
},
'cppprofilesprocessor:on': {
'CPPDEFINES': ['ENABLE_CPP_PROFILES_PROCESSOR'],
},
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
}
@ -419,6 +428,9 @@ CCTEST_EXTRA_FLAGS = {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
},
},
'msvc': {
'all': {
@ -483,6 +495,9 @@ SAMPLE_FLAGS = {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
},
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
@ -695,11 +710,6 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'enable profiling of JavaScript code'
},
'cppprofilesprocessor': {
'values': ['on', 'off'],
'default': 'on',
'help': 'enable C++ profiles processor'
},
'debuggersupport': {
'values': ['on', 'off'],
'default': 'on',

deps/v8/include/v8.h (2)

@ -856,6 +856,8 @@ class V8EXPORT String : public Primitive {
* copying begins.
* \param length The number of bytes to copy from the string.
* \param nchars_ref The number of characters written, can be NULL.
* \param hints Various hints that might affect performance of this or
* subsequent operations.
* \return The number of bytes copied to the buffer
* excluding the NULL terminator.
*/

deps/v8/src/api.cc (16)

@ -4027,7 +4027,7 @@ Local<Context> Debug::GetDebugContext() {
#endif // ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
Handle<String> CpuProfileNode::GetFunctionName() const {
IsDeadCheck("v8::CpuProfileNode::GetFunctionName");
@ -4058,6 +4058,18 @@ int CpuProfileNode::GetLineNumber() const {
}
double CpuProfileNode::GetTotalTime() const {
IsDeadCheck("v8::CpuProfileNode::GetTotalTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
}
double CpuProfileNode::GetSelfTime() const {
IsDeadCheck("v8::CpuProfileNode::GetSelfTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
}
double CpuProfileNode::GetTotalSamplesCount() const {
IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
@ -4148,7 +4160,7 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
}
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
namespace internal {
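
GetTotalTime and GetSelfTime expose per-node timings in milliseconds through the public profiler API. A sketch of walking a profile tree with them; GetChildrenCount and GetChild are assumed from the same v8-profiler.h:

    // Hedged sketch: print self/total times for each node of a CPU profile.
    #include <v8-profiler.h>
    #include <cstdio>

    static void PrintNode(const v8::CpuProfileNode* node, int depth) {
      v8::String::Utf8Value name(node->GetFunctionName());
      std::printf("%*s%s: self %.2f ms, total %.2f ms\n", depth * 2, "",
                  *name != NULL ? *name : "?",
                  node->GetSelfTime(), node->GetTotalTime());
      // GetChildrenCount/GetChild are assumed from v8-profiler.h.
      for (int i = 0; i < node->GetChildrenCount(); i++) {
        PrintNode(node->GetChild(i), depth + 1);
      }
    }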

deps/v8/src/arm/codegen-arm.cc (490)

@ -32,7 +32,10 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
@ -130,6 +133,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
allocator_(NULL),
cc_reg_(al),
state_(NULL),
loop_nesting_(0),
function_return_is_shadowed_(false) {
}
@ -153,6 +157,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
ASSERT(frame_ == NULL);
frame_ = new VirtualFrame();
cc_reg_ = al;
// Adjust for function-level loop nesting.
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ = info->loop_nesting();
{
CodeGenState state(this);
@ -377,6 +386,10 @@ void CodeGenerator::Generate(CompilationInfo* info) {
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
// Adjust for function-level loop nesting.
ASSERT(loop_nesting_ == info->loop_nesting());
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(!has_cc());
ASSERT(state_ == NULL);
@ -1882,6 +1895,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
JumpTarget body(JumpTarget::BIDIRECTIONAL);
IncrementLoopNesting();
// Label the top of the loop for the backward CFG edge. If the test
// is always true we can use the continue target, and if the test is
@ -1942,6 +1956,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
DecrementLoopNesting();
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@ -1960,6 +1975,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (info == ALWAYS_FALSE) return;
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
IncrementLoopNesting();
// Label the top of the loop with the continue target for the backward
// CFG edge.
@ -1991,6 +2007,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
DecrementLoopNesting();
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@ -2012,6 +2029,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
if (info == ALWAYS_FALSE) return;
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
IncrementLoopNesting();
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
@ -2066,6 +2084,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
DecrementLoopNesting();
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@ -4015,8 +4034,8 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
Load(args->at(3));
frame_->CallRuntime(Runtime::kRegExpExec, 4);
RegExpExecStub stub;
frame_->CallStub(&stub, 4);
frame_->EmitPush(r0);
}
@ -4115,6 +4134,72 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
: dst_(dst), cache_(cache), key_(key) {
set_comment("[ DeferredSearchCache");
}
virtual void Generate();
private:
Register dst_, cache_, key_;
};
void DeferredSearchCache::Generate() {
__ push(cache_);
__ push(key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
if (!dst_.is(r0)) {
__ mov(dst_, r0);
}
}
void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
Top::global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
frame_->EmitPop(r2);
DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(r1, Operand(cache_obj));
__ ldr(r0, FieldMemOperand(r1, kFingerOffset));
// r0 now holds finger offset as a smi.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// r3 now points to the start of fixed array elements.
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
// Note side effect of PreIndex: r3 now points to the key of the pair.
__ cmp(r2, r0);
deferred->Branch(ne);
__ ldr(r0, MemOperand(r3, kPointerSize));
deferred->BindExit();
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
@ -5586,8 +5671,10 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
} else {
// Call a native function to do a comparison between two non-NaNs.
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::compare_doubles()));
__ Jump(r5); // Tail call.
__ push(lr);
__ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
__ CallCFunction(ExternalReference::compare_doubles(), 4);
__ pop(pc); // Return.
}
}
@ -5909,7 +5996,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || lhs.is(r1) && rhs.is(r0));
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
if (ShouldGenerateSmiCode()) {
// Smi-smi case (overflow).
@ -7014,7 +7101,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
bool always_allocate,
int frame_alignment_skew) {
// r0: result parameter for PerformGC, if any
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@ -7022,8 +7110,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Passing r0.
ExternalReference gc_reference = ExternalReference::perform_gc_function();
__ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
__ PrepareCallCFunction(1, r1);
__ CallCFunction(ExternalReference::perform_gc_function(), 1);
}
ExternalReference scope_depth =
@ -7040,6 +7128,37 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
#if defined(V8_HOST_ARCH_ARM)
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
ASSERT(IsPowerOf2(frame_alignment));
__ sub(r2, sp, Operand(frame_alignment_skew));
__ tst(r2, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
__ stop("Unexpected alignment");
__ bind(&alignment_as_expected);
}
}
#endif
// Just before the call (jump) below, lr is pushed, so the effective skew at
// the call is the current skew plus one word.
int alignment_before_call =
(frame_alignment_skew + kPointerSize) & frame_alignment_mask;
if (alignment_before_call > 0) {
// Push until the alignment before the call is met.
__ mov(r2, Operand(0));
for (int i = alignment_before_call;
(i & frame_alignment_mask) != 0;
i += kPointerSize) {
__ push(r2);
}
}
// TODO(1242173): To let the GC traverse the return address of the exit
// frames, we need to know where the return address is. Right now,
// we push it on the stack to be able to find it again, but we never
@ -7047,10 +7166,15 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
masm->push(lr);
masm->Jump(r5);
// Restore sp back to before aligning the stack.
if (alignment_before_call > 0) {
__ add(sp, sp, Operand(alignment_before_call));
}
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
// though (contain the result).
@ -7137,7 +7261,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
false,
false);
false,
-kPointerSize);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
@ -7145,7 +7270,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
false);
false,
0);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
@ -7155,7 +7281,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
true);
true,
kPointerSize);
__ bind(&throw_out_of_memory_exception);
GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
@ -7502,6 +7629,345 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time, or if regexp entry in generated code is turned off by the runtime
// switch or at compilation.
#ifndef V8_NATIVE_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
}
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
// sp[4]: previous index
// sp[8]: subject string
// sp[12]: JSRegExp object
static const int kLastMatchInfoOffset = 0 * kPointerSize;
static const int kPreviousIndexOffset = 1 * kPointerSize;
static const int kSubjectOffset = 2 * kPointerSize;
static const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime, invoke_regexp;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
// directly from generated code the native RegExp code will not do a GC and
// therefore the contents of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
Register last_match_info_elements = r6;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address();
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size();
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
__ tst(r0, Operand(r0));
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag);
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ tst(regexp_data, Operand(kSmiTagMask));
__ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
}
// regexp_data: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
__ b(ne, &runtime);
// regexp_data: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the assumption that smis are 2 * their untagged value.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
__ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
__ b(hi, &runtime);
// r2: Number of capture registers
// regexp_data: RegExp data (FixedArray)
// Check that the second argument is a string.
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ tst(subject, Operand(kSmiTagMask));
__ b(eq, &runtime);
Condition is_string = masm->IsObjectStringType(subject, r0);
__ b(NegateCondition(is_string), &runtime);
// Get the length of the string to r3.
__ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
// r2: Number of capture registers
// r3: Length of subject string
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
__ cmp(r3, Operand(r0, ASR, kSmiTagSize + kSmiShiftSize));
__ b(ls, &runtime);
// r2: Number of capture registers
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check that the fourth object is a JSArray object.
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &runtime);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
__ b(ne, &runtime);
// Check that the JSArray is in fast case.
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ cmp(r0, Operand(Factory::fixed_array_map()));
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
__ cmp(r2, r0);
__ b(gt, &runtime);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
Label seq_string;
const int kStringRepresentationEncodingMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
__ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
// First check for sequential string.
ASSERT_EQ(0, kStringTag);
ASSERT_EQ(0, kSeqStringTag);
__ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
__ b(eq, &seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ and_(r0, r0, Operand(kStringRepresentationMask));
__ cmp(r0, Operand(kConsStringTag));
__ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ LoadRoot(r1, Heap::kEmptyStringRootIndex);
__ cmp(r0, r1);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
ASSERT_EQ(0, kSeqStringTag);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
__ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
__ bind(&seq_string);
// r1: subject string type & kStringRepresentationEncodingMask
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object; otherwise it contains the hole.
#ifdef DEBUG
const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
CHECK_EQ(4, kSeqAsciiString);
CHECK_EQ(0, kSeqTwoByteString);
#endif
// Find the code object based on the assumptions above.
__ mov(r3, Operand(r1, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it
// contains the hole.
__ CompareObjectType(r7, r0, r0, CODE_TYPE);
__ b(ne, &runtime);
// r3: encoding of subject string (1 if ascii, 0 if two_byte);
// r7: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
// r1: previous index
// r3: encoding of subject string (1 if ascii, 0 if two_byte);
// r7: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
static const int kRegExpExecuteArguments = 7;
__ push(lr);
__ PrepareCallCFunction(kRegExpExecuteArguments, r0);
// Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// Argument 5 (sp[0]): static offsets vector buffer.
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
__ str(r0, MemOperand(sp, 0 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
__ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
__ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Argument 4 (r3): End of string data
// Argument 3 (r2): Start of string data
__ add(r2, r9, Operand(r1, LSL, r3));
__ add(r3, r9, Operand(r0, LSL, r3));
// Argument 2 (r1): Previous index.
// Already there
// Argument 1 (r0): Subject string.
__ mov(r0, subject);
// Locate the code entry and call it.
__ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(r7, kRegExpExecuteArguments);
__ pop(lr);
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
// Check the result.
Label success;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
__ b(eq, &failure);
__ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// If the result is not EXCEPTION, it can only be RETRY. Handle that in the
// runtime system.
__ b(ne, &runtime);
// The result must now be EXCEPTION. If there is no pending exception, a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// the exception has not been created yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r0, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
__ ldr(r1, MemOperand(r1, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(Factory::null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// Process the result from the native regexp code.
__ bind(&success);
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
__ add(r1, r1, Operand(2)); // r1 was a smi.
// r1: number of capture registers
// r4: subject string
// Store the capture count.
__ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
__ RecordWrite(r3, r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
__ mov(r2, Operand(RegExpImpl::kLastInputOffset));
__ RecordWrite(r3, r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector();
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
// r2: offsets vector
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ add(r0,
last_match_info_elements,
Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ sub(r1, r1, Operand(1), SetCC);
__ b(mi, &done);
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
__ mov(r3, Operand(r3, LSL, kSmiTagSize));
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
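
For orientation: GenerateGetFromCache above emits only the fast path, comparing the key under the cache's finger and deferring to Runtime::kGetFromCache (via DeferredSearchCache) on a miss. A self-contained model of that probe, with the layout assumed from CreateCache in bootstrapper.cc; ResultCache and Probe are illustrative names, not V8 API:

    #include <cstddef>

    // Minimal model of the finger probe GenerateGetFromCache emits. The
    // finger holds the index of the most recently used key; its value sits
    // in the next slot.
    struct ResultCache {
      int finger;               // index of the current key within entries
      const void* entries[32];  // key/value pairs from kEntriesIndex onwards
    };

    // Returns the cached value on a finger hit, or NULL to signal the slow
    // path (the deferred Runtime::kGetFromCache call in the generated code).
    const void* Probe(const ResultCache& cache, const void* key) {
      if (cache.entries[cache.finger] == key) {
        return cache.entries[cache.finger + 1];
      }
      return NULL;
    }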

deps/v8/src/arm/codegen-arm.h (11)

@ -215,8 +215,10 @@ class CodeGenerator: public AstVisitor {
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// We don't track loop nesting level on ARM yet.
int loop_nesting() const { return 0; }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
@ -284,6 +286,7 @@ class CodeGenerator: public AstVisitor {
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
@ -409,6 +412,9 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
@ -455,6 +461,7 @@ class CodeGenerator: public AstVisitor {
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
int loop_nesting_;
// Jump targets
BreakTarget function_return_;
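
The new loop_nesting_ counter is bracketed manually with IncrementLoopNesting and DecrementLoopNesting in each loop visitor. A hypothetical RAII equivalent of that pattern; CodeGen here is a stand-in, not the real CodeGenerator:

    // Hypothetical RAII form of the Increment/DecrementLoopNesting bracketing
    // done by VisitDoWhileStatement, VisitWhileStatement and VisitForStatement.
    struct CodeGen {
      int loop_nesting_;
      CodeGen() : loop_nesting_(0) {}
      void IncrementLoopNesting() { loop_nesting_++; }
      void DecrementLoopNesting() { loop_nesting_--; }
    };

    class LoopNestingScope {
     public:
      explicit LoopNestingScope(CodeGen* cgen) : cgen_(cgen) {
        cgen_->IncrementLoopNesting();  // entering a loop body
      }
      ~LoopNestingScope() { cgen_->DecrementLoopNesting(); }  // leaving it
     private:
      CodeGen* cgen_;
    };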

deps/v8/src/arm/full-codegen-arm.cc (3)

@ -666,7 +666,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
// Build the shared function info and instantiate the function based
// on it.
Handle<SharedFunctionInfo> function_info =
Compiler::BuildFunctionInfo(expr, script(), this);
if (HasStackOverflow()) return;

deps/v8/src/arm/macro-assembler-arm.cc (92)

@ -355,10 +355,19 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// ip = sp + kPointerSize * #args;
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
// alignment in AlignStack.
AlignStack(1);
// Prepare the stack to be aligned when calling into C. After this point there
// are 5 pushes before the call into C, so the stack needs to be aligned after
// 5 pushes.
int frame_alignment = ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment != kPointerSize) {
// The following code needs to be more general if this assert does not hold.
ASSERT(frame_alignment == 2 * kPointerSize);
// With 5 pushes left the frame must be unaligned at this point.
mov(r7, Operand(Smi::FromInt(0)));
tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
push(r7, eq); // Push if aligned to make it unaligned.
}
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@ -389,27 +398,20 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
}
void MacroAssembler::AlignStack(int offset) {
int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
int activation_frame_alignment = OS::ActivationFrameAlignment();
return OS::ActivationFrameAlignment();
#else // defined(V8_HOST_ARCH_ARM)
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so we will always align at
// this point here.
int activation_frame_alignment = 2 * kPointerSize;
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
#endif // defined(V8_HOST_ARCH_ARM)
if (activation_frame_alignment != kPointerSize) {
// This code needs to be made more general if this assert doesn't hold.
ASSERT(activation_frame_alignment == 2 * kPointerSize);
mov(r7, Operand(Smi::FromInt(0)));
tst(sp, Operand(activation_frame_alignment - offset));
push(r7, eq); // Conditional push instruction.
}
}
@ -1309,15 +1311,29 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(r1));
// Load the builtins object into target register.
ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
int builtins_offset =
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
ldr(r1, FieldMemOperand(r1, builtins_offset));
// Load the code entry point from the function into the target register.
ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
ldr(r1, FieldMemOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
// Load the code entry point from the builtins object.
ldr(target, FieldMemOperand(target,
JSBuiltinsObject::OffsetOfCodeWithId(id)));
if (FLAG_debug_code) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(r1);
ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
cmp(r1, target);
Assert(eq, "Builtin code object changed");
pop(r1);
}
add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
@ -1558,16 +1574,16 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frameAlignment = OS::ActivationFrameAlignment();
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers r0..r3.
int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
if (frameAlignment > kPointerSize) {
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frameAlignment));
and_(sp, sp, Operand(-frameAlignment));
ASSERT(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
@ -1583,6 +1599,26 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if defined(V8_HOST_ARCH_ARM)
if (FLAG_debug_code) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask));
b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
stop("Unexpected alignment");
bind(&alignment_as_expected);
}
}
#endif
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
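
The alignment machinery above (EnterExitFrame, ActivationFrameAlignment, PrepareCallCFunction, plus the skew handling in CEntryStub::GenerateCore) enforces one rule: sp must sit on the activation frame alignment at the call into C. A self-contained model of the padding computation from GenerateCore; PaddingWords is an illustrative name:

    // Given sp's skew relative to the alignment boundary, how many zero
    // words must be pushed so sp is aligned right after lr is pushed?
    int PaddingWords(int frame_alignment_skew, int frame_alignment) {
      const int kPointerSize = 4;      // ARM32 word size
      int mask = frame_alignment - 1;  // frame_alignment is a power of two
      int before_call = (frame_alignment_skew + kPointerSize) & mask;
      int words = 0;
      for (int i = before_call; (i & mask) != 0; i += kPointerSize) words++;
      return words;
    }
    // PaddingWords(-4, 8) == 0: aligned once lr is pushed (first call site).
    // PaddingWords(0, 8)  == 1: one filler word restores 8-byte alignment.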

deps/v8/src/arm/macro-assembler-arm.h (4)

@ -116,8 +116,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void LoadContext(Register dst, int context_chain_length);

deps/v8/src/arm/regexp-macro-assembler-arm.cc (11)

@ -39,7 +39,7 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r5 : Pointer to current code object (Code*) including heap object tag.
@ -611,7 +611,6 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@ -1001,6 +1000,12 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
// If this is a direct call from JavaScript, retry the RegExp, forcing the call
// through the runtime system. Currently the direct call cannot handle a GC.
if (frame_entry<int>(re_frame, kDirectCall) == 1) {
return RETRY;
}
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);
@ -1230,6 +1235,6 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
#undef __
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

deps/v8/src/arm/regexp-macro-assembler-arm.h (6)

@ -32,14 +32,14 @@ namespace v8 {
namespace internal {
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerARM();
virtual ~RegExpMacroAssemblerARM();
};
#else
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
@ -258,7 +258,7 @@ class RegExpCEntryStub: public CodeStub {
const char* GetName() { return "RegExpCEntryStub"; }
};
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

deps/v8/src/arm/simulator-arm.cc (25)

@ -1249,6 +1249,11 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
int swi = instr->SwiField();
switch (swi) {
case call_rt_redirected: {
// Check if the stack is aligned. An error for an unaligned stack is reported
// below so that it can include information on the function called.
bool stack_aligned =
(get_register(sp)
& (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
int32_t arg0 = get_register(r0);
int32_t arg1 = get_register(r1);
@ -1262,12 +1267,17 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
reinterpret_cast<intptr_t>(redirection->external_function());
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (::v8::internal::FLAG_trace_sim) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
double x, y;
GetFpArgs(&x, &y);
PrintF("Call to host function at %p with args %f, %f\n",
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(target), x, y);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
} else {
@ -1275,15 +1285,20 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
reinterpret_cast<int32_t>(redirection->external_function());
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p with args %08x, %08x, %08x, %08x\n",
"Call to host function at %p with args %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
int64_t result = target(arg0, arg1, arg2, arg3);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
@ -2524,4 +2539,4 @@ uintptr_t Simulator::PopAddress() {
} } // namespace assembler::arm
#endif // !defined(__arm__)
#endif // __arm__
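
The simulator's new check reduces to a power-of-two mask test on sp before dispatching a redirected call. As a sketch, assuming FLAG_sim_stack_alignment is a power of two; StackAligned is an illustrative name:

    #include <stdint.h>

    // A value is aligned to `alignment` (a power of two) iff its low bits
    // are all zero; this mirrors the test applied before the CHECK above.
    bool StackAligned(int32_t sp, int32_t alignment) {
      return (sp & (alignment - 1)) == 0;
    }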

deps/v8/src/arm/stub-cache-arm.cc (88)

@ -600,6 +600,28 @@ static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
}
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
GlobalObject* global,
String* name,
Register scratch,
Label* miss) {
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) return probe;
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(Handle<Object>(cell)));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
return cell;
}
#undef __
#define __ ACCESS_MASM(masm())
@ -620,23 +642,19 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
while (object != holder) {
if (object->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(object);
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) {
set_failure(Failure::cast(probe));
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(object),
name,
scratch,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
return result;
}
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(Handle<Object>(cell)));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
}
object = JSObject::cast(object->GetPrototype());
}
@ -1389,6 +1407,50 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
// Load receiver.
__ ldr(r0, MemOperand(sp, 0));
// Check that receiver is not a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check the maps of the full prototype chain.
CheckPrototypes(object, r0, last, r3, r1, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(last),
name,
r1,
&miss);
if (cell->IsFailure()) return cell;
}
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NONEXISTENT, Heap::empty_string());
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
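
CompileLoadNonexistent caches a negative lookup: returning undefined stays valid only while the prototype chain maps are unchanged and, for a global at the end of the chain, while the property cell still holds the hole. A pseudocode sketch of the stub's logic; every helper here (Miss, CheckPrototypeMaps, PropertyCellIsEmpty, UndefinedValue) is an illustrative stand-in for a check emitted above, not a real V8 function:

    // Pseudocode for the compiled "load nonexistent" stub.
    Object* LoadNonexistent(JSObject* receiver, JSObject* last, String* name) {
      if (IsSmi(receiver)) return Miss();              // smi check
      if (!CheckPrototypeMaps(receiver, last)) return Miss();
      if (last->IsGlobalObject() && !PropertyCellIsEmpty(last, name)) {
        return Miss();  // a value appeared, so the negative result is stale
      }
      return UndefinedValue();  // property is known not to exist
    }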

deps/v8/src/arm/virtual-frame-arm.cc (3)

@ -507,6 +507,9 @@ void VirtualFrame::SpillAll() {
// Fall through.
case NO_TOS_REGISTERS:
break;
default:
UNREACHABLE();
break;
}
ASSERT(register_allocation_map_ == 0); // Not yet implemented.
}

deps/v8/src/arm/virtual-frame-arm.h (11)

@ -376,8 +376,15 @@ class VirtualFrame : public ZoneObject {
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
// 5 states for the top of stack, which can be in memory or in r0 and r1.
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
TOS_STATES};
enum TopOfStack {
NO_TOS_REGISTERS,
R0_TOS,
R1_TOS,
R1_R0_TOS,
R0_R1_TOS,
TOS_STATES
};
static const int kMaxTOSRegisters = 2;
static const bool kR0InUse[TOS_STATES];

deps/v8/src/array.js (8)

@ -712,10 +712,9 @@ function ArraySort(comparefn) {
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
var key = %_IsSmi(element) ? element : ToString(element);
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = Compare(tmp, key);
var order = Compare(tmp, element);
if (order > 0) {
a[j + 1] = tmp;
} else {
@ -734,9 +733,6 @@ function ArraySort(comparefn) {
}
var pivot_index = $floor($random() * (to - from)) + from;
var pivot = a[pivot_index];
// Pre-convert the element to a string for comparison if we know
// it will happen on each compare anyway.
var pivot_key = %_IsSmi(pivot) ? pivot : ToString(pivot);
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
a[pivot_index] = a[from];
@ -747,7 +743,7 @@ function ArraySort(comparefn) {
// From i to high_start are elements that haven't been compared yet.
for (var i = from + 1; i < high_start; ) {
var element = a[i];
var order = Compare(element, pivot_key);
var order = Compare(element, pivot);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;

deps/v8/src/assembler.cc (8)

@ -46,7 +46,7 @@
#include "regexp-macro-assembler.h"
#include "platform.h"
// Include native regexp-macro-assembler.
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
@ -56,7 +56,7 @@
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
namespace v8 {
namespace internal {
@ -680,7 +680,7 @@ ExternalReference ExternalReference::compile_array_push_call() {
}
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
Address function;
@ -723,7 +723,7 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
return ExternalReference(RegExpStack::memory_size_address());
}
#endif
#endif // V8_INTERPRETED_REGEXP
static double add_two_doubles(double x, double y) {

deps/v8/src/assembler.h (2)

@ -457,7 +457,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_step_in_fp_address();
#endif
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
// Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
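
Across the regexp files in this commit the compile-time guard flips from opt-in V8_NATIVE_REGEXP to opt-out V8_INTERPRETED_REGEXP, making native Irregexp the default. The resulting pattern, sketched:

    // After this change, native regexp code is compiled in unless the build
    // explicitly defines V8_INTERPRETED_REGEXP.
    #ifndef V8_INTERPRETED_REGEXP
    // Native (Irregexp) macro assembler and its C entry points are built.
    #else
    // Only the bytecode-interpreting backend is built.
    #endif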

deps/v8/src/bootstrapper.cc (41)

@ -228,6 +228,7 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
bool InstallNatives();
void InstallJSFunctionResultCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
static bool InstallExtensions(Handle<Context> global_context,
@ -1301,6 +1302,44 @@ bool Genesis::InstallNatives() {
}
// Do not forget to update macros.py with the named constant
// of the cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
F(16, global_context()->regexp_function())
static FixedArray* CreateCache(int size, JSFunction* factory) {
// Caches are supposed to live for a long time, allocate in old space.
int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
Handle<FixedArray> cache =
Factory::NewFixedArrayWithHoles(array_size, TENURED);
cache->set(JSFunctionResultCache::kFactoryIndex, factory);
cache->set(JSFunctionResultCache::kFingerIndex,
Smi::FromInt(JSFunctionResultCache::kEntriesIndex));
cache->set(JSFunctionResultCache::kCacheSizeIndex,
Smi::FromInt(JSFunctionResultCache::kEntriesIndex));
return *cache;
}
void Genesis::InstallJSFunctionResultCaches() {
const int kNumberOfCaches = 0 +
#define F(size, func) + 1
JSFUNCTION_RESULT_CACHE_LIST(F)
#undef F
;
Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
#define F(size, func) caches->set(index++, CreateCache(size, func));
JSFUNCTION_RESULT_CACHE_LIST(F)
#undef F
global_context()->set_jsfunction_result_caches(*caches);
}
int BootstrapperActive::nesting_ = 0;
@ -1453,6 +1492,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
builtins->set_javascript_builtin_code(id, shared->code());
}
return true;
}
@ -1664,6 +1704,7 @@ Genesis::Genesis(Handle<Object> global_object,
HookUpGlobalProxy(inner_global, global_proxy);
InitializeGlobal(inner_global, empty_function);
if (!InstallNatives()) return;
InstallJSFunctionResultCaches();
MakeFunctionInstancePrototypeWritable();
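
kNumberOfCaches in InstallJSFunctionResultCaches is computed with an X-macro counting idiom: every list entry expands to "+ 1". A self-contained demonstration, using a hypothetical DEMO_LIST:

    // Each F(...) entry in the list expands to "+ 1", so the constant counts
    // the entries. DEMO_LIST is a hypothetical two-entry list.
    #define DEMO_LIST(F) F(16, a) F(8, b)

    static const int kDemoCount = 0
    #define F(size, func) + 1
        DEMO_LIST(F)
    #undef F
        ;  // expands to 0 + 1 + 1, so kDemoCount == 2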

deps/v8/src/codegen.h (4)

@ -124,6 +124,7 @@ namespace internal {
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(RegExpConstructResult, 3, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
@ -424,7 +425,8 @@ class CEntryStub : public CodeStub {
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope);
bool always_allocate_scope,
int alignment_skew = 0);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);

deps/v8/src/compilation-cache.cc (2)

@ -270,7 +270,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
// Break when we've found a suitable boilerplate function that
// Break when we've found a suitable shared function info that
// matches the origin.
if (HasOrigin(function_info, name, line_offset, column_offset)) {
result = *function_info;

deps/v8/src/compilation-cache.h (17)

@ -32,12 +32,13 @@ namespace v8 {
namespace internal {
// The compilation cache keeps function boilerplates for compiled
// scripts and evals. The boilerplates are looked up using the source
// string as the key. For regular expressions the compilation data is cached.
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
// compilation data is cached.
class CompilationCache {
public:
// Finds the script function boilerplate for a source
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
@ -45,7 +46,7 @@ class CompilationCache {
int line_offset,
int column_offset);
// Finds the function boilerplate for a source string for eval in a
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
@ -57,13 +58,13 @@ class CompilationCache {
static Handle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
// Associate the (source, kind) pair to the boilerplate. This may
// overwrite an existing mapping.
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
static void PutScript(Handle<String> source,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the boilerplate. This may overwrite an existing mapping.
// with the shared function info. This may overwrite an existing mapping.
static void PutEval(Handle<String> source,
Handle<Context> context,
bool is_global,

deps/v8/src/compiler.cc (2)

@ -541,7 +541,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
code);
}
// Create a boilerplate function.
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),

deps/v8/src/contexts.h (2)

@ -83,6 +83,7 @@ enum ContextLookupFlags {
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
@ -205,6 +206,7 @@ class Context: public FixedArray {
GET_STACK_TRACE_LINE_INDEX,
CONFIGURE_GLOBAL_INDEX,
FUNCTION_CACHE_INDEX,
JSFUNCTION_RESULT_CACHES_INDEX,
RUNTIME_CONTEXT_INDEX,
CALL_AS_FUNCTION_DELEGATE_INDEX,
CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,

deps/v8/src/cpu-profiler-inl.h (5)

@ -30,7 +30,7 @@
#include "cpu-profiler.h"
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
@ -71,6 +71,7 @@ TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
TickSampleEventRecord::init(ticks_buffer_.Enqueue());
evt->order = enqueue_order_; // No increment!
@ -93,6 +94,6 @@ bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_CPU_PROFILER_INL_H_

26
deps/v8/src/cpu-profiler.cc

@ -29,7 +29,7 @@
#include "cpu-profiler-inl.h"
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "log-inl.h"
@ -253,14 +253,12 @@ void CpuProfiler::StartProfiling(String* title) {
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
ASSERT(singleton_ != NULL);
return singleton_->StopCollectingProfile(title);
return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
}
CpuProfile* CpuProfiler::StopProfiling(String* title) {
ASSERT(singleton_ != NULL);
return singleton_->StopCollectingProfile(title);
return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
}
@ -422,6 +420,10 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
// Enable stack sampling.
// It is important to have it started prior to logging, see issue 683:
// http://code.google.com/p/v8/issues/detail?id=683
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
@ -429,15 +431,14 @@ void CpuProfiler::StartProcessorIfNotStarted() {
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
CpuProfile* result = profiles_->StopProfiling(title);
CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
if (result != NULL) {
result->Print();
}
@ -446,8 +447,9 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
return profiles_->StopProfiling(title);
return profiles_->StopProfiling(title, actual_sampling_rate);
}
@ -466,13 +468,13 @@ void CpuProfiler::StopProcessorIfLastProfile() {
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
namespace internal {
void CpuProfiler::Setup() {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
if (singleton_ == NULL) {
singleton_ = new CpuProfiler();
}
@ -481,7 +483,7 @@ void CpuProfiler::Setup() {
void CpuProfiler::TearDown() {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
if (singleton_ != NULL) {
delete singleton_;
}

8
deps/v8/src/cpu-profiler.h

@ -28,7 +28,7 @@
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "circular-queue.h"
@ -197,7 +197,7 @@ class ProfilerEventsProcessor : public Thread {
} while (false)
#else
#define PROFILE(Call) LOG(Call)
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
@ -208,7 +208,7 @@ class CpuProfiler {
static void Setup();
static void TearDown();
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
@ -265,7 +265,7 @@ class CpuProfiler {
#else
static INLINE(bool is_profiling()) { return false; }
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);

19
deps/v8/src/d8-debug.cc

@ -34,6 +34,11 @@
namespace v8 {
void PrintPrompt() {
printf("dbg> ");
fflush(stdout);
}
void HandleDebugEvent(DebugEvent event,
Handle<Object> exec_state,
@ -86,7 +91,7 @@ void HandleDebugEvent(DebugEvent event,
bool running = false;
while (!running) {
char command[kBufferSize];
printf("dbg> ");
PrintPrompt();
char* str = fgets(command, kBufferSize, stdin);
if (str == NULL) break;
@ -178,6 +183,7 @@ void RemoteDebugger::Run() {
// Start the keyboard thread.
KeyboardThread keyboard(this);
keyboard.Start();
PrintPrompt();
// Process events received from debugged VM and from the keyboard.
bool terminate = false;
@ -264,7 +270,8 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
Handle<Object> details =
Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
Shell::ReportException(&try_catch);
PrintPrompt();
return;
}
String::Utf8Value str(details->Get(String::New("text")));
@ -277,7 +284,7 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
} else {
printf("???\n");
}
printf("dbg> ");
PrintPrompt();
}
@ -289,13 +296,17 @@ void RemoteDebugger::HandleKeyboardCommand(char* command) {
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(&try_catch);
v8::String::Utf8Value exception(try_catch.Exception());
const char* exception_string = Shell::ToCString(exception);
printf("%s\n", exception_string);
PrintPrompt();
return;
}
// If undefined is returned the command was handled internally and there is
// no JSON to send.
if (request->IsUndefined()) {
PrintPrompt();
return;
}

2
deps/v8/src/d8.cc

@ -102,7 +102,7 @@ bool CounterMap::Match(void* key1, void* key2) {
// Converts a V8 value to a C string.
const char* ToCString(const v8::String::Utf8Value& value) {
const char* Shell::ToCString(const v8::String::Utf8Value& value) {
return *value ? *value : "<string conversion failed>";
}

1
deps/v8/src/d8.h

@ -117,6 +117,7 @@ class Shell: public i::AllStatic {
Handle<Value> name,
bool print_result,
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
static void Initialize();
static void OnExit();

11
deps/v8/src/d8.js

@ -715,8 +715,6 @@ DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
// Create a JSON request for the break command.
DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
// Build a evaluate request from the text command.
var request = this.createRequest('setbreakpoint');
// Process arguments if any.
if (args && args.length > 0) {
var target = args;
@ -726,6 +724,8 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
var condition;
var pos;
var request = this.createRequest('setbreakpoint');
// Check for breakpoint condition.
pos = args.indexOf(' ');
if (pos > 0) {
@ -763,7 +763,7 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
request.arguments.column = column;
request.arguments.condition = condition;
} else {
throw new Error('Invalid break arguments.');
var request = this.createRequest('suspend');
}
return request.toJSONProtocol();
@ -817,6 +817,7 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('warning: arguments to \'help\' are ignored');
}
print('break');
print('break location [condition]');
print(' break on named function: location is a function name');
print(' break on function: location is #<id>#');
@ -931,6 +932,10 @@ function DebugResponseDetails(response) {
var body = response.body();
var result = '';
switch (response.command()) {
case 'suspend':
details.text = 'stopped';
break;
case 'setbreakpoint':
result = 'set breakpoint #';
result += body.breakpoint;

6
deps/v8/src/debug-debugger.js

@ -1970,7 +1970,7 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
if (!Debug.LiveEditChangeScript) {
if (!Debug.LiveEdit) {
return response.failed('LiveEdit feature is not supported');
}
if (!request.arguments) {
@ -2010,7 +2010,7 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
return;
}
invocation = function() {
return Debug.LiveEditChangeScript(the_script, change_pos, change_len,
return Debug.LiveEdit.ApplyPatch(the_script, change_pos, change_len,
new_string, change_log);
}
}
@ -2018,7 +2018,7 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
try {
invocation();
} catch (e) {
if (e instanceof Debug.LiveEditChangeScript.Failure) {
if (e instanceof Debug.LiveEdit.Failure) {
// Let's treat it as a "success" so that body with change_log will be
// sent back. "change_log" will have "failure" field set.
change_log.push( { failure: true, message: e.toString() } );

3
deps/v8/src/execution.cc

@ -46,9 +46,6 @@ static Handle<Object> Invoke(bool construct,
int argc,
Object*** args,
bool* has_pending_exception) {
// Make sure we have a real function, not a boilerplate function.
ASSERT(!func->IsBoilerplate());
// Entering JavaScript.
VMState state(JS);

7
deps/v8/src/factory.cc

@ -43,9 +43,11 @@ Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
}
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) {
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size), FixedArray);
CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size, pretenure),
FixedArray);
}
@ -312,7 +314,6 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
context->global_context());
}
result->set_literals(*literals);
ASSERT(!result->IsBoilerplate());
return result;
}

4
deps/v8/src/factory.h

@ -47,7 +47,9 @@ class Factory : public AllStatic {
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
static Handle<FixedArray> NewFixedArrayWithHoles(int size);
static Handle<FixedArray> NewFixedArrayWithHoles(
int size,
PretenureFlag pretenure = NOT_TENURED);
static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);

4
deps/v8/src/flag-definitions.h

@ -231,8 +231,10 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(optimize_ast, true, "optimize the ast")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "trace simulator execution")
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
// top.cc
DEFINE_bool(trace_exception, false,

9
deps/v8/src/globals.h

@ -147,6 +147,9 @@ const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
#endif
// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;
const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
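
The new kSmiSignMask constant replaces the literal 0x80000000 in the code generators later in this commit. With kSmiTag == 0, one test checks "is a non-negative smi" in a single instruction, as in the ia32 changes below:

//   __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
//   deferred->Branch(not_zero);  // not a smi, or negative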
@ -428,7 +431,11 @@ enum PropertyType {
CONSTANT_TRANSITION = 6, // only in fast mode
NULL_DESCRIPTOR = 7, // only in fast mode
// All properties before MAP_TRANSITION are real.
FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION
FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
// There are no IC stubs for NULL_DESCRIPTORS. Therefore,
// NULL_DESCRIPTOR can be used as the type flag for IC stubs for
// nonexistent properties.
NONEXISTENT = NULL_DESCRIPTOR
};

32
deps/v8/src/handles.cc

@ -457,6 +457,16 @@ void InitScriptLineEnds(Handle<Script> script) {
}
Handle<String> src(String::cast(script->source()));
Handle<FixedArray> array = CalculateLineEnds(src, true);
script->set_line_ends(*array);
ASSERT(script->line_ends()->IsFixedArray());
}
Handle<FixedArray> CalculateLineEnds(Handle<String> src,
bool with_imaginary_last_new_line) {
const int src_len = src->length();
Handle<String> new_line = Factory::NewStringFromAscii(CStrVector("\n"));
@ -468,8 +478,12 @@ void InitScriptLineEnds(Handle<Script> script) {
if (position != -1) {
position++;
}
// Even if the last line misses a line end, it is counted.
line_count++;
if (position != -1) {
line_count++;
} else if (with_imaginary_last_new_line) {
// Even if the last line misses a line end, it is counted.
line_count++;
}
}
// Pass 2: Fill in line ends positions
@ -478,15 +492,17 @@ void InitScriptLineEnds(Handle<Script> script) {
position = 0;
while (position != -1 && position < src_len) {
position = Runtime::StringMatch(src, new_line, position);
// If the script does not end with a line ending add the final end
// position as just past the last line ending.
array->set(array_index++,
Smi::FromInt(position != -1 ? position++ : src_len));
if (position != -1) {
array->set(array_index++, Smi::FromInt(position++));
} else if (with_imaginary_last_new_line) {
// If the script does not end with a line ending add the final end
// position as just past the last line ending.
array->set(array_index++, Smi::FromInt(src_len));
}
}
ASSERT(array_index == line_count);
script->set_line_ends(*array);
ASSERT(script->line_ends()->IsFixedArray());
return array;
}
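
A standalone model of the behaviour above, collapsed into a single pass for brevity (V8 uses two passes to size the array first): record the position of every '\n', and only count an imaginary end for an unterminated last line when the flag asks for it.

#include <string>
#include <vector>

std::vector<int> CalculateLineEndsModel(const std::string& src,
                                        bool with_imaginary_last_new_line) {
  std::vector<int> ends;
  std::string::size_type pos = 0;
  while (pos < src.size()) {
    std::string::size_type nl = src.find('\n', pos);
    if (nl == std::string::npos) {
      if (with_imaginary_last_new_line) {
        // The unterminated last line ends just past the final character.
        ends.push_back(static_cast<int>(src.size()));
      }
      break;
    }
    ends.push_back(static_cast<int>(nl));
    pos = nl + 1;
  }
  return ends;
}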

5
deps/v8/src/handles.h

@ -271,6 +271,11 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script);
// Script line number computations.
void InitScriptLineEnds(Handle<Script> script);
// For a string, calculates an array of line end positions. If the string
// does not end with a new line character, this character may optionally be
// imagined.
Handle<FixedArray> CalculateLineEnds(Handle<String> string,
bool with_imaginary_last_new_line);
int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);

4
deps/v8/src/heap-inl.h

@ -240,8 +240,8 @@ void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
}
void Heap::MoveBlock(Object** dst, Object** src, size_t byte_size) {
ASSERT(IsAligned<size_t>(byte_size, kPointerSize));
void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;

64
deps/v8/src/heap.cc

@ -41,7 +41,7 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
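
Schematic of the renamed configuration macro used throughout this commit: V8_NATIVE_REGEXP guards become !V8_INTERPRETED_REGEXP guards, so the native Irregexp path is the default when the interpreter macro is absent.

#ifdef V8_INTERPRETED_REGEXP
  // regexps run in the bytecode interpreter
#else
  // regexps compile to machine code via the platform
  // RegExpMacroAssembler (requires the RegExp C entry stub on ARM)
#endif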
@ -1444,10 +1444,6 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_global_context_map(Map::cast(obj));
obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
if (obj->IsFailure()) return false;
set_boilerplate_function_map(Map::cast(obj));
obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
SharedFunctionInfo::kAlignedSize);
if (obj->IsFailure()) return false;
@ -1531,7 +1527,7 @@ void Heap::CreateCEntryStub() {
}
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
void Heap::CreateRegExpCEntryStub() {
RegExpCEntryStub stub;
set_re_c_entry_code(*stub.GetCode());
@ -1568,7 +1564,7 @@ void Heap::CreateFixedStubs() {
Heap::CreateCEntryStub();
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Heap::CreateRegExpCEntryStub();
#endif
}
@ -1670,8 +1666,8 @@ bool Heap::CreateInitialObjects() {
if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1, TENURED);
// Allocate cache for single character ASCII strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
if (obj->IsFailure()) return false;
set_single_character_string_cache(FixedArray::cast(obj));
@ -3013,13 +3009,10 @@ Object* Heap::AllocateFixedArray(int length) {
}
Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
ASSERT(length >= 0);
ASSERT(empty_fixed_array()->IsFixedArray());
Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
if (length == 0) return empty_fixed_array();
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
@ -3053,18 +3046,39 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
ASSERT(space == LO_SPACE);
result = lo_space_->AllocateRawFixedArray(size);
}
return result;
}
static Object* AllocateFixedArrayWithFiller(int length,
PretenureFlag pretenure,
Object* filler) {
ASSERT(length >= 0);
ASSERT(Heap::empty_fixed_array()->IsFixedArray());
if (length == 0) return Heap::empty_fixed_array();
ASSERT(!Heap::InNewSpace(filler));
Object* result = Heap::AllocateRawFixedArray(length, pretenure);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
HeapObject::cast(result)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
ASSERT(!Heap::InNewSpace(undefined_value()));
MemsetPointer(array->data_start(), undefined_value(), length);
MemsetPointer(array->data_start(), filler, length);
return array;
}
Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
Object* Heap::AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure) {
return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}
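
A standalone model of the refactoring above: one helper parameterized by the filler value replaces two near-identical allocators, so the undefined-filled and hole-filled variants become one-line wrappers. Object here is a stand-in for V8's tagged pointer type, not the real class.

#include <vector>

typedef void* Object;

static std::vector<Object> AllocateFixedArrayWithFillerModel(int length,
                                                             Object filler) {
  return std::vector<Object>(static_cast<size_t>(length), filler);
}

std::vector<Object> AllocateFixedArrayModel(int length, Object undefined) {
  return AllocateFixedArrayWithFillerModel(length, undefined);
}

std::vector<Object> AllocateFixedArrayWithHolesModel(int length, Object hole) {
  return AllocateFixedArrayWithFillerModel(length, hole);
}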
Object* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
@ -3077,22 +3091,6 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
}
Object* Heap::AllocateFixedArrayWithHoles(int length) {
if (length == 0) return empty_fixed_array();
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(the_hole_value()));
MemsetPointer(array->data_start(), the_hole_value(), length);
}
return result;
}
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;

13
deps/v8/src/heap.h

@ -89,7 +89,6 @@ class ZoneScopeInfo;
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
V(Object, nan_value, NanValue) \
@ -111,7 +110,7 @@ class ZoneScopeInfo;
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
UNCONDITIONAL_STRONG_ROOT_LIST(V) \
V(Code, re_c_entry_code, RegExpCEntryCode)
@ -484,7 +483,9 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateFixedArrayWithHoles(int length);
static Object* AllocateFixedArrayWithHoles(
int length,
PretenureFlag pretenure = NOT_TENURED);
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
@ -896,8 +897,10 @@ class Heap : public AllStatic {
// Returns the adjusted value.
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
// Allocate unitialized fixed array (pretenure == NON_TENURE).
// Allocate uninitialized fixed array.
static Object* AllocateRawFixedArray(int length);
static Object* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
@ -955,7 +958,7 @@ class Heap : public AllStatic {
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Object** dst, Object** src, size_t byte_size);
static inline void MoveBlock(Object** dst, Object** src, int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();

523
deps/v8/src/ia32/codegen-ia32.cc

@ -140,7 +140,8 @@ void CodeGenerator::Generate(CompilationInfo* info) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
loop_nesting_ += info->loop_nesting();
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ = info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@ -333,7 +334,8 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
// Adjust for function-level loop nesting.
loop_nesting_ -= info->loop_nesting();
ASSERT_EQ(info->loop_nesting(), loop_nesting_);
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(state_ == NULL);
@ -2346,7 +2348,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
smi_value,
overwrite_mode);
// Check for negative or non-Smi left hand side.
__ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
__ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
deferred->Branch(not_zero);
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
@ -4659,8 +4661,8 @@ Result CodeGenerator::InstantiateFunction(
frame()->EmitPush(Immediate(function_info));
return frame()->CallStub(&stub, 1);
} else {
// Call the runtime to instantiate the function boilerplate
// object.
// Call the runtime to instantiate the function based on the
// shared function info.
frame()->EmitPush(esi);
frame()->EmitPush(Immediate(function_info));
return frame()->CallRuntime(Runtime::kNewClosure, 2);
@ -5899,7 +5901,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
__ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
__ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
value.Unuse();
destination()->Split(zero);
}
@ -5915,43 +5917,11 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
Label slow_case;
Label end;
Label not_a_flat_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
// Get register ecx to use as shift amount later.
Result shift_amount;
if (object.is_register() && object.reg().is(ecx)) {
Result fresh = allocator_->Allocate();
shift_amount = object;
object = fresh;
__ mov(object.reg(), ecx);
}
if (index.is_register() && index.reg().is(ecx)) {
Result fresh = allocator_->Allocate();
shift_amount = index;
index = fresh;
__ mov(index.reg(), ecx);
}
// There could be references to ecx in the frame. Allocating will
// spill them, otherwise spill explicitly.
if (shift_amount.is_valid()) {
frame_->Spill(ecx);
} else {
shift_amount = allocator()->Allocate(ecx);
}
ASSERT(shift_amount.is_register());
ASSERT(shift_amount.reg().is(ecx));
ASSERT(allocator_->count(ecx) == 1);
// We will mutate the index register and possibly the object register.
// The case where they are somehow the same register is handled
// because we only mutate them in the case where the receiver is a
@ -5961,93 +5931,33 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
frame_->Spill(object.reg());
frame_->Spill(index.reg());
// We need a single extra temporary register.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
// We need two extra registers.
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
// There is no virtual frame effect from here up to the final result
// push.
// If the receiver is a smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ test(object.reg(), Immediate(kSmiTagMask));
__ j(zero, &slow_case);
// If the index is negative or non-smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &slow_case);
// Untag the index.
__ SmiUntag(index.reg());
__ bind(&try_again_with_new_string);
// Fetch the instance type of the receiver into ecx.
__ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the slow case.
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &slow_case);
// Fetch the length field into the temporary register.
__ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
// Check for index out of range.
__ cmp(index.reg(), Operand(temp.reg()));
__ j(greater_equal, &slow_case);
// Reload the instance type (into the temp register this time)..
__ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
__ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
__ test(temp.reg(), Immediate(kStringRepresentationMask));
__ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
__ test(temp.reg(), Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the temp register.
__ movzx_w(temp.reg(), FieldOperand(object.reg(),
index.reg(),
times_2,
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// ASCII string.
__ bind(&ascii_string);
// Load the byte into the temp register.
__ movzx_b(temp.reg(), FieldOperand(object.reg(),
index.reg(),
times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ SmiTag(temp.reg());
__ jmp(&end);
// Handle non-flat strings.
__ bind(&not_a_flat_string);
__ and_(temp.reg(), kStringRepresentationMask);
__ cmp(temp.reg(), kConsStringTag);
__ j(not_equal, &slow_case);
// ConsString.
// Check that the right hand side is the empty string (ie if this is really a
// flat string in a cons string). If that is not the case we would rather go
// to the runtime system now, to flatten the string.
__ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
__ cmp(Operand(temp.reg()), Factory::empty_string());
__ j(not_equal, &slow_case);
// Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
Label slow_case;
Label exit;
StringHelper::GenerateFastCharCodeAt(masm_,
object.reg(),
index.reg(),
scratch.reg(),
result.reg(),
&slow_case,
&slow_case,
&slow_case);
__ jmp(&exit);
__ bind(&slow_case);
// Move the undefined value into the result register, which will
// trigger the slow case.
__ Set(temp.reg(), Immediate(Factory::undefined_value()));
__ Set(result.reg(), Immediate(Factory::undefined_value()));
__ bind(&end);
frame_->Push(&temp);
__ bind(&exit);
frame_->Push(&result);
}
@ -6056,46 +5966,22 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Result code = frame_->Pop();
code.ToRegister();
ASSERT(code.is_valid());
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
JumpTarget slow_case;
JumpTarget exit;
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ test(code.reg(),
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
slow_case.Branch(not_zero, &code, not_taken);
__ Set(temp.reg(), Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
__ mov(temp.reg(), FieldOperand(temp.reg(),
code.reg(), times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(temp.reg(), Factory::undefined_value());
slow_case.Branch(equal, &code, not_taken);
code.Unuse();
// StringHelper::GenerateCharFromCode may do a runtime call.
frame_->SpillAll();
frame_->Push(&temp);
exit.Jump();
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
slow_case.Bind(&code);
frame_->Push(&code);
Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
StringHelper::GenerateCharFromCode(masm_,
code.reg(),
result.reg(),
CALL_FUNCTION);
frame_->Push(&result);
exit.Bind();
}
@ -6628,6 +6514,80 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
: dst_(dst), cache_(cache), key_(key) {
set_comment("[ DeferredSearchCache");
}
virtual void Generate();
private:
Register dst_, cache_, key_;
};
void DeferredSearchCache::Generate() {
__ push(cache_);
__ push(key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
if (!dst_.is(eax)) {
__ mov(dst_, eax);
}
}
void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
Top::global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
frame_->Push(Factory::undefined_value());
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
Result key = frame_->Pop();
key.ToRegister();
Result cache = allocator()->Allocate();
__ mov(cache.reg(), cache_obj);
Result tmp = allocator()->Allocate();
DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
key.reg());
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
__ cmp(key.reg(), FieldOperand(cache.reg(),
tmp.reg(), // as smi
times_half_pointer_size,
FixedArray::kHeaderSize));
deferred->Branch(not_equal);
__ mov(tmp.reg(), FieldOperand(cache.reg(),
tmp.reg(), // as smi
times_half_pointer_size,
kPointerSize + FixedArray::kHeaderSize));
deferred->BindExit();
frame_->Push(&tmp);
}
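
A standalone model of the cache probed by GenerateGetFromCache; the layout is inferred from the stub above (key at the finger slot, value one pointer after it), so treat it as an assumption. Keys and values alternate in a flat array and a "finger" remembers the last hit, making the generated fast path a single compare-at-finger.

#include <vector>

struct JSFunctionResultCacheModel {
  std::vector<int> entries;  // key0, value0, key1, value1, ...
  size_t finger;             // index of the most recently hit key

  // Fast path only; a miss falls back to the runtime search
  // (Runtime::kGetFromCache in the deferred code above).
  bool FastGet(int key, int* value) const {
    if (finger + 1 < entries.size() && entries[finger] == key) {
      *value = entries[finger + 1];
      return true;
    }
    return false;
  }
};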
void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
@ -8446,7 +8406,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
}
// Check that the key is a non-negative smi.
__ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
__ test(key.reg(), Immediate(kSmiTagMask | kSmiSignMask));
deferred->Branch(not_zero);
// Check that the receiver is not a smi.
@ -10732,9 +10692,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
#else // V8_INTERPRETED_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
@ -10811,9 +10771,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ebx: Length of subject string
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi.
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (usigned comparison).
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ SmiUntag(eax);
__ cmp(eax, Operand(ebx));
@ -10860,9 +10819,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ mov(edx, ebx);
__ and_(edx, kStringRepresentationMask);
__ cmp(edx, kConsStringTag);
__ and_(ebx, kStringRepresentationMask);
__ cmp(ebx, kConsStringTag);
__ j(not_equal, &runtime);
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
__ cmp(Operand(edx), Factory::empty_string());
@ -10881,7 +10839,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object otherwise it contains the hole.
__ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
__ cmp(ebx, kSeqTwoByteString);
__ j(equal, &seq_two_byte_string);
if (FLAG_debug_code) {
__ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
@ -10977,7 +10936,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592) Rerunning the RegExp to get the stack overflow exception.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@ -11028,7 +10987,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
__ mov(eax, Operand(esp, kPreviousIndexOffset));
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@ -11053,7 +11011,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}
@ -11657,7 +11615,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
bool always_allocate_scope,
int /* alignment_skew */) {
// eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@ -11667,7 +11626,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Result returned in eax, or eax+edx if result_size_ is 2.
// Check stack alignment.
if (FLAG_debug_code) {
__ CheckStackAlignment();
}
if (do_gc) {
// Pass failure code returned from last attempt as first argument to
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@ -12069,6 +12038,154 @@ const char* CompareStub::GetName() {
}
void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_positive_smi,
Label* slow_case) {
Label not_a_flat_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask));
__ j(zero, receiver_not_string);
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ test(result, Immediate(kIsNotStringMask));
__ j(not_zero, receiver_not_string);
// If the index is negative or non-smi trigger the non-positive-smi
// case.
ASSERT(kSmiTag == 0);
__ test(index, Immediate(kSmiTagMask | kSmiSignMask));
__ j(not_zero, index_not_positive_smi);
// Put untagged index into scratch register.
__ mov(scratch, index);
__ SmiUntag(scratch);
// Check for index out of range.
__ cmp(scratch, FieldOperand(object, String::kLengthOffset));
__ j(greater_equal, slow_case);
__ bind(&try_again_with_new_string);
// ----------- S t a t e -------------
// -- object : string to access
// -- result : instance type of the string
// -- scratch : positive smi index < length
// -----------------------------------
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
__ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the temp register.
__ movzx_w(result, FieldOperand(object,
scratch, times_2,
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// Handle non-flat strings.
__ bind(&not_a_flat_string);
__ and_(result, kStringRepresentationMask);
__ cmp(result, kConsStringTag);
__ j(not_equal, slow_case);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ mov(result, FieldOperand(object, ConsString::kSecondOffset));
__ cmp(Operand(result), Factory::empty_string());
__ j(not_equal, slow_case);
// Get the first of the two strings and load its instance type.
__ mov(object, FieldOperand(object, ConsString::kFirstOffset));
__ mov(result, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
__ jmp(&try_again_with_new_string);
// ASCII string.
__ bind(&ascii_string);
// Load the byte into the temp register.
__ movzx_b(result, FieldOperand(object,
scratch, times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ SmiTag(result);
}
void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
Register code,
Register result,
InvokeFlag flag) {
ASSERT(!code.is(result));
Label slow_case;
Label exit;
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ test(code,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
__ j(not_zero, &slow_case, not_taken);
__ Set(result, Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
__ mov(result, FieldOperand(result,
code, times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(result, Factory::undefined_value());
__ j(equal, &slow_case, not_taken);
__ jmp(&exit);
__ bind(&slow_case);
if (flag == CALL_FUNCTION) {
__ push(code);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result.is(eax)) {
__ mov(result, eax);
}
} else {
ASSERT(flag == JUMP_FUNCTION);
ASSERT(result.is(eax));
__ pop(eax); // Save return address.
__ push(code);
__ push(eax); // Restore return address.
__ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
}
__ bind(&exit);
if (flag == JUMP_FUNCTION) {
ASSERT(result.is(eax));
__ ret(0);
}
}
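
A standalone model of GenerateCharFromCode's fast path: ascii char codes index a premade cache of one-character strings, and a missing entry falls back to the runtime (Runtime::kCharFromCode above). The names are stand-ins for the V8 internals.

#include <string>

static const int kMaxAsciiCharCodeModel = 127;

// Stand-in for the runtime call.
static std::string SlowCharFromCode(int code) {
  return std::string(1, static_cast<char>(code));
}

std::string CharFromCodeModel(int code, const std::string* cache) {
  if (code >= 0 && code <= kMaxAsciiCharCodeModel && !cache[code].empty()) {
    return cache[code];  // fast path: one indexed cache load
  }
  return SlowCharFromCode(code);
}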
void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime;
@ -12135,8 +12252,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
&make_two_character_string);
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@ -12228,7 +12345,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
// Load second argument and locate first character.
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
@ -12237,7 +12354,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ecx: next character of result
// edx: first char of second argument
// edi: length of second argument
GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@ -12267,7 +12384,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
// Load second argument and locate first character.
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
@ -12276,7 +12393,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// ecx: next character of result
// edx: first char of second argument
// edi: length of second argument
GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@ -12286,12 +12403,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii) {
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@ -12312,12 +12429,12 @@ void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
}
void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii) {
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii) {
// Copy characters using rep movs of doublewords. Align destination on 4 byte
// boundary before starting rep movs. Copy remaining characters after running
// rep movs.
@ -12372,13 +12489,13 @@ void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
}
void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found) {
void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found) {
// Register scratch3 is the general scratch register in this function.
Register scratch = scratch3;
@ -12492,10 +12609,10 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
}
void StringStubBase::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch) {
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch) {
// hash = character + (character << 10);
__ mov(hash, character);
__ shl(hash, 10);
@ -12507,10 +12624,10 @@ void StringStubBase::GenerateHashInit(MacroAssembler* masm,
}
void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch) {
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch) {
// hash += character;
__ add(hash, Operand(character));
// hash += hash << 10;
@ -12524,9 +12641,9 @@ void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
}
void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch) {
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch) {
// hash += hash << 3;
__ mov(scratch, hash);
__ shl(scratch, 3);
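
A scalar sketch of the hash these three helpers compute in registers. The add/shift steps match the comments visible above; the xor/shift mixing steps elided by these hunks are assumed to follow the usual one-at-a-time pattern.

#include <stdint.h>
#include <string>

uint32_t StringHashModel(const std::string& s) {
  uint32_t hash = 0;
  for (size_t i = 0; i < s.size(); i++) {
    hash += static_cast<unsigned char>(s[i]);  // GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;                         // assumed mixing step
  }
  hash += hash << 3;                           // GenerateHashGetHash
  hash ^= hash >> 11;                          // assumed
  hash += hash << 15;                          // assumed
  return hash;
}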
@ -12600,8 +12717,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table.
Label make_two_character_string;
GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
&make_two_character_string);
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
@ -12640,7 +12757,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// edx: original value of esi
// edi: first character of result
// esi: character of sub string start
GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(3 * kPointerSize);
@ -12679,7 +12796,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// edx: original value of esi
// edi: first character of result
// esi: character of sub string start
GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(3 * kPointerSize);

103
deps/v8/src/ia32/codegen-ia32.h

@ -630,6 +630,9 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
@ -880,53 +883,85 @@ class GenericBinaryOpStub: public CodeStub {
};
class StringStubBase: public CodeStub {
class StringHelper : public AllStatic {
public:
// Generates fast code for getting a char code out of a string
// object at the given index. May bail out for three reasons (in the
// listed order):
// * Receiver is not a string (receiver_not_string label).
// * Index is not a positive smi (index_not_positive_smi label).
// * Some other reason (slow_case label). In this case it's
// guaranteed that the above conditions are not violated,
// e.g. it's safe to assume the receiver is a string and the
// index is a positive smi.
// When successful, object, index, and scratch are clobbered.
// Otherwise, scratch and result are clobbered.
static void GenerateFastCharCodeAt(MacroAssembler* masm,
Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_positive_smi,
Label* slow_case);
// Generates code for creating a one-char string from the given char
// code. May do a runtime call, so any register can be clobbered
// and, if the given invoke flag specifies a call, an internal frame
// is required. In tail call mode the result must be eax register.
static void GenerateCharFromCode(MacroAssembler* masm,
Register code,
Register result,
InvokeFlag flag);
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
// overhead. Copying of overlapping regions is not supported.
void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying characters using the rep movs instruction.
// Copies ecx characters from esi to edi. Copying of overlapping regions is
// not supported.
void GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest, // Must be edi.
Register src, // Must be esi.
Register count, // Must be ecx.
Register scratch, // Neither of the above.
bool ascii);
static void GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest, // Must be edi.
Register src, // Must be esi.
Register count, // Must be ecx.
Register scratch, // Neither of above.
bool ascii);
// Probe the symbol table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in register eax.
void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);
// Generate string hash.
void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@ -937,7 +972,7 @@ enum StringAddFlags {
};
class StringAddStub: public StringStubBase {
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@ -954,7 +989,7 @@ class StringAddStub: public StringStubBase {
};
class SubStringStub: public StringStubBase {
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@ -966,7 +1001,7 @@ class SubStringStub: public StringStubBase {
};
class StringCompareStub: public StringStubBase {
class StringCompareStub: public CodeStub {
public:
explicit StringCompareStub() {
}

3
deps/v8/src/ia32/full-codegen-ia32.cc

@ -776,7 +776,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
// Build the shared function info and instantiate the function based
// on it.
Handle<SharedFunctionInfo> function_info =
Compiler::BuildFunctionInfo(expr, script(), this);
if (HasStackOverflow()) return;

75
deps/v8/src/ia32/ic-ia32.cc

@ -491,39 +491,72 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
// -- eax : key (index)
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss, index_ok;
// Pop return address.
// Performing the load early is better in the common case.
__ pop(ebx);
Label miss;
Label not_positive_smi;
Label slow_char_code;
Label got_char_code;
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
Register receiver = edx;
Register index = eax;
Register code = ebx;
Register scratch = ecx;
StringHelper::GenerateFastCharCodeAt(masm,
receiver,
index,
scratch,
code,
&miss, // When not a string.
&not_positive_smi,
&slow_char_code);
// If we didn't bail out, code register contains smi tagged char
// code.
__ bind(&got_char_code);
StringHelper::GenerateCharFromCode(masm, code, eax, JUMP_FUNCTION);
#ifdef DEBUG
__ Abort("Unexpected fall-through from char from code tail call");
#endif
// Check if key is a smi or a heap number.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &index_ok);
__ bind(&not_positive_smi);
ASSERT(kSmiTag == 0);
__ test(index, Immediate(kSmiTagMask));
__ j(zero, &slow_char_code);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(ecx, Factory::heap_number_map());
__ j(not_equal, &miss);
__ bind(&index_ok);
// Push receiver and key on the stack, and make a tail call.
__ push(edx); // receiver
__ push(eax); // key
__ push(ebx); // return address
__ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
// Push receiver and key on the stack (now that we know they are a
// string and a number), and call runtime.
__ bind(&slow_char_code);
__ EnterInternalFrame();
__ push(receiver);
__ push(index);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
ASSERT(!code.is(eax));
__ mov(code, eax);
__ LeaveInternalFrame();
// Check if the runtime call returned NaN char code. If yes, return
// undefined. Otherwise, we can continue.
if (FLAG_debug_code) {
ASSERT(kSmiTag == 0);
__ test(code, Immediate(kSmiTagMask));
__ j(zero, &got_char_code);
__ mov(scratch, FieldOperand(code, HeapObject::kMapOffset));
__ cmp(scratch, Factory::heap_number_map());
__ Assert(equal, "StringCharCodeAt must return smi or heap number");
}
__ cmp(code, Factory::nan_value());
__ j(not_equal, &got_char_code);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
__ bind(&miss);
__ push(ebx);
GenerateMiss(masm);
}
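
A standalone model of the dispatch GenerateString now builds around the StringHelper entry points; the enum values correspond to the labels in the listing above, and the boolean parameters stand in for the checks the generated code performs.

enum StringKeyedLoadOutcome { GOT_CHAR_CODE, IC_MISS, RUNTIME_CALL };

StringKeyedLoadOutcome ClassifyStringKeyedLoad(bool receiver_is_string,
                                               bool index_is_positive_smi,
                                               bool index_is_smi,
                                               bool index_is_heap_number,
                                               bool flat_string_fast_path) {
  if (!receiver_is_string) return IC_MISS;          // label miss
  if (!index_is_positive_smi) {
    // label not_positive_smi: a (negative) smi or a heap number index
    // is handled by the runtime; anything else is a miss.
    return (index_is_smi || index_is_heap_number) ? RUNTIME_CALL : IC_MISS;
  }
  if (!flat_string_fast_path) return RUNTIME_CALL;  // label slow_char_code
  return GOT_CHAR_CODE;                             // label got_char_code
}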

62
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -143,7 +143,17 @@ void MacroAssembler::RecordWrite(Register object, int offset,
InNewSpace(object, value, equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// The offset is relative to a tagged or untagged HeapObject pointer,
// so either offset or offset + kHeapObjectTag must be a
// multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
// We use optimized write barrier code if the word being written to is not in
// a large object chunk or is in the first page of a large object chunk.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
@ -1396,16 +1406,28 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(edi));
// Load the builtins object into target register.
mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(edi, FieldOperand(edi, GlobalObject::kBuiltinsOffset));
int builtins_offset =
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
mov(edi, FieldOperand(edi, builtins_offset));
// Load the code entry point from the function into the target register.
mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
// Load the code entry point from the builtins object.
mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
if (FLAG_debug_code) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(target);
mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
cmp(target, Operand(esp, 0));
Assert(equal, "Builtin code object changed");
pop(target);
}
lea(target, FieldOperand(target, Code::kHeaderSize));
}
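
A sketch of the new lookup: the code entry is read straight from the builtins object's per-builtin code slot instead of via the builtin function's SharedFunctionInfo, which debug builds now read only to cross-check the code object. The struct below is a model, not V8's layout; the array size is illustrative.

struct BuiltinsObjectModel {
  void* function_slot[64];  // JSFunction per builtin id
  void* code_slot[64];      // Code object per builtin id
};

void* GetBuiltinEntryModel(const BuiltinsObjectModel& builtins, int id) {
  // Debug builds additionally verify this against the code object held
  // by the function in function_slot[id].
  return builtins.code_slot[id];
}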
@ -1523,6 +1545,21 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
}
void MacroAssembler::CheckStackAlignment() {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
// Abort if stack is not aligned.
int3();
bind(&alignment_as_expected);
}
}
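The generated check reduces to a single mask test; a minimal standalone sketch of the same condition, assuming a power-of-two alignment such as 16 bytes:

#include <cstdint>

// Returns true when esp satisfies the activation frame alignment.
// frame_alignment must be a power of two, as the ASSERT above enforces.
bool IsStackAligned(uintptr_t esp, int frame_alignment) {
  uintptr_t frame_alignment_mask = frame_alignment - 1;
  return (esp & frame_alignment_mask) == 0;  // test(esp, Immediate(mask))
}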
void MacroAssembler::Abort(const char* msg) {
// We want to pass the msg string like a smi to avoid GC
// problems; however, msg is not guaranteed to be aligned
@@ -1622,6 +1659,11 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
// Check stack alignment.
if (FLAG_debug_code) {
CheckStackAlignment();
}
call(Operand(function));
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));

3
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -463,6 +463,9 @@ class MacroAssembler: public Assembler {
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Check that the stack is aligned.
void CheckStackAlignment();
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }

4
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -38,7 +38,7 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - edx : current character. Must be loaded using LoadCurrentCharacter
@@ -1195,6 +1195,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

6
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@@ -31,14 +31,14 @@
namespace v8 {
namespace internal {
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerIA32() { }
virtual ~RegExpMacroAssemblerIA32() { }
};
#else
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
@@ -208,7 +208,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

82
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -950,6 +950,26 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
GlobalObject* global,
String* name,
Register scratch,
Label* miss) {
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) return probe;
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
Immediate(Factory::the_hole_value()));
__ j(not_equal, miss, not_taken);
return cell;
}
#undef __
#define __ ACCESS_MASM(masm())
@@ -968,21 +988,19 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
push_at_depth, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
while (object != holder) {
if (object->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(object);
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) {
set_failure(Failure::cast(probe));
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(object),
name,
scratch,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
return result;
}
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
Immediate(Factory::the_hole_value()));
__ j(not_equal, miss, not_taken);
}
object = JSObject::cast(object->GetPrototype());
}
@@ -1948,6 +1966,48 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
// -- esp[0] : return address
// -----------------------------------
Label miss;
// Check that the receiver isn't a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
CheckPrototypes(object, eax, last, ebx, edx, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(last),
name,
edx,
&miss);
if (cell->IsFailure()) return cell;
}
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, Factory::undefined_value());
__ ret(0);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NONEXISTENT, Heap::empty_string());
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,

7
deps/v8/src/ic.cc

@@ -694,8 +694,8 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Bail out if the result is not cacheable.
if (!lookup->IsCacheable()) return;
// Loading properties from values is not common, so don't try to
// deal with non-JS objects here.
@@ -709,6 +709,9 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = pre_monomorphic_stub();
} else if (!lookup->IsProperty()) {
// Nonexistent property. The result is undefined.
code = StubCache::ComputeLoadNonexistent(*name, *receiver);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {

34
deps/v8/src/jsregexp.cc

@@ -43,7 +43,7 @@
#include "regexp-macro-assembler-irregexp.h"
#include "regexp-stack.h"
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -122,6 +122,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
}
FlattenString(pattern);
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
PostponeInterruptsScope postpone;
RegExpCompileData parse_result;
FlatStringReader reader(pattern);
if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
@@ -235,10 +236,10 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// returns false.
bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
#ifdef V8_NATIVE_REGEXP
if (compiled_code->IsCode()) return true;
#else // ! V8_NATIVE_REGEXP (RegExp interpreter code)
#ifdef V8_INTERPRETED_REGEXP
if (compiled_code->IsByteArray()) return true;
#else // V8_INTERPRETED_REGEXP (RegExp native code)
if (compiled_code->IsCode()) return true;
#endif
return CompileIrregexp(re, is_ascii);
}
@@ -247,6 +248,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
// Compile the RegExp.
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
PostponeInterruptsScope postpone;
Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
if (entry->IsJSObject()) {
// If it's a JSObject, a previous compilation failed and threw this object.
@@ -358,14 +360,14 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
return -1;
}
#ifdef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
#else // V8_INTERPRETED_REGEXP
// Native regexp only needs room to output captures. Registers are handled
// internally.
return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
#else // !V8_NATIVE_REGEXP
// Byte-code regexp needs space allocated for all its registers.
return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}
@@ -379,7 +381,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output.length() >=
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
@@ -412,7 +414,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
#else // ndef V8_NATIVE_REGEXP
#else // V8_INTERPRETED_REGEXP
ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
bool is_ascii = subject->IsAsciiRepresentation();
@@ -433,7 +435,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
return RE_SUCCESS;
}
return RE_FAILURE;
#endif // ndef V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}
@@ -444,7 +446,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
// Prepare space for the return values.
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
String* pattern = jsregexp->Pattern();
@@ -5230,7 +5232,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
NodeInfo info = *node->info();
// Create the correct assembler for the architecture.
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
// Native regexp implementation.
NativeRegExpMacroAssembler::Mode mode =
@@ -5245,11 +5247,11 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // ! V8_NATIVE_REGEXP
#else // V8_INTERPRETED_REGEXP
// Interpreted regexp implementation.
EmbeddedVector<byte, 1024> codes;
RegExpMacroAssemblerIrregexp macro_assembler(codes);
#endif
#endif // V8_INTERPRETED_REGEXP
return compiler.Assemble(&macro_assembler,
node,

6
deps/v8/src/jsregexp.h

@@ -42,10 +42,10 @@ class RegExpImpl {
public:
// Whether V8 is compiled with native regexp support or not.
static bool UsesNativeRegExp() {
#ifdef V8_NATIVE_REGEXP
return true;
#else
#ifdef V8_INTERPRETED_REGEXP
return false;
#else
return true;
#endif
}

748
deps/v8/src/liveedit-debugger.js

@@ -28,457 +28,463 @@
// LiveEdit feature implementation. The script should be executed after
// debug-debugger.js.
// A LiveEdit namespace is declared inside a single function constructor.
Debug.LiveEdit = new function() {
// Changes script text and recompiles all relevant functions if possible.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
//
// Only one function will have its Code changed as a result of this function.
// All nested functions (should they have any instances at the moment) are left
// unchanged and re-linked to a newly created script instance representing the
// old version of the source. (Generally speaking, during the change all nested
// functions are erased and a completely different set of nested functions is
// introduced.) All other functions just have their positions updated.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable description
// of what happened.
Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
change_log) {
// So far the function works as namespace.
var liveedit = Debug.LiveEditChangeScript;
var Assert = liveedit.Assert;
// Fully compiles the source string as a script. Returns an Array of
// FunctionCompileInfo -- descriptions of all functions of the script.
// Elements of the array are ordered by start positions of functions (from top
// to bottom) in the source. The fields outer_index and next_sibling_index help
// to navigate the nesting structure of functions.
// Changes script text and recompiles all relevant functions if possible.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
//
// The script is used for compilation, because it produces code that
// needs to be linked with some particular script (for nested functions).
function DebugGatherCompileInfo(source) {
// Get function info; elements are partially sorted (it is a tree
// of nested functions serialized as parent followed by serialized children).
var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
// Sort function infos by start position field.
var compile_info = new Array();
var old_index_map = new Array();
for (var i = 0; i < raw_compile_info.length; i++) {
compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
old_index_map.push(i);
}
// Only one function will have its Code changed as a result of this function.
// All nested functions (should they have any instances at the moment) are
// left unchanged and re-linked to a newly created script instance
// representing the old version of the source. (Generally speaking, during
// the change all nested functions are erased and a completely different set
// of nested functions is introduced.) All other functions just have their
// positions updated.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable
// description of what happened.
function ApplyPatch(script, change_pos, change_len, new_str,
change_log) {
// Fully compiles the source string as a script. Returns an Array of
// FunctionCompileInfo -- descriptions of all functions of the script.
// Elements of the array are ordered by start positions of functions (from top
// to bottom) in the source. The fields outer_index and next_sibling_index help
// to navigate the nesting structure of functions.
//
// The script is used for compilation, because it produces code that
// needs to be linked with some particular script (for nested functions).
function DebugGatherCompileInfo(source) {
// Get function info; elements are partially sorted (it is a tree of
// nested functions serialized as parent followed by serialized children).
var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
// Sort function infos by start position field.
var compile_info = new Array();
var old_index_map = new Array();
for (var i = 0; i < raw_compile_info.length; i++) {
compile_info.push(new FunctionCompileInfo(raw_compile_info[i]));
old_index_map.push(i);
}
for (var i = 0; i < compile_info.length; i++) {
var k = i;
for (var j = i + 1; j < compile_info.length; j++) {
if (compile_info[k].start_position > compile_info[j].start_position) {
k = j;
for (var i = 0; i < compile_info.length; i++) {
var k = i;
for (var j = i + 1; j < compile_info.length; j++) {
if (compile_info[k].start_position > compile_info[j].start_position) {
k = j;
}
}
if (k != i) {
var temp_info = compile_info[k];
var temp_index = old_index_map[k];
compile_info[k] = compile_info[i];
old_index_map[k] = old_index_map[i];
compile_info[i] = temp_info;
old_index_map[i] = temp_index;
}
}
if (k != i) {
var temp_info = compile_info[k];
var temp_index = old_index_map[k];
compile_info[k] = compile_info[i];
old_index_map[k] = old_index_map[i];
compile_info[i] = temp_info;
old_index_map[i] = temp_index;
}
}
// After sorting, update the outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
// The recursive function that goes over all children of a particular
// node (i.e. function info).
function ResetIndexes(new_parent_index, old_parent_index) {
var previous_sibling = -1;
while (current_index < compile_info.length &&
compile_info[current_index].outer_index == old_parent_index) {
var saved_index = current_index;
compile_info[saved_index].outer_index = new_parent_index;
// After sorting, update the outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
// The recursive function that goes over all children of a particular
// node (i.e. function info).
function ResetIndexes(new_parent_index, old_parent_index) {
var previous_sibling = -1;
while (current_index < compile_info.length &&
compile_info[current_index].outer_index == old_parent_index) {
var saved_index = current_index;
compile_info[saved_index].outer_index = new_parent_index;
if (previous_sibling != -1) {
compile_info[previous_sibling].next_sibling_index = saved_index;
}
previous_sibling = saved_index;
current_index++;
ResetIndexes(saved_index, old_index_map[saved_index]);
}
if (previous_sibling != -1) {
compile_info[previous_sibling].next_sibling_index = saved_index;
compile_info[previous_sibling].next_sibling_index = -1;
}
previous_sibling = saved_index;
current_index++;
ResetIndexes(saved_index, old_index_map[saved_index]);
}
if (previous_sibling != -1) {
compile_info[previous_sibling].next_sibling_index = -1;
}
}
ResetIndexes(-1, -1);
Assert(current_index == compile_info.length);
ResetIndexes(-1, -1);
Assert(current_index == compile_info.length);
return compile_info;
}
// Given a position, finds a function that fully includes the entire change.
function FindChangedFunction(compile_info, offset, len) {
// First condition: function should start before the change region.
// Function #0 (whole-script function) always does, but we want
// one that is later in this list.
var index = 0;
while (index + 1 < compile_info.length &&
compile_info[index + 1].start_position <= offset) {
index++;
return compile_info;
}
// Now we are at the last function that begins before the change
// region. The function that covers entire change region is either
// this function or the enclosing one.
for (; compile_info[index].end_position < offset + len;
index = compile_info[index].outer_index) {
Assert(index != -1);
// Given a position, finds a function that fully includes the entire
// change.
function FindChangedFunction(compile_info, offset, len) {
// First condition: function should start before the change region.
// Function #0 (whole-script function) always does, but we want
// one that is later in this list.
var index = 0;
while (index + 1 < compile_info.length &&
compile_info[index + 1].start_position <= offset) {
index++;
}
// Now we are at the last function that begins before the change
// region. The function that covers entire change region is either
// this function or the enclosing one.
for (; compile_info[index].end_position < offset + len;
index = compile_info[index].outer_index) {
Assert(index != -1);
}
return index;
}
return index;
}
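A standalone sketch of the same two-phase search, in C++ for uniformity with the rest of the commit (the struct and field names are illustrative):

#include <vector>

struct Info { int start_position; int end_position; int outer_index; };

// Returns the index of the innermost function whose source range fully
// contains the change [offset, offset + len).
int FindChangedFunctionSketch(const std::vector<Info>& info,
                              int offset, int len) {
  // Advance to the last function that starts at or before the change.
  int index = 0;
  while (index + 1 < static_cast<int>(info.size()) &&
         info[index + 1].start_position <= offset) {
    index++;
  }
  // Walk outward until the candidate also covers the end of the change;
  // outer_index == -1 would indicate inconsistent compile info.
  while (info[index].end_position < offset + len) {
    index = info[index].outer_index;
  }
  return index;
}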
// Variable forward declarations. Preprocessor "Minifier" needs them.
var old_compile_info;
var shared_infos;
// Finds the SharedFunctionInfo that corresponds to the compile info with
// the given index in the old version of the script.
function FindFunctionInfo(index) {
var old_info = old_compile_info[index];
for (var i = 0; i < shared_infos.length; i++) {
var info = shared_infos[i];
if (info.start_position == old_info.start_position &&
info.end_position == old_info.end_position) {
return info;
// Variable forward declarations. Preprocessor "Minifier" needs them.
var old_compile_info;
var shared_infos;
// Finds the SharedFunctionInfo that corresponds to the compile info with
// the given index in the old version of the script.
function FindFunctionInfo(index) {
var old_info = old_compile_info[index];
for (var i = 0; i < shared_infos.length; i++) {
var info = shared_infos[i];
if (info.start_position == old_info.start_position &&
info.end_position == old_info.end_position) {
return info;
}
}
}
}
// Replaces function's Code.
function PatchCode(new_info, shared_info) {
%LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
// Replaces function's Code.
function PatchCode(new_info, shared_info) {
%LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
change_log.push( {function_patched: new_info.function_name} );
}
var change_len_old;
var change_len_new;
// Translate position in old version of script into position in new
// version of script.
function PosTranslator(old_pos) {
if (old_pos <= change_pos) {
return old_pos;
}
if (old_pos >= change_pos + change_len_old) {
return old_pos + change_len_new - change_len_old;
change_log.push( {function_patched: new_info.function_name} );
}
return -1;
}
var position_change_array;
var position_patch_report;
function PatchPositions(new_info, shared_info) {
if (!shared_info) {
// TODO(LiveEdit): explain what is happening.
return;
var change_len_old;
var change_len_new;
// Translate position in old version of script into position in new
// version of script.
function PosTranslator(old_pos) {
if (old_pos <= change_pos) {
return old_pos;
}
if (old_pos >= change_pos + change_len_old) {
return old_pos + change_len_new - change_len_old;
}
return -1;
}
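The translation is pure arithmetic; a hedged C++ restatement (the function and parameter names are illustrative):

// Maps a position in the old source to the corresponding position in the
// new source; returns -1 for positions inside the replaced region.
int TranslatePosition(int old_pos, int change_pos,
                      int change_len_old, int change_len_new) {
  if (old_pos <= change_pos) return old_pos;  // before the change: unchanged
  if (old_pos >= change_pos + change_len_old) {
    // After the change: shift by the growth (or shrinkage) of the region.
    return old_pos + change_len_new - change_len_old;
  }
  return -1;  // inside the changed region: no well-defined counterpart
}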
var breakpoint_position_update = %LiveEditPatchFunctionPositions(
shared_info.raw_array, position_change_array);
for (var i = 0; i < breakpoint_position_update.length; i += 2) {
var new_pos = breakpoint_position_update[i];
var break_point_object = breakpoint_position_update[i + 1];
change_log.push( { breakpoint_position_update:
{ from: break_point_object.source_position(), to: new_pos } } );
break_point_object.updateSourcePosition(new_pos, script);
var position_change_array;
var position_patch_report;
function PatchPositions(new_info, shared_info) {
if (!shared_info) {
// TODO(LiveEdit): explain what is happening.
return;
}
var breakpoint_position_update = %LiveEditPatchFunctionPositions(
shared_info.raw_array, position_change_array);
for (var i = 0; i < breakpoint_position_update.length; i += 2) {
var new_pos = breakpoint_position_update[i];
var break_point_object = breakpoint_position_update[i + 1];
change_log.push( { breakpoint_position_update:
{ from: break_point_object.source_position(), to: new_pos } } );
break_point_object.updateSourcePosition(new_pos, script);
}
position_patch_report.push( { name: new_info.function_name } );
}
position_patch_report.push( { name: new_info.function_name } );
}
var link_to_old_script_report;
var old_script;
// Makes a function associated with another instance of a script (the
// one representing its old version). This way the function still
// may access its own text.
function LinkToOldScript(shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
var link_to_old_script_report;
var old_script;
// Makes a function associated with another instance of a script (the
// one representing its old version). This way the function still
// may access its own text.
function LinkToOldScript(shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
link_to_old_script_report.push( { name: shared_info.function_name } );
}
link_to_old_script_report.push( { name: shared_info.function_name } );
}
var old_source = script.source;
var change_len_old = change_len;
var change_len_new = new_str.length;
var old_source = script.source;
var change_len_old = change_len;
var change_len_new = new_str.length;
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
// Find all SharedFunctionInfo's that are compiled from this script.
var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
// Find all SharedFunctionInfo's that are compiled from this script.
var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
var shared_infos = new Array();
var shared_infos = new Array();
for (var i = 0; i < shared_raw_list.length; i++) {
shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
}
for (var i = 0; i < shared_raw_list.length; i++) {
shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
}
// Gather compile information about old version of script.
var old_compile_info = DebugGatherCompileInfo(old_source);
// Gather compile information about old version of script.
var old_compile_info = DebugGatherCompileInfo(old_source);
// Gather compile information about new version of script.
var new_compile_info;
try {
new_compile_info = DebugGatherCompileInfo(new_source);
} catch (e) {
throw new liveedit.Failure("Failed to compile new version of script: " + e);
}
// Gather compile information about new version of script.
var new_compile_info;
try {
new_compile_info = DebugGatherCompileInfo(new_source);
} catch (e) {
throw new Failure("Failed to compile new version of script: " + e);
}
// An index of a single function, that is going to have its code replaced.
var function_being_patched =
FindChangedFunction(old_compile_info, change_pos, change_len_old);
// In the old and new script versions, the function with a change should
// have the same index.
var function_being_patched2 =
FindChangedFunction(new_compile_info, change_pos, change_len_new);
Assert(function_being_patched == function_being_patched2,
"inconsistent old/new compile info");
// Check that function being patched has the same expectations in a new
// version. Otherwise we cannot safely patch its behavior and should
// choose the outer function instead.
while (!liveedit.CompareFunctionExpectations(
old_compile_info[function_being_patched],
new_compile_info[function_being_patched])) {
Assert(old_compile_info[function_being_patched].outer_index ==
new_compile_info[function_being_patched].outer_index);
function_being_patched =
old_compile_info[function_being_patched].outer_index;
Assert(function_being_patched != -1);
}
// An index of a single function, that is going to have its code replaced.
var function_being_patched =
FindChangedFunction(old_compile_info, change_pos, change_len_old);
// In the old and new script versions, the function with a change should
// have the same index.
var function_being_patched2 =
FindChangedFunction(new_compile_info, change_pos, change_len_new);
Assert(function_being_patched == function_being_patched2,
"inconsistent old/new compile info");
// Check that function being patched has the same expectations in a new
// version. Otherwise we cannot safely patch its behavior and should
// choose the outer function instead.
while (!CompareFunctionExpectations(
old_compile_info[function_being_patched],
new_compile_info[function_being_patched])) {
Assert(old_compile_info[function_being_patched].outer_index ==
new_compile_info[function_being_patched].outer_index);
function_being_patched =
old_compile_info[function_being_patched].outer_index;
Assert(function_being_patched != -1);
}
// Check that function being patched is not currently on stack.
liveedit.CheckStackActivations(
[ FindFunctionInfo(function_being_patched) ], change_log );
// Check that function being patched is not currently on stack.
CheckStackActivations(
[ FindFunctionInfo(function_being_patched) ], change_log );
// Committing all changes.
var old_script_name = liveedit.CreateNameForOldScript(script);
// Committing all changes.
var old_script_name = CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
var old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
// Update the script text and create a new script representing an old
// version of the script.
var old_script = %LiveEditReplaceScript(script, new_source,
old_script_name);
PatchCode(new_compile_info[function_being_patched],
FindFunctionInfo(function_being_patched));
PatchCode(new_compile_info[function_being_patched],
FindFunctionInfo(function_being_patched));
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
var position_change_array = [ change_pos,
change_pos + change_len_old,
change_pos + change_len_new ];
var position_change_array = [ change_pos,
change_pos + change_len_old,
change_pos + change_len_new ];
// Update positions of all outer functions (i.e. all functions that
// are partially below the function being patched).
for (var i = new_compile_info[function_being_patched].outer_index;
i != -1;
i = new_compile_info[i].outer_index) {
PatchPositions(new_compile_info[i], FindFunctionInfo(i));
}
// Update positions of all outer functions (i.e. all functions that
// are partially below the function being patched).
for (var i = new_compile_info[function_being_patched].outer_index;
i != -1;
i = new_compile_info[i].outer_index) {
PatchPositions(new_compile_info[i], FindFunctionInfo(i));
}
// Update positions of all functions that are fully below the function
// being patched.
var old_next_sibling =
old_compile_info[function_being_patched].next_sibling_index;
var new_next_sibling =
new_compile_info[function_being_patched].next_sibling_index;
// We simply go over the tail of both old and new lists. Their tails should
// have an identical structure.
if (old_next_sibling == -1) {
Assert(new_next_sibling == -1);
} else {
Assert(old_compile_info.length - old_next_sibling ==
new_compile_info.length - new_next_sibling);
// Update positions of all functions that are fully below the function
// being patched.
var old_next_sibling =
old_compile_info[function_being_patched].next_sibling_index;
var new_next_sibling =
new_compile_info[function_being_patched].next_sibling_index;
// We simply go over the tail of both old and new lists. Their tails should
// have an identical structure.
if (old_next_sibling == -1) {
Assert(new_next_sibling == -1);
} else {
Assert(old_compile_info.length - old_next_sibling ==
new_compile_info.length - new_next_sibling);
for (var i = old_next_sibling, j = new_next_sibling;
i < old_compile_info.length; i++, j++) {
PatchPositions(new_compile_info[j], FindFunctionInfo(i));
for (var i = old_next_sibling, j = new_next_sibling;
i < old_compile_info.length; i++, j++) {
PatchPositions(new_compile_info[j], FindFunctionInfo(i));
}
}
}
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
// We need to link to old script all former nested functions.
for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
LinkToOldScript(FindFunctionInfo(i), old_script);
// We need to link to old script all former nested functions.
for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
LinkToOldScript(FindFunctionInfo(i), old_script);
}
}
}
Debug.LiveEditChangeScript.Assert = function(condition, message) {
if (!condition) {
if (message) {
throw "Assert " + message;
} else {
throw "Assert";
// Function is public.
this.ApplyPatch = ApplyPatch;
function Assert(condition, message) {
if (!condition) {
if (message) {
throw "Assert " + message;
} else {
throw "Assert";
}
}
}
}
// An object describing function compilation details. Its index fields
// apply to indexes inside array that stores these objects.
Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.param_num = raw_array[3];
this.code = raw_array[4];
this.scope_info = raw_array[5];
this.outer_index = raw_array[6];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
// A structure describing SharedFunctionInfo.
Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.info = raw_array[3];
this.raw_array = raw_array;
}
// Adds a suffix to script name to mark that it is old version.
Debug.LiveEditChangeScript.CreateNameForOldScript = function(script) {
// TODO(635): try better than this; support several changes.
return script.name + " (old)";
}
// Compares the old and new versions of a function's interface to tell
// whether it changed or not.
Debug.LiveEditChangeScript.CompareFunctionExpectations =
function(function_info1, function_info2) {
// Check that the function has the same number of parameters (there may
// exist an adapter that won't survive a change in the number of parameters).
if (function_info1.param_num != function_info2.param_num) {
return false;
// An object describing function compilation details. Its index fields
// apply to indexes inside array that stores these objects.
function FunctionCompileInfo(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.param_num = raw_array[3];
this.code = raw_array[4];
this.scope_info = raw_array[5];
this.outer_index = raw_array[6];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
if (!scope_info1) {
return !scope_info2;
function SharedInfoWrapper(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.info = raw_array[3];
this.raw_array = raw_array;
}
if (scope_info1.length != scope_info2.length) {
return false;
// Adds a suffix to script name to mark that it is old version.
function CreateNameForOldScript(script) {
// TODO(635): try better than this; support several changes.
return script.name + " (old)";
}
// Check that outer scope structure is not changed. Otherwise the function
// will not properly work with existing scopes.
return scope_info1.toString() == scope_info2.toString();
}
// For an array of wrapped shared function infos, checks that none of them
// have activations on the stack (of any thread). Throws a Failure exception
// if this proves to be false.
Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
change_log) {
var liveedit = Debug.LiveEditChangeScript;
// Compares the old and new versions of a function's interface to tell
// whether it changed or not.
function CompareFunctionExpectations(function_info1, function_info2) {
// Check that the function has the same number of parameters (there may
// exist an adapter that won't survive a change in the number of parameters).
if (function_info1.param_num != function_info2.param_num) {
return false;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
var shared_list = new Array();
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
}
var result = %LiveEditCheckAndDropActivations(shared_list, true);
if (result[shared_list.length]) {
// Extra array element may contain error message.
throw new liveedit.Failure(result[shared_list.length]);
}
if (!scope_info1) {
return !scope_info2;
}
var problems = new Array();
var dropped = new Array();
for (var i = 0; i < shared_list.length; i++) {
var shared = shared_wrapper_list[i];
if (result[i] == liveedit.FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
dropped.push({ name: shared.function_name } );
} else if (result[i] != liveedit.FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
var description = {
name: shared.function_name,
start_pos: shared.start_position,
end_pos: shared.end_position,
replace_problem:
liveedit.FunctionPatchabilityStatus.SymbolName(result[i])
};
problems.push(description);
if (scope_info1.length != scope_info2.length) {
return false;
}
// Check that outer scope structure is not changed. Otherwise the function
// will not properly work with existing scopes.
return scope_info1.toString() == scope_info2.toString();
}
if (dropped.length > 0) {
change_log.push({ dropped_from_stack: dropped });
}
if (problems.length > 0) {
change_log.push( { functions_on_stack: problems } );
throw new liveedit.Failure("Blocked by functions on stack");
}
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
AVAILABLE_FOR_PATCH: 1,
BLOCKED_ON_ACTIVE_STACK: 2,
BLOCKED_ON_OTHER_STACK: 3,
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5
}
// Minifier forward declaration.
var FunctionPatchabilityStatus;
// For an array of wrapped shared function infos, checks that none of them
// have activations on the stack (of any thread). Throws a Failure exception
// if this proves to be false.
function CheckStackActivations(shared_wrapper_list, change_log) {
var shared_list = new Array();
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
}
var result = %LiveEditCheckAndDropActivations(shared_list, true);
if (result[shared_list.length]) {
// Extra array element may contain error message.
throw new Failure(result[shared_list.length]);
}
Debug.LiveEditChangeScript.FunctionPatchabilityStatus.SymbolName =
function(code) {
var enum_object = Debug.LiveEditChangeScript.FunctionPatchabilityStatus;
for (var name in enum_object) {
if (enum_object[name] == code) {
return name;
var problems = new Array();
var dropped = new Array();
for (var i = 0; i < shared_list.length; i++) {
var shared = shared_wrapper_list[i];
if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
dropped.push({ name: shared.function_name } );
} else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
var description = {
name: shared.function_name,
start_pos: shared.start_position,
end_pos: shared.end_position,
replace_problem:
FunctionPatchabilityStatus.SymbolName(result[i])
};
problems.push(description);
}
}
if (dropped.length > 0) {
change_log.push({ dropped_from_stack: dropped });
}
if (problems.length > 0) {
change_log.push( { functions_on_stack: problems } );
throw new Failure("Blocked by functions on stack");
}
}
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
var FunctionPatchabilityStatus = {
AVAILABLE_FOR_PATCH: 1,
BLOCKED_ON_ACTIVE_STACK: 2,
BLOCKED_ON_OTHER_STACK: 3,
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5
}
// A logical failure in the liveedit process. This means that change_log
// is a valid and consistent description of what happened.
Debug.LiveEditChangeScript.Failure = function(message) {
this.message = message;
}
FunctionPatchabilityStatus.SymbolName = function(code) {
var enum_object = FunctionPatchabilityStatus;
for (var name in enum_object) {
if (enum_object[name] == code) {
return name;
}
}
}
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
}
// A testing entry.
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}
// A logical failure in the liveedit process. This means that change_log
// is a valid and consistent description of what happened.
function Failure(message) {
this.message = message;
}
// Function (constructor) is public.
this.Failure = Failure;
// A LiveEdit namespace is declared inside a single function constructor.
Debug.LiveEdit = new function() {
var LiveEdit = this;
Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
}
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}
// Function is public.
this.GetPcFromSourcePos = GetPcFromSourcePos;
// LiveEdit main entry point: changes a script text to a new string.
LiveEdit.SetScriptSource = function(script, new_source, change_log) {
function SetScriptSource(script, new_source, change_log) {
var old_source = script.source;
var diff = FindSimpleDiff(old_source, new_source);
if (!diff) {
return;
}
Debug.LiveEditChangeScript(script, diff.change_pos, diff.old_len,
ApplyPatch(script, diff.change_pos, diff.old_len,
new_source.substring(diff.change_pos, diff.change_pos + diff.new_len),
change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
function CompareStringsLinewise(s1, s2) {
return %LiveEditCompareStringsLinewise(s1, s2);
}
// Function is public (for tests).
this.CompareStringsLinewise = CompareStringsLinewise;
// Finds a difference between 2 strings in form of a single chunk.

352
deps/v8/src/liveedit.cc

@@ -42,6 +42,358 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// A simple implementation of a dynamic programming algorithm. It solves
// the problem of finding the difference of 2 arrays. It uses a table of
// results of subproblems. Each cell contains a number together with a 2-bit
// flag that helps build the chunk list.
class Differencer {
public:
explicit Differencer(Compare::Input* input)
: input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
buffer_ = NewArray<int>(len1_ * len2_);
}
~Differencer() {
DeleteArray(buffer_);
}
void Initialize() {
int array_size = len1_ * len2_;
for (int i = 0; i < array_size; i++) {
buffer_[i] = kEmptyCellValue;
}
}
// Makes sure that result for the full problem is calculated and stored
// in the table together with flags showing a path through subproblems.
void FillTable() {
CompareUpToTail(0, 0);
}
void SaveResult(Compare::Output* chunk_writer) {
ResultWriter writer(chunk_writer);
int pos1 = 0;
int pos2 = 0;
while (true) {
if (pos1 < len1_) {
if (pos2 < len2_) {
Direction dir = get_direction(pos1, pos2);
switch (dir) {
case EQ:
writer.eq();
pos1++;
pos2++;
break;
case SKIP1:
writer.skip1(1);
pos1++;
break;
case SKIP2:
case SKIP_ANY:
writer.skip2(1);
pos2++;
break;
default:
UNREACHABLE();
}
} else {
writer.skip1(len1_ - pos1);
break;
}
} else {
if (len2_ != pos2) {
writer.skip2(len2_ - pos2);
}
break;
}
}
writer.close();
}
private:
Compare::Input* input_;
int* buffer_;
int len1_;
int len2_;
enum Direction {
EQ = 0,
SKIP1,
SKIP2,
SKIP_ANY,
MAX_DIRECTION_FLAG_VALUE = SKIP_ANY
};
// Computes the result for a subtask and optionally caches it in the buffer
// table. All result values are shifted to make space for flags in the lower
// bits.
int CompareUpToTail(int pos1, int pos2) {
if (pos1 < len1_) {
if (pos2 < len2_) {
int cached_res = get_value4(pos1, pos2);
if (cached_res == kEmptyCellValue) {
Direction dir;
int res;
if (input_->equals(pos1, pos2)) {
res = CompareUpToTail(pos1 + 1, pos2 + 1);
dir = EQ;
} else {
int res1 = CompareUpToTail(pos1 + 1, pos2) +
(1 << kDirectionSizeBits);
int res2 = CompareUpToTail(pos1, pos2 + 1) +
(1 << kDirectionSizeBits);
if (res1 == res2) {
res = res1;
dir = SKIP_ANY;
} else if (res1 < res2) {
res = res1;
dir = SKIP1;
} else {
res = res2;
dir = SKIP2;
}
}
set_value4_and_dir(pos1, pos2, res, dir);
cached_res = res;
}
return cached_res;
} else {
return (len1_ - pos1) << kDirectionSizeBits;
}
} else {
return (len2_ - pos2) << kDirectionSizeBits;
}
}
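Ignoring the 2-bit flag shift, the recursion above is the standard deletion/insertion edit-distance recurrence. Writing $D(i,j)$ for the cost of differencing the suffixes starting at positions $i$ and $j$:

$$D(i,j) = \begin{cases} D(i+1,\, j+1) & \text{if } a_i = b_j \\ 1 + \min\bigl(D(i+1,\, j),\; D(i,\, j+1)\bigr) & \text{otherwise} \end{cases}$$

with boundaries $D(\mathit{len}_1, j) = \mathit{len}_2 - j$ and $D(i, \mathit{len}_2) = \mathit{len}_1 - i$; the stored direction flag records which branch produced the minimum.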
inline int& get_cell(int i1, int i2) {
return buffer_[i1 + i2 * len1_];
}
// Each cell keeps a value plus direction. Value is multiplied by 4.
void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
ASSERT((value4 & kDirectionMask) == 0);
get_cell(i1, i2) = value4 | dir;
}
int get_value4(int i1, int i2) {
return get_cell(i1, i2) & (kMaxUInt32 ^ kDirectionMask);
}
Direction get_direction(int i1, int i2) {
return static_cast<Direction>(get_cell(i1, i2) & kDirectionMask);
}
static const int kDirectionSizeBits = 2;
static const int kDirectionMask = (1 << kDirectionSizeBits) - 1;
static const int kEmptyCellValue = -1 << kDirectionSizeBits;
// This method only holds a static assert statement (unfortunately you
// cannot place one at class scope).
void StaticAssertHolder() {
STATIC_ASSERT(MAX_DIRECTION_FLAG_VALUE < (1 << kDirectionSizeBits));
}
class ResultWriter {
public:
explicit ResultWriter(Compare::Output* chunk_writer)
: chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
}
void eq() {
FlushChunk();
pos1_++;
pos2_++;
}
void skip1(int len1) {
StartChunk();
pos1_ += len1;
}
void skip2(int len2) {
StartChunk();
pos2_ += len2;
}
void close() {
FlushChunk();
}
private:
Compare::Output* chunk_writer_;
int pos1_;
int pos2_;
int pos1_begin_;
int pos2_begin_;
bool has_open_chunk_;
void StartChunk() {
if (!has_open_chunk_) {
pos1_begin_ = pos1_;
pos2_begin_ = pos2_;
has_open_chunk_ = true;
}
}
void FlushChunk() {
if (has_open_chunk_) {
chunk_writer_->AddChunk(pos1_begin_, pos2_begin_,
pos1_ - pos1_begin_, pos2_ - pos2_begin_);
has_open_chunk_ = false;
}
}
};
};
void Compare::CalculateDifference(Compare::Input* input,
Compare::Output* result_writer) {
Differencer differencer(input);
differencer.Initialize();
differencer.FillTable();
differencer.SaveResult(result_writer);
}
static bool CompareSubstrings(Handle<String> s1, int pos1,
Handle<String> s2, int pos2, int len) {
static StringInputBuffer buf1;
static StringInputBuffer buf2;
buf1.Reset(*s1);
buf1.Seek(pos1);
buf2.Reset(*s2);
buf2.Seek(pos2);
for (int i = 0; i < len; i++) {
ASSERT(buf1.has_more() && buf2.has_more());
if (buf1.GetNext() != buf2.GetNext()) {
return false;
}
}
return true;
}
// Wraps a raw n-element line_ends array as a list of n+1 lines. The last
// line never has a terminating newline character.
class LineEndsWrapper {
public:
explicit LineEndsWrapper(Handle<String> string)
: ends_array_(CalculateLineEnds(string, false)),
string_len_(string->length()) {
}
int length() {
return ends_array_->length() + 1;
}
// Returns start for any line including start of the imaginary line after
// the last line.
int GetLineStart(int index) {
if (index == 0) {
return 0;
} else {
return GetLineEnd(index - 1);
}
}
int GetLineEnd(int index) {
if (index == ends_array_->length()) {
// End of the last line is always an end of the whole string.
// If the string ends with a new line character, the last line is an
// empty string after this character.
return string_len_;
} else {
return GetPosAfterNewLine(index);
}
}
private:
Handle<FixedArray> ends_array_;
int string_len_;
int GetPosAfterNewLine(int index) {
return Smi::cast(ends_array_->get(index))->value() + 1;
}
};
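For intuition, a self-contained sketch of the same bookkeeping over a plain std::string (illustrative code, not part of the patch):

#include <string>
#include <vector>

struct LineEnds {
  std::vector<int> ends;  // positions of each '\n' in the string
  int len;                // total string length
  explicit LineEnds(const std::string& s) : len(static_cast<int>(s.size())) {
    for (int i = 0; i < len; i++)
      if (s[i] == '\n') ends.push_back(i);
  }
  int length() const { return static_cast<int>(ends.size()) + 1; }
  int GetLineStart(int i) const { return i == 0 ? 0 : GetLineEnd(i - 1); }
  int GetLineEnd(int i) const {
    // The last line always ends at the end of the whole string.
    if (i == static_cast<int>(ends.size())) return len;
    return ends[i] + 1;  // position just past the newline
  }
};

For "ab\ncd" this reports 2 lines, [0, 3) == "ab\n" and [3, 5) == "cd"; for a string ending in '\n' the final line is empty, matching the comment above.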
// Represents 2 strings as 2 arrays of lines.
class LineArrayCompareInput : public Compare::Input {
public:
LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
: s1_(s1), s2_(s2), line_ends1_(line_ends1), line_ends2_(line_ends2) {
}
int getLength1() {
return line_ends1_.length();
}
int getLength2() {
return line_ends2_.length();
}
bool equals(int index1, int index2) {
int line_start1 = line_ends1_.GetLineStart(index1);
int line_start2 = line_ends2_.GetLineStart(index2);
int line_end1 = line_ends1_.GetLineEnd(index1);
int line_end2 = line_ends2_.GetLineEnd(index2);
int len1 = line_end1 - line_start1;
int len2 = line_end2 - line_start2;
if (len1 != len2) {
return false;
}
return CompareSubstrings(s1_, line_start1, s2_, line_start2, len1);
}
private:
Handle<String> s1_;
Handle<String> s2_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
};
// Stores compare result in JSArray. Each chunk is stored as 3 array elements:
// (pos1, len1, len2).
class LineArrayCompareOutput : public Compare::Output {
public:
LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
: array_(Factory::NewJSArray(10)), current_size_(0),
line_ends1_(line_ends1), line_ends2_(line_ends2) {
}
void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
int char_pos1 = line_ends1_.GetLineStart(line_pos1);
int char_pos2 = line_ends2_.GetLineStart(line_pos2);
int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
SetElement(array_, current_size_ + 1,
Handle<Object>(Smi::FromInt(char_len1)));
SetElement(array_, current_size_ + 2,
Handle<Object>(Smi::FromInt(char_len2)));
current_size_ += 3;
}
Handle<JSArray> GetResult() {
return array_;
}
private:
Handle<JSArray> array_;
int current_size_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
};
Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
Handle<String> s2) {
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
LineArrayCompareOutput output(line_ends1, line_ends2);
Compare::CalculateDifference(&input, &output);
return output.GetResult();
}
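A hedged sketch of consuming the result, assuming the Smi triplets have already been copied out of the JSArray into a flat int buffer (the helper below is hypothetical):

#include <cstdio>

// Each chunk is (char_pos1, char_len1, char_len2): at position char_pos1 in
// the old string, char_len1 characters were replaced by char_len2 characters.
void PrintDiffChunks(const int* triplets, int triplet_count) {
  for (int i = 0; i < triplet_count * 3; i += 3) {
    std::printf("at %d: %d old chars -> %d new chars\n",
                triplets[i], triplets[i + 1], triplets[i + 2]);
  }
}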
static void CompileScriptForTracker(Handle<Script> script) {
const bool is_eval = false;
const bool is_global = true;

38
deps/v8/src/liveedit.h

@@ -109,6 +109,44 @@ class LiveEdit : AllStatic {
FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
};
// Compares 2 strings line-by-line and returns the diff as an array of
// triplets (pos1, len1, len2) describing the list of diff chunks.
static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
Handle<String> s2);
};
// A general-purpose comparator between 2 arrays.
class Compare {
public:
// Holds 2 arrays of elements, allowing any element of the first array to
// be compared with any element of the second array.
class Input {
public:
virtual int getLength1() = 0;
virtual int getLength2() = 0;
virtual bool equals(int index1, int index2) = 0;
protected:
virtual ~Input() {}
};
// Receives compare result as a series of chunks.
class Output {
public:
// Puts another chunk in the result list. Note that, technically speaking,
// only 3 arguments are actually needed, the 4th being derivable.
virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
protected:
virtual ~Output() {}
};
// Finds the difference between 2 arrays of elements.
static void CalculateDifference(Input* input,
Output* result_writer);
};
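A minimal sketch of plugging a custom pair of arrays into this comparator; the two adapter classes below are illustrative, not part of the patch:

#include <cstdio>

// Adapts two plain int arrays to the Compare::Input interface.
class IntArrayInput : public Compare::Input {
 public:
  IntArrayInput(const int* a, int len1, const int* b, int len2)
      : a_(a), len1_(len1), b_(b), len2_(len2) {}
  virtual int getLength1() { return len1_; }
  virtual int getLength2() { return len2_; }
  virtual bool equals(int index1, int index2) {
    return a_[index1] == b_[index2];
  }
 private:
  const int* a_;
  int len1_;
  const int* b_;
  int len2_;
};

// Prints each diff chunk as it is produced.
class PrintingOutput : public Compare::Output {
 public:
  virtual void AddChunk(int pos1, int pos2, int len1, int len2) {
    std::printf("chunk at (%d, %d): -%d +%d\n", pos1, pos2, len1, len2);
  }
};

// Usage: IntArrayInput input(a, n, b, m); PrintingOutput output;
//        Compare::CalculateDifference(&input, &output);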
#endif // ENABLE_DEBUGGER_SUPPORT

4
deps/v8/src/log-inl.h

@@ -38,7 +38,6 @@ namespace internal {
Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
Script* script) {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
&& script->type()->value() == Script::TYPE_NATIVE) {
switch (tag) {
@@ -50,9 +49,6 @@ Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
} else {
return tag;
}
#else
return tag;
#endif // ENABLE_CPP_PROFILES_PROCESSOR
}
#endif // ENABLE_LOGGING_AND_PROFILING

20
deps/v8/src/log.h

@@ -87,7 +87,7 @@ class CompressionHelper;
#define LOG(Call) ((void) 0)
#endif
#define LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V) \
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
@@ -116,19 +116,13 @@ class CompressionHelper;
V(REG_EXP_TAG, "RegExp", "re") \
V(SCRIPT_TAG, "Script", "sc") \
V(STORE_IC_TAG, "StoreIC", "sic") \
V(STUB_TAG, "Stub", "s")
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
// Add 'NATIVE_' cases for functions and scripts, but map them to
// original tags when writing to the log.
#define LOG_EVENTS_AND_TAGS_LIST(V) \
LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V) \
V(STUB_TAG, "Stub", "s") \
V(NATIVE_FUNCTION_TAG, "Function", "f") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile", "lc") \
V(NATIVE_SCRIPT_TAG, "Script", "sc")
#else
#define LOG_EVENTS_AND_TAGS_LIST(V) LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V)
#endif
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
class Logger {
public:
@@ -274,11 +268,11 @@ class Logger {
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
private:
// Profiler's sampling interval (in milliseconds).
static const int kSamplingIntervalMs = 1;
private:
// Size of window used for log records compression.
static const int kCompressionWindowSize = 4;

3
deps/v8/src/macros.py

@@ -83,6 +83,9 @@ const kMaxMonth = 10000000;
const kMinDate = -100000000;
const kMaxDate = 100000000;
# Native cache ids.
const STRING_TO_REGEXP_CACHE_ID = 0;
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.

4
deps/v8/src/mips/simulator-mips.cc

@@ -139,7 +139,7 @@ void Debugger::Stop(Instruction* instr) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
Debug();
}
#endif // def GENERATED_CODE_COVERAGE
#endif // GENERATED_CODE_COVERAGE
int32_t Debugger::GetRegisterValue(int regnum) {
@@ -1644,5 +1644,5 @@ uintptr_t Simulator::PopAddress() {
} } // namespace assembler::mips
#endif // !defined(__mips)
#endif // __mips

1
deps/v8/src/objects-debug.cc

@@ -725,7 +725,6 @@ void String::StringVerify() {
void JSFunction::JSFunctionPrint() {
HeapObject::PrintHeader("Function");
PrintF(" - map = 0x%p\n", map());
PrintF(" - is boilerplate: %s\n", IsBoilerplate() ? "yes" : "no");
PrintF(" - initial_map = ");
if (has_initial_map()) {
initial_map()->ShortPrint();

26
deps/v8/src/objects-inl.h

@@ -2491,11 +2491,6 @@ bool SharedFunctionInfo::HasCustomCallGenerator() {
}
bool JSFunction::IsBoilerplate() {
return map() == Heap::boilerplate_function_map();
}
bool JSFunction::IsBuiltin() {
return context()->global()->IsJSBuiltinsObject();
}
@@ -2580,22 +2575,35 @@ bool JSFunction::is_compiled() {
int JSFunction::NumberOfLiterals() {
ASSERT(!IsBoilerplate());
return literals()->length();
}
Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
ASSERT(0 <= id && id < kJSBuiltinsCount);
return READ_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize));
return READ_FIELD(this, OffsetOfFunctionWithId(id));
}
void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Object* value) {
ASSERT(0 <= id && id < kJSBuiltinsCount);
WRITE_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize), value);
WRITE_BARRIER(this, kJSBuiltinsOffset + (id * kPointerSize));
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
}
Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
ASSERT(0 <= id && id < kJSBuiltinsCount);
return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
}
void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
ASSERT(0 <= id && id < kJSBuiltinsCount);
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
ASSERT(!Heap::InNewSpace(value));
}

8
deps/v8/src/objects.cc

@@ -1189,8 +1189,7 @@ String* JSObject::class_name() {
String* JSObject::constructor_name() {
if (IsJSFunction()) {
return JSFunction::cast(this)->IsBoilerplate() ?
Heap::function_class_symbol() : Heap::closure_symbol();
return Heap::closure_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -2519,9 +2518,8 @@ bool JSObject::ReferencesObject(Object* obj) {
break;
}
// For functions check the context. Boilerplate functions do
// not have to be traversed since they have no real context.
if (IsJSFunction() && !JSFunction::cast(this)->IsBoilerplate()) {
// For functions check the context.
if (IsJSFunction()) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
Top::context()->global_context()->arguments_boilerplate();

44
deps/v8/src/objects.h

@@ -76,6 +76,7 @@
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@@ -2307,6 +2308,25 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
};
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
// [1]: finger index
// [2]: current cache size
// [3]: dummy field.
// The rest of the array holds key/value pairs.
class JSFunctionResultCache: public FixedArray {
public:
static const int kFactoryIndex = 0;
static const int kFingerIndex = kFactoryIndex + 1;
static const int kCacheSizeIndex = kFingerIndex + 1;
static const int kDummyIndex = kCacheSizeIndex + 1;
static const int kEntriesIndex = kDummyIndex + 1;
static const int kEntrySize = 2; // key + value
};
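The layout implies straightforward slot arithmetic; a hedged sketch (hypothetical helpers, not in the patch) using the constants above:

const int kEntriesIndex = 4;  // mirrors JSFunctionResultCache::kEntriesIndex
const int kEntrySize = 2;     // key + value

// Index of the key slot of the i-th key/value pair, and of its value slot.
int KeyIndexOf(int i) { return kEntriesIndex + i * kEntrySize; }
int ValueIndexOf(int i) { return KeyIndexOf(i) + 1; }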
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
@@ -3346,10 +3366,6 @@ class JSFunction: public JSObject {
inline Code* code();
inline void set_code(Code* value);
// Tells whether this function is a context-independent boilerplate
// function.
inline bool IsBoilerplate();
// Tells whether this function is builtin.
inline bool IsBuiltin();
@ -3525,6 +3541,10 @@ class JSBuiltinsObject: public GlobalObject {
inline Object* javascript_builtin(Builtins::JavaScript id);
inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
// Accessors for code of the runtime routines written in JavaScript.
inline Code* javascript_builtin_code(Builtins::JavaScript id);
inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);
// Casting.
static inline JSBuiltinsObject* cast(Object* obj);
@ -3535,11 +3555,23 @@ class JSBuiltinsObject: public GlobalObject {
#endif
// Layout description. The size of the builtins object includes
// room for one pointer per runtime routine written in javascript.
// room for two pointers per runtime routine written in javascript
// (function and code object).
static const int kJSBuiltinsCount = Builtins::id_count;
static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
static const int kJSBuiltinsCodeOffset =
GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
static const int kSize =
kJSBuiltinsOffset + (kJSBuiltinsCount * kPointerSize);
kJSBuiltinsCodeOffset + (kJSBuiltinsCount * kPointerSize);
static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
return kJSBuiltinsOffset + id * kPointerSize;
}
static int OffsetOfCodeWithId(Builtins::JavaScript id) {
return kJSBuiltinsCodeOffset + id * kPointerSize;
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
};
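Read together with the accessors in objects-inl.h above, the new layout is two parallel pointer arrays: all function slots first, then all code slots. A standalone sketch with illustrative constants (the header size and builtin count here are stand-ins, not V8's actual values) makes the offset math checkable:

#include <cassert>

static const int kPointerSize = 8;       // assumed 64-bit target
static const int kHeaderSize = 16;       // stand-in for GlobalObject::kHeaderSize
static const int kJSBuiltinsCount = 10;  // stand-in for Builtins::id_count

static const int kJSBuiltinsOffset = kHeaderSize;
static const int kJSBuiltinsCodeOffset =
    kHeaderSize + kJSBuiltinsCount * kPointerSize;
static const int kSize = kJSBuiltinsCodeOffset + kJSBuiltinsCount * kPointerSize;

int OffsetOfFunctionWithId(int id) { return kJSBuiltinsOffset + id * kPointerSize; }
int OffsetOfCodeWithId(int id) { return kJSBuiltinsCodeOffset + id * kPointerSize; }

int main() {
  // The first code slot starts right after the last function slot...
  assert(OffsetOfCodeWithId(0) ==
         OffsetOfFunctionWithId(kJSBuiltinsCount - 1) + kPointerSize);
  // ...and the object grew by exactly one extra pointer per builtin.
  assert(kSize == kHeaderSize + 2 * kJSBuiltinsCount * kPointerSize);
}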

4
deps/v8/src/parser.cc

@ -1959,7 +1959,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
extension_->GetNativeFunction(v8::Utils::ToLocal(name));
ASSERT(!fun_template.IsEmpty());
// Instantiate the function and create a boilerplate function from it.
// Instantiate the function and create a shared function info from it.
Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
@ -1968,7 +1968,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Factory::NewSharedFunctionInfo(name, literals, code);
shared->set_construct_stub(*construct_stub);
// Copy the function data to the boilerplate.
// Copy the function data to the shared function info.
shared->set_function_data(fun->shared()->function_data());
int parameters = fun->shared()->formal_parameter_count();
shared->set_formal_parameter_count(parameters);

7
deps/v8/src/platform-linux.cc

@ -159,7 +159,7 @@ int OS::ActivationFrameAlignment() {
#elif V8_TARGET_ARCH_MIPS
return 8;
#endif
// With gcc 4.4 the tree vectorization optimiser can generate code
// With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16 byte alignment such as movdqa on x86.
return 16;
}
@ -728,10 +728,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (active_sampler_ == NULL) return;
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
TickSample* sample = CpuProfiler::TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.

5
deps/v8/src/platform-macos.cc

@ -547,10 +547,7 @@ class Sampler::PlatformData : public Malloced {
// Loop until the sampler is disengaged, keeping the specified sampling freq.
for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
TickSample* sample = CpuProfiler::TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.

5
deps/v8/src/platform-win32.cc

@ -1806,10 +1806,7 @@ class Sampler::PlatformData : public Malloced {
// Loop until the sampler is disengaged, keeping the specified sampling freq.
for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
TickSample* sample = CpuProfiler::TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.

9
deps/v8/src/profile-generator-inl.h

@ -28,7 +28,7 @@
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "profile-generator.h"
@ -59,8 +59,9 @@ bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
}
ProfileNode::ProfileNode(CodeEntry* entry)
: entry_(entry),
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
total_ticks_(0),
self_ticks_(0),
children_(CodeEntriesMatch) {
@ -118,6 +119,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_PROFILE_GENERATOR_INL_H_

86
deps/v8/src/profile-generator.cc

@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
@ -54,7 +54,7 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
children_.Lookup(entry, CodeEntryHash(entry), true);
if (map_entry->value == NULL) {
// New node added.
ProfileNode* new_node = new ProfileNode(entry);
ProfileNode* new_node = new ProfileNode(tree_, entry);
map_entry->value = new_node;
children_list_.Add(new_node);
}
@ -62,6 +62,16 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
}
double ProfileNode::GetSelfMillis() const {
return tree_->TicksToMillis(self_ticks_);
}
double ProfileNode::GetTotalMillis() const {
return tree_->TicksToMillis(total_ticks_);
}
void ProfileNode::Print(int indent) {
OS::Print("%5u %5u %*c %s%s",
total_ticks_, self_ticks_,
@ -95,13 +105,13 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG, "", "(root)", "", 0),
root_(new ProfileNode(&root_entry_)) {
root_(new ProfileNode(this, &root_entry_)) {
}
ProfileTree::~ProfileTree() {
DeleteNodesCallback cb;
TraverseBreadthFirstPostOrder(&cb);
TraverseDepthFirstPostOrder(&cb);
}
@ -131,6 +141,11 @@ void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
}
void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
}
namespace {
class Position {
@ -153,9 +168,9 @@ class Position {
} // namespace
// Non-recursive implementation of breadth-first post-order tree traversal.
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseBreadthFirstPostOrder(Callback* callback) {
void ProfileTree::TraverseDepthFirstPostOrder(Callback* callback) {
List<Position> stack(10);
stack.Add(Position(root_));
do {
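The hunk is cut off at the loop head, so as a self-contained illustration (simplified types, not the actual ProfileNode/Position classes), a stack-based depth-first post-order traversal of this shape looks like the following sketch. The real version keeps a child cursor per stack slot in the same way:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> children;
};

template <typename Callback>
void TraverseDepthFirstPostOrder(Node* root, Callback* callback) {
  struct Position { Node* node; std::size_t next_child; };
  std::vector<Position> stack;
  stack.push_back({root, 0});
  while (!stack.empty()) {
    Position& top = stack.back();
    if (top.next_child < top.node->children.size()) {
      // Descend into the next unvisited child.
      Node* child = top.node->children[top.next_child++];
      stack.push_back({child, 0});  // 'top' may dangle after this; unused below
    } else {
      // All children done: visit the node itself (post-order), then pop.
      callback->AfterAllChildrenTraversed(top.node);
      stack.pop_back();
    }
  }
}

struct PrintCallback {
  void AfterAllChildrenTraversed(Node* node) { std::printf("%s ", node->name); }
};

int main() {
  Node c{"c", {}}, b{"b", {&c}}, a{"a", {}}, root{"root", {&a, &b}};
  PrintCallback cb;
  TraverseDepthFirstPostOrder(&root, &cb);  // prints: a c b root
}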
@ -194,12 +209,14 @@ class CalculateTotalTicksCallback {
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
TraverseBreadthFirstPostOrder(&cb);
TraverseDepthFirstPostOrder(&cb);
}
void ProfileTree::ShortPrint() {
OS::Print("root: %u %u\n", root_->total_ticks(), root_->self_ticks());
OS::Print("root: %u %u %.2fms %.2fms\n",
root_->total_ticks(), root_->self_ticks(),
root_->GetTotalMillis(), root_->GetSelfMillis());
}
@ -215,6 +232,12 @@ void CpuProfile::CalculateTotalTicks() {
}
void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
top_down_.SetTickRatePerMs(actual_sampling_rate);
bottom_up_.SetTickRatePerMs(actual_sampling_rate);
}
void CpuProfile::ShortPrint() {
OS::Print("top down ");
top_down_.ShortPrint();
@ -326,7 +349,8 @@ bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
}
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
double actual_sampling_rate) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
current_profiles_semaphore_->Wait();
@ -340,6 +364,7 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
if (profile != NULL) {
profile->CalculateTotalTicks();
profile->SetActualSamplingRate(actual_sampling_rate);
profiles_.Add(profile);
HashMap::Entry* entry =
profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
@ -352,8 +377,9 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
}
CpuProfile* CpuProfilesCollection::StopProfiling(String* title) {
return StopProfiling(GetName(title));
CpuProfile* CpuProfilesCollection::StopProfiling(String* title,
double actual_sampling_rate) {
return StopProfiling(GetName(title), actual_sampling_rate);
}
@ -466,6 +492,29 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
}
void SampleRateCalculator::Tick() {
if (--wall_time_query_countdown_ == 0)
UpdateMeasurements(OS::TimeCurrentMillis());
}
void SampleRateCalculator::UpdateMeasurements(double current_time) {
if (measurements_count_++ != 0) {
const double measured_ticks_per_ms =
(kWallTimeQueryIntervalMs * ticks_per_ms_) /
(current_time - last_wall_time_);
// Update the average value.
ticks_per_ms_ +=
(measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
// Update the externally accessible result.
result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
}
last_wall_time_ = current_time;
wall_time_query_countdown_ =
static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
}
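The update above is the standard incremental mean: after the k-th measurement m_k, ticks_per_ms_ becomes mean += (m_k - mean) / k, with the constructor's initial rate effectively counting as measurement one (the first UpdateMeasurements call only records the wall time). A standalone check that reproduces the "(1.0 + 2.0 + 2.0) / 3" expectation from test-profile-generator.cc further below:

#include <cassert>
#include <cmath>

int main() {
  double mean = 1.0;   // initial ticks_per_ms: one tick per sampling interval
  unsigned count = 1;  // the constructor's value counts as measurement #1
  const double samples[] = {2.0, 2.0};  // two intervals measured at 2 ticks/ms
  for (double m : samples) {
    ++count;
    mean += (m - mean) / count;  // running mean, no history stored
  }
  // (1.0 + 2.0 + 2.0) / 3
  assert(std::fabs(mean - 5.0 / 3.0) < 1e-9);
}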
const char* ProfileGenerator::kAnonymousFunctionName = "(anonymous function)";
const char* ProfileGenerator::kProgramEntryName = "(program)";
const char* ProfileGenerator::kGarbageCollectorEntryName =
@ -513,8 +562,17 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
if (FLAG_prof_browser_mode) {
// Put VM state as the topmost entry.
*entry++ = EntryForVMState(sample.state);
bool no_symbolized_entries = true;
for (CodeEntry** e = entries.start(); e != entry; ++e) {
if (*e != NULL) {
no_symbolized_entries = false;
break;
}
}
// If no frames were symbolized, put the VM state entry in.
if (no_symbolized_entries) {
*entry++ = EntryForVMState(sample.state);
}
}
profiles_->AddPathToCurrentProfiles(entries);
@ -522,4 +580,4 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING

69
deps/v8/src/profile-generator.h

@ -28,7 +28,7 @@
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "hashmap.h"
@ -70,9 +70,11 @@ class CodeEntry {
};
class ProfileTree;
class ProfileNode {
public:
INLINE(explicit ProfileNode(CodeEntry* entry));
INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
@ -80,9 +82,11 @@ class ProfileNode {
INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
double GetSelfMillis() const;
double GetTotalMillis() const;
void Print(int indent);
@ -95,6 +99,7 @@ class ProfileNode {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
}
ProfileTree* tree_;
CodeEntry* entry_;
unsigned total_ticks_;
unsigned self_ticks_;
@ -115,7 +120,11 @@ class ProfileTree {
void AddPathFromStart(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
double TicksToMillis(unsigned ticks) const {
return ticks * ms_to_ticks_scale_;
}
ProfileNode* root() const { return root_; }
void SetTickRatePerMs(double ticks_per_ms);
void ShortPrint();
void Print() {
@ -124,10 +133,11 @@ class ProfileTree {
private:
template <typename Callback>
void TraverseBreadthFirstPostOrder(Callback* callback);
void TraverseDepthFirstPostOrder(Callback* callback);
CodeEntry root_entry_;
ProfileNode* root_;
double ms_to_ticks_scale_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@ -141,12 +151,15 @@ class CpuProfile {
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
void SetActualSamplingRate(double actual_sampling_rate);
INLINE(const char* title() const) { return title_; }
INLINE(unsigned uid() const) { return uid_; }
INLINE(const ProfileTree* top_down() const) { return &top_down_; }
INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
void UpdateTicksScale();
void ShortPrint();
void Print();
@ -208,8 +221,8 @@ class CpuProfilesCollection {
bool StartProfiling(const char* title, unsigned uid);
bool StartProfiling(String* title, unsigned uid);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String* title);
CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
CpuProfile* StopProfiling(String* title, double actual_sampling_rate);
INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
CpuProfile* GetProfile(unsigned uid);
inline bool is_last_profile();
@ -256,6 +269,42 @@ class CpuProfilesCollection {
};
class SampleRateCalculator {
public:
SampleRateCalculator()
: result_(Logger::kSamplingIntervalMs * kResultScale),
ticks_per_ms_(Logger::kSamplingIntervalMs),
measurements_count_(0),
wall_time_query_countdown_(1) {
}
double ticks_per_ms() {
return result_ / static_cast<double>(kResultScale);
}
void Tick();
void UpdateMeasurements(double current_time);
// Instead of querying current wall time each tick,
// we use this constant to control query intervals.
static const unsigned kWallTimeQueryIntervalMs = 100;
private:
// As the result needs to be accessed from a different thread, we
// use a type that guarantees atomic writes to memory. There should
// be <= 1000 ticks per second, so storing a value on the order of
// 10 ** 5 should provide enough precision while keeping away from a
// potential overflow.
static const int kResultScale = 100000;
AtomicWord result_;
// All other fields are accessed only from the sampler thread.
double ticks_per_ms_;
unsigned measurements_count_;
unsigned wall_time_query_countdown_;
double last_wall_time_;
};
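The scaled-integer handoff deserves a note: the sampler thread publishes the double rate as a fixed-point AtomicWord, and readers divide the scale back out. A minimal round-trip sketch (plain intptr_t standing in for AtomicWord; no actual atomics needed to show the arithmetic):

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  const int kResultScale = 100000;
  double ticks_per_ms = 1.66666;  // value computed on the sampler thread
  // Publish: scale to an integer that fits in a word.
  intptr_t result = static_cast<intptr_t>(ticks_per_ms * kResultScale);
  // Read back on another thread: divide the scale out again.
  double read_back = result / static_cast<double>(kResultScale);
  // Truncation loses at most one scale unit of precision.
  assert(std::fabs(read_back - ticks_per_ms) < 2.0 / kResultScale);
}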
class ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles);
@ -287,6 +336,11 @@ class ProfileGenerator {
INLINE(CodeMap* code_map()) { return &code_map_; }
INLINE(void Tick()) { sample_rate_calc_.Tick(); }
INLINE(double actual_sampling_rate()) {
return sample_rate_calc_.ticks_per_ms();
}
static const char* kAnonymousFunctionName;
static const char* kProgramEntryName;
static const char* kGarbageCollectorEntryName;
@ -298,12 +352,13 @@ class ProfileGenerator {
CodeMap code_map_;
CodeEntry* program_entry_;
CodeEntry* gc_entry_;
SampleRateCalculator sample_rate_calc_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_PROFILE_GENERATOR_H_

4
deps/v8/src/regexp-macro-assembler-irregexp-inl.h

@ -38,7 +38,7 @@
namespace v8 {
namespace internal {
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
uint32_t twenty_four_bits) {
@ -71,7 +71,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
pc_ += 4;
}
#endif // ! V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
} } // namespace v8::internal

4
deps/v8/src/regexp-macro-assembler-irregexp.cc

@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
: buffer_(buffer),
@ -459,6 +459,6 @@ void RegExpMacroAssemblerIrregexp::Expand() {
}
}
#endif // !V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
} } // namespace v8::internal

4
deps/v8/src/regexp-macro-assembler-irregexp.h

@ -31,7 +31,7 @@
namespace v8 {
namespace internal {
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
public:
@ -134,7 +134,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
};
#endif // !V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
} } // namespace v8::internal

5
deps/v8/src/regexp-macro-assembler.cc

@ -52,7 +52,7 @@ bool RegExpMacroAssembler::CanReadUnaligned() {
}
#ifdef V8_NATIVE_REGEXP // Avoid unused code, e.g., on ARM.
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
}
@ -258,5 +258,6 @@ Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
return new_stack_base - stack_content_size;
}
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
} } // namespace v8::internal

4
deps/v8/src/regexp-macro-assembler.h

@ -161,7 +161,7 @@ class RegExpMacroAssembler {
};
#ifdef V8_NATIVE_REGEXP // Avoid compiling unused code.
#ifndef V8_INTERPRETED_REGEXP // Avoid compiling unused code.
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
public:
@ -221,7 +221,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int* output);
};
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
} } // namespace v8::internal

136
deps/v8/src/runtime.cc

@ -1798,8 +1798,6 @@ class ReplacementStringBuilder {
void AddSubjectSlice(int from, int to) {
AddSubjectSlice(&array_builder_, from, to);
// Can we encode the slice in 11 bits for length and 19 bits for
// start position - as used by StringBuilderConcatHelper?
IncrementCharacterCount(to - from);
}
@ -5427,7 +5425,7 @@ static Object* Runtime_StringAdd(Arguments args) {
}
template<typename sinkchar>
template <typename sinkchar>
static inline void StringBuilderConcatHelper(String* special,
sinkchar* sink,
FixedArray* fixed_array,
@ -5498,33 +5496,41 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
bool ascii = special->IsAsciiRepresentation();
int position = 0;
int increment = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
// Smi encoding of position and length.
int len = Smi::cast(elt)->value();
if (len > 0) {
int smi_value = Smi::cast(elt)->value();
int pos;
int len;
if (smi_value > 0) {
// Position and length encoded in one smi.
int pos = len >> 11;
len &= 0x7ff;
if (pos + len > special_length) {
return Top::Throw(Heap::illegal_argument_symbol());
}
increment = len;
pos = StringBuilderSubstringPosition::decode(smi_value);
len = StringBuilderSubstringLength::decode(smi_value);
} else {
// Position and length encoded in two smis.
increment = (-len);
// Get the position and check that it is also a smi.
len = -smi_value;
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) {
return Top::Throw(Heap::illegal_argument_symbol());
}
Object* pos = fixed_array->get(i);
if (!pos->IsSmi()) {
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) {
return Top::Throw(Heap::illegal_argument_symbol());
}
pos = Smi::cast(next_smi)->value();
if (pos < 0) {
return Top::Throw(Heap::illegal_argument_symbol());
}
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
if (pos > special_length || len > special_length - pos) {
return Top::Throw(Heap::illegal_argument_symbol());
}
increment = len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
@ -9756,10 +9762,21 @@ static Object* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
// Compares 2 strings line-by-line and returns diff in form of JSArray of
// triplets (pos1, len1, len2) describing list of diff chunks.
static Object* Runtime_LiveEditCompareStringsLinewise(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
CONVERT_ARG_CHECKED(String, s1, 0);
CONVERT_ARG_CHECKED(String, s2, 1);
return *LiveEdit::CompareStringsLinewise(s1, s2);
}
// A testing entry. Returns statement position which is the closest to
// source_position.
@ -10007,6 +10024,91 @@ static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) {
}
static Object* CacheMiss(FixedArray* cache_obj, int index, Object* key_obj) {
ASSERT(index % 2 == 0); // index of the key
ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
ASSERT(index < cache_obj->length());
HandleScope scope;
Handle<FixedArray> cache(cache_obj);
Handle<Object> key(key_obj);
Handle<JSFunction> factory(JSFunction::cast(
cache->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<Object> receiver(Top::global_context()->global());
Handle<Object> value;
{
// This handle is neither shared nor used later, so it's safe.
Object** argv[] = { key.location() };
bool pending_exception = false;
value = Execution::Call(factory,
receiver,
1,
argv,
&pending_exception);
if (pending_exception) return Failure::Exception();
}
cache->set(index, *key);
cache->set(index + 1, *value);
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(index));
return *value;
}
static Object* Runtime_GetFromCache(Arguments args) {
// This is only called from codegen, so checks might be more lax.
CONVERT_CHECKED(FixedArray, cache, args[0]);
Object* key = args[1];
const int finger_index =
Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
Object* o = cache->get(finger_index);
if (o == key) {
// The fastest case: hit the same place again.
return cache->get(finger_index + 1);
}
for (int i = finger_index - 2;
i >= JSFunctionResultCache::kEntriesIndex;
i -= 2) {
o = cache->get(i);
if (o == key) {
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
return cache->get(i + 1);
}
}
const int size =
Smi::cast(cache->get(JSFunctionResultCache::kCacheSizeIndex))->value();
ASSERT(size <= cache->length());
for (int i = size - 2; i > finger_index; i -= 2) {
o = cache->get(i);
if (o == key) {
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
return cache->get(i + 1);
}
}
// Cache miss. If we have spare room, put the new data into it; otherwise
// evict the entry just past the finger, which must be the least recently used.
if (size < cache->length()) {
cache->set(JSFunctionResultCache::kCacheSizeIndex, Smi::FromInt(size + 2));
return CacheMiss(cache, size, key);
} else {
int target_index = finger_index + JSFunctionResultCache::kEntrySize;
if (target_index == cache->length()) {
target_index = JSFunctionResultCache::kEntriesIndex;
}
return CacheMiss(cache, target_index, key);
}
}
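In other words, the cache is a flat array walked with a moving "finger": search backward from the finger, then from the end of the used region down to the finger; on a miss, append while there is room, else overwrite the slot just past the finger (wrapping to the start). A simplified standalone model of that policy (std::string keys stand in for heap objects; none of this is V8 API):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct FingerCache {
  int finger = 0;  // index of the most recently hit entry
  std::vector<std::pair<std::string, int> > entries;
  size_t capacity;
  explicit FingerCache(size_t cap) : capacity(cap) {}

  int* Get(const std::string& key) {
    if (!entries.empty() && entries[finger].first == key)
      return &entries[finger].second;  // fastest case: hit the same place again
    for (int i = finger - 1; i >= 0; --i)  // backward from the finger
      if (entries[i].first == key) { finger = i; return &entries[i].second; }
    for (int i = static_cast<int>(entries.size()) - 1; i > finger; --i)  // tail
      if (entries[i].first == key) { finger = i; return &entries[i].second; }
    return nullptr;  // miss: caller runs the factory and calls Put()
  }

  void Put(const std::string& key, int value) {
    if (entries.size() < capacity) {  // spare room: grow the used region
      entries.push_back({key, value});
      finger = static_cast<int>(entries.size()) - 1;
    } else {  // evict the entry just past the finger, wrapping at the end
      int target = finger + 1 == static_cast<int>(entries.size()) ? 0 : finger + 1;
      entries[target] = {key, value};
      finger = target;
    }
  }
};

int main() {
  FingerCache cache(2);
  cache.Put("a", 1);
  cache.Put("b", 2);
  assert(*cache.Get("a") == 1);  // finger moves to "a"
  cache.Put("c", 3);             // evicts "b", the slot past the finger
  assert(cache.Get("b") == nullptr);
}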
#ifdef DEBUG
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.

3
deps/v8/src/runtime.h

@ -288,6 +288,8 @@ namespace internal {
F(LocalKeys, 1, 1) \
/* Handle scopes */ \
F(DeleteHandleScopeExtensions, 0, 1) \
/* Cache support */ \
F(GetFromCache, 2, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
@ -338,6 +340,7 @@ namespace internal {
F(LiveEditRelinkFunctionToScript, 2, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckAndDropActivations, 2, 1) \
F(LiveEditCompareStringsLinewise, 2, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 2, 1)
#else

4
deps/v8/src/serialize.cc

@ -422,7 +422,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
21,
"compile_array_push");
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
22,
@ -439,7 +439,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
25,
"NativeRegExpMacroAssembler::word_character_map");
#endif
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,

3
deps/v8/src/serialize.h

@ -503,7 +503,8 @@ class PartialSerializer : public Serializer {
// unique ID, and deserializing several partial snapshots containing script
// would cause dupes.
ASSERT(!o->IsScript());
return o->IsString() || o->IsSharedFunctionInfo() || o->IsHeapNumber();
return o->IsString() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode();
}
private:

14
deps/v8/src/string.js

@ -529,10 +529,16 @@ function ApplyReplacementFunction(replace, matchInfo, subject) {
return replace.apply(null, parameters);
}
// ECMA-262 section 15.5.4.12
function StringSearch(re) {
var regexp = new $RegExp(re);
var regexp;
if (IS_STRING(re)) {
regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
} else if (IS_REGEXP(re)) {
regexp = re;
} else {
regexp = new $RegExp(re);
}
var s = TO_STRING_INLINE(this);
var match = DoRegExpExec(regexp, s, 0);
if (match) {
@ -925,10 +931,10 @@ ReplaceResultBuilder.prototype.add = function(str) {
ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
var len = end - start;
if (len == 0) return;
if (start < 0 || len <= 0) return;
var elements = this.elements;
if (start < 0x80000 && len < 0x800) {
elements[elements.length] = (start << 11) + len;
elements[elements.length] = (start << 11) | len;
} else {
// 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
// so -len is a smi.
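Both sides of this encoding appear in this commit: the JavaScript builder packs small slices into one positive smi, and Runtime_StringBuilderConcat above decodes them via StringBuilderSubstringPosition/Length (a negative entry means the length is in this smi and the position in the next one). A worked check of the one-smi case, assuming the same 19/11-bit split as the guards here:

#include <cassert>

int Encode(int start, int len) {
  assert(start >= 0 && start < 0x80000);  // 19 bits of start position
  assert(len > 0 && len < 0x800);         // 11 bits of length
  // '|' and '+' agree because the low 11 bits are zero after the shift,
  // but '|' states the bit-field intent directly.
  return (start << 11) | len;
}

int DecodeStart(int smi) { return smi >> 11; }
int DecodeLength(int smi) { return smi & 0x7FF; }

int main() {
  int smi = Encode(1234, 56);
  assert(smi > 0);  // a positive smi selects the one-smi path in the runtime
  assert(DecodeStart(smi) == 1234);
  assert(DecodeLength(smi) == 56);
}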

32
deps/v8/src/stub-cache.cc

@ -93,6 +93,38 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
}
Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
// and we use the empty string for the map cache in that case. If
// there are global objects involved, we need to check global
// property cells in the stub and therefore the stub will be
// specific to the name.
String* cache_name = Heap::empty_string();
if (receiver->IsGlobalObject()) cache_name = name;
JSObject* last = receiver;
while (last->GetPrototype() != Heap::null_value()) {
last = JSObject::cast(last->GetPrototype());
if (last->IsGlobalObject()) cache_name = name;
}
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadNonexistent(cache_name, receiver, last);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
Object* result =
receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
if (result->IsFailure()) return result;
}
return Set(name, receiver->map(), Code::cast(code));
}
Object* StubCache::ComputeLoadField(String* name,
JSObject* receiver,
JSObject* holder,

13
deps/v8/src/stub-cache.h

@ -56,6 +56,8 @@ class StubCache : public AllStatic {
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
static Object* ComputeLoadNonexistent(String* name, JSObject* receiver);
static Object* ComputeLoadField(String* name,
JSObject* receiver,
JSObject* holder,
@ -461,18 +463,25 @@ class StubCompiler BASE_EMBEDDED {
class LoadStubCompiler: public StubCompiler {
public:
Object* CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last);
Object* CompileLoadField(JSObject* object,
JSObject* holder,
int index,
String* name);
Object* CompileLoadCallback(String* name,
JSObject* object,
JSObject* holder,
AccessorInfo* callback);
Object* CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,
String* name);
Object* CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name);
@ -494,17 +503,21 @@ class KeyedLoadStubCompiler: public StubCompiler {
JSObject* object,
JSObject* holder,
int index);
Object* CompileLoadCallback(String* name,
JSObject* object,
JSObject* holder,
AccessorInfo* callback);
Object* CompileLoadConstant(String* name,
JSObject* object,
JSObject* holder,
Object* value);
Object* CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name);
Object* CompileLoadArrayLength(String* name);
Object* CompileLoadStringLength(String* name);
Object* CompileLoadFunctionPrototype(String* name);

6
deps/v8/src/version.cc

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 3
#define PATCH_LEVEL 1
#define BUILD_NUMBER 4
#define PATCH_LEVEL 2
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the

10
deps/v8/src/x64/assembler-x64.cc

@ -1138,23 +1138,25 @@ void Assembler::j(Condition cc,
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
const int short_size = sizeof(int8_t);
const int long_size = sizeof(int32_t);
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
if (is_int8(offs - sizeof(int8_t))) {
if (is_int8(offs - short_size)) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - sizeof(int8_t)) & 0xFF);
emit((offs - short_size) & 0xFF);
} else {
// 1110 1001 #32-bit disp.
emit(0xE9);
emitl(offs - sizeof(int32_t));
emitl(offs - long_size);
}
} else if (L->is_linked()) {
// 1110 1001 #32-bit disp.
emit(0xE9);
emitl(L->pos());
L->link_to(pc_offset() - sizeof(int32_t));
L->link_to(pc_offset() - long_size);
} else {
// 1110 1001 #32-bit disp.
ASSERT(L->is_unused());
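The displacement here is relative to the end of the jmp instruction, which is why short_size or long_size is subtracted from offs (offs itself is already one byte short of pc_offset()). A standalone check of the arithmetic for a bound backward label, using illustrative pc values:

#include <cassert>
#include <cstdint>

bool is_int8(int64_t v) { return v >= -128 && v <= 127; }

int main() {
  const int short_size = sizeof(int8_t);   // 1-byte displacement (2-byte jmp)
  const int long_size = sizeof(int32_t);   // 4-byte displacement (5-byte jmp)

  int pc_offset = 2000;
  int label_pos = 1990;                    // label bound 10 bytes back
  int offs = label_pos - pc_offset - 1;    // -11, as computed in jmp()
  assert(offs <= 0);
  assert(is_int8(offs - short_size));      // fits: short 0xEB encoding
  int disp8 = offs - short_size;           // -12
  // Jumping disp8 from the end of the 2-byte instruction lands on the label.
  assert(pc_offset + 1 + short_size + disp8 == label_pos);

  label_pos = 1000;                        // too far back for an int8
  offs = label_pos - pc_offset - 1;
  assert(!is_int8(offs - short_size));     // falls through to the 0xE9 form
  int32_t disp32 = offs - long_size;
  assert(pc_offset + 1 + long_size + disp32 == label_pos);
}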

2
deps/v8/src/x64/builtins-x64.cc

@ -1212,7 +1212,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movq(rdi, rdx);
#else // !defined(_WIN64)
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
// rsi : function

121
deps/v8/src/x64/codegen-x64.cc

@ -290,6 +290,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ += info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@ -483,11 +484,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
// Adjust for function-level loop nesting.
loop_nesting_ -= info->loop_nesting();
ASSERT_EQ(loop_nesting_, info->loop_nesting());
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(state_ == NULL);
ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@ -2282,8 +2283,8 @@ void CodeGenerator::InstantiateFunction(
Result answer = frame_->CallStub(&stub, 1);
frame_->Push(&answer);
} else {
// Call the runtime to instantiate the function boilerplate
// object.
// Call the runtime to instantiate the function based on the
// shared function info.
frame_->EmitPush(rsi);
frame_->EmitPush(function_info);
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
@ -4259,6 +4260,82 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
: dst_(dst), cache_(cache), key_(key) {
set_comment("[ DeferredSearchCache");
}
virtual void Generate();
private:
Register dst_, cache_, key_;
};
void DeferredSearchCache::Generate() {
__ push(cache_);
__ push(key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
if (!dst_.is(rax)) {
__ movq(dst_, rax);
}
}
void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
Top::global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
frame_->Push(Factory::undefined_value());
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
Result key = frame_->Pop();
key.ToRegister();
Result cache = allocator()->Allocate();
__ movq(cache.reg(), cache_obj, RelocInfo::EMBEDDED_OBJECT);
Result tmp = allocator()->Allocate();
DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
key.reg());
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
SmiIndex index =
masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
__ cmpq(key.reg(), FieldOperand(cache.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize));
deferred->Branch(not_equal);
__ movq(tmp.reg(), FieldOperand(cache.reg(),
index.reg,
index.scale,
kPointerSize + FixedArray::kHeaderSize));
deferred->BindExit();
frame_->Push(&tmp);
}
void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
@ -7240,9 +7317,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time, or if the regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
#else // V8_INTERPRETED_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
@ -7318,7 +7395,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (usigned comparison).
// length. A negative value will be greater (unsigned comparison).
__ movq(rax, Operand(rsp, kPreviousIndexOffset));
__ SmiToInteger32(rax, rax);
__ cmpl(rax, rbx);
@ -7364,9 +7441,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
__ movl(rdx, rbx);
__ andb(rdx, Immediate(kStringRepresentationMask));
__ cmpb(rdx, Immediate(kConsStringTag));
__ andb(rbx, Immediate(kStringRepresentationMask));
__ cmpb(rbx, Immediate(kConsStringTag));
__ j(not_equal, &runtime);
__ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
__ Cmp(rdx, Factory::empty_string());
@ -7385,7 +7461,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
// it has, the field contains a code object otherwise it contains the hole.
__ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
__ cmpb(rbx, Immediate(kSeqTwoByteString));
__ j(equal, &seq_two_byte_string);
if (FLAG_debug_code) {
__ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
@ -7511,7 +7588,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592) Rerunning the RegExp to get the stack overflow exception.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception_address(Top::k_pending_exception_address);
__ movq(kScratchRegister, pending_exception_address);
__ Cmp(kScratchRegister, Factory::the_hole_value());
@ -7558,7 +7635,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: offsets vector
// rdx: number of capture registers
Label next_capture, done;
__ movq(rax, Operand(rsp, kPreviousIndexOffset));
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@ -7583,7 +7659,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}
@ -8159,7 +8235,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
bool always_allocate_scope,
int /* alignment_skew */) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
@ -8173,11 +8250,19 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Complex results must be written to address passed as first argument.
// AMD64 calling convention: a struct of two pointers in rax+rdx
// Check stack alignment.
if (FLAG_debug_code) {
__ CheckStackAlignment();
}
if (do_gc) {
// Pass failure code returned from last attempt as first argument to GC.
// Pass failure code returned from last attempt as first argument to
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in a register.
#ifdef _WIN64
__ movq(rcx, rax);
#else // ! defined(_WIN64)
#else // _WIN64
__ movq(rdi, rax);
#endif
__ movq(kScratchRegister,
@ -8211,7 +8296,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ lea(rdx, Operand(rsp, 4 * kPointerSize));
}
#else // ! defined(_WIN64)
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.

3
deps/v8/src/x64/codegen-x64.h

@ -584,6 +584,9 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);

3
deps/v8/src/x64/full-codegen-x64.cc

@ -780,7 +780,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
// Build the shared function info and instantiate the function based
// on it.
Handle<SharedFunctionInfo> function_info =
Compiler::BuildFunctionInfo(expr, script(), this);
if (HasStackOverflow()) return;

62
deps/v8/src/x64/macro-assembler-x64.cc

@ -226,7 +226,17 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
cmpq(scratch, kScratchRegister);
j(equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// The offset is relative to a tagged or untagged HeapObject pointer,
// so either offset or offset + kHeapObjectTag must be a
// multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
// We use optimized write barrier code if the word being written to is not in
// a large object page, or is in the first "page" of a large object page.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
@ -291,6 +301,21 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
}
void MacroAssembler::CheckStackAlignment() {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
// Abort if stack is not aligned.
int3();
bind(&alignment_as_expected);
}
}
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
@ -445,16 +470,28 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(rdi));
// Load the builtins object into target register.
movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(rdi, FieldOperand(rdi, GlobalObject::kBuiltinsOffset));
int builtins_offset =
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
movq(rdi, FieldOperand(rdi, builtins_offset));
// Load the code entry point from the function into the target register.
movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
movq(rdi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
// Load the code entry point from the builtins object.
movq(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
if (FLAG_debug_code) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(target);
movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
cmpq(target, Operand(rsp, 0));
Assert(equal, "Builtin code object changed");
pop(target);
}
lea(target, FieldOperand(target, Code::kHeaderSize));
}
@ -2616,6 +2653,11 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Check stack alignment.
if (FLAG_debug_code) {
CheckStackAlignment();
}
call(function);
ASSERT(OS::ActivationFrameAlignment() != 0);
ASSERT(num_arguments >= 0);

3
deps/v8/src/x64/macro-assembler-x64.h

@ -737,6 +737,9 @@ class MacroAssembler: public Assembler {
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Check that the stack is aligned.
void CheckStackAlignment();
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }

4
deps/v8/src/x64/regexp-macro-assembler-x64.cc

@ -39,7 +39,7 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
@ -1310,6 +1310,6 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

4
deps/v8/src/x64/regexp-macro-assembler-x64.h

@ -31,7 +31,7 @@
namespace v8 {
namespace internal {
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
@ -271,7 +271,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal

85
deps/v8/src/x64/stub-cache-x64.cc

@ -649,6 +649,26 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
GlobalObject* global,
String* name,
Register scratch,
Label* miss) {
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) return probe;
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
Factory::the_hole_value());
__ j(not_equal, miss);
return cell;
}
#undef __
#define __ ACCESS_MASM((masm()))
@ -1144,6 +1164,51 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
Label miss;
// Load receiver.
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver is not a smi.
__ JumpIfSmi(rax, &miss);
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
CheckPrototypes(object, rax, last, rbx, rdx, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(last),
name,
rdx,
&miss);
if (cell->IsFailure()) return cell;
}
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret(0);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NONEXISTENT, Heap::empty_string());
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
@ -1765,21 +1830,19 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
while (object != holder) {
if (object->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(object);
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) {
set_failure(Failure::cast(probe));
Object* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(object),
name,
scratch,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
return result;
}
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
Factory::the_hole_value());
__ j(not_equal, miss);
}
object = JSObject::cast(object->GetPrototype());
}

1
deps/v8/test/cctest/SConscript

@ -55,6 +55,7 @@ SOURCES = {
'test-heap.cc',
'test-heap-profiler.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lock.cc',
'test-log.cc',
'test-log-utils.cc',

6
deps/v8/test/cctest/test-cpu-profiler.cc

@ -2,7 +2,7 @@
//
// Tests of profiles generator and utilities.
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
#include "cpu-profiler-inl.h"
@ -176,7 +176,7 @@ TEST(TickEvents) {
processor.Stop();
processor.Join();
CpuProfile* profile = profiles.StopProfiling("");
CpuProfile* profile = profiles.StopProfiling("", 1);
CHECK_NE(NULL, profile);
// Check call trees.
@ -222,4 +222,4 @@ TEST(TickEvents) {
CHECK_EQ("bbb", bottom_up_ddd_stub_children->last()->entry()->name());
}
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // ENABLE_LOGGING_AND_PROFILING

4
deps/v8/test/cctest/test-debug.cc

@ -5439,7 +5439,7 @@ TEST(DebugBreakInMessageHandler) {
}
#ifdef V8_NATIVE_REGEXP
#ifndef V8_INTERPRETED_REGEXP
// Debug event handler which gets the function on the top frame and schedules a
// break a number of times.
static void DebugEventDebugBreak(
@ -5506,7 +5506,7 @@ TEST(RegExpDebugBreak) {
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ("f", last_function_hit);
}
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
// Common part of EvalContextData and NestedBreakEventContextData tests.

174
deps/v8/test/cctest/test-liveedit.cc

@ -0,0 +1,174 @@
// Copyright 2007-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "v8.h"
#include "liveedit.h"
#include "cctest.h"
using namespace v8::internal;
// Anonymous namespace.
namespace {
class StringCompareInput : public Compare::Input {
public:
StringCompareInput(const char* s1, const char* s2) : s1_(s1), s2_(s2) {
}
int getLength1() {
return StrLength(s1_);
}
int getLength2() {
return StrLength(s2_);
}
bool equals(int index1, int index2) {
return s1_[index1] == s2_[index2];
}
private:
const char* s1_;
const char* s2_;
};
class DiffChunkStruct : public ZoneObject {
public:
DiffChunkStruct(int pos1_param, int pos2_param,
int len1_param, int len2_param)
: pos1(pos1_param), pos2(pos2_param),
len1(len1_param), len2(len2_param), next(NULL) {}
int pos1;
int pos2;
int len1;
int len2;
DiffChunkStruct* next;
};
class ListDiffOutputWriter : public Compare::Output {
public:
explicit ListDiffOutputWriter(DiffChunkStruct** next_chunk_pointer)
: next_chunk_pointer_(next_chunk_pointer) {
(*next_chunk_pointer_) = NULL;
}
void AddChunk(int pos1, int pos2, int len1, int len2) {
current_chunk_ = new DiffChunkStruct(pos1, pos2, len1, len2);
(*next_chunk_pointer_) = current_chunk_;
next_chunk_pointer_ = &current_chunk_->next;
}
private:
DiffChunkStruct** next_chunk_pointer_;
DiffChunkStruct* current_chunk_;
};
void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
ZoneScope zone_scope(DELETE_ON_EXIT);
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk);
Compare::CalculateDifference(&input, &writer);
int len1 = StrLength(s1);
int len2 = StrLength(s2);
int pos1 = 0;
int pos2 = 0;
int diff_parameter = 0;
for (DiffChunkStruct* chunk = first_chunk;
chunk != NULL;
chunk = chunk->next) {
int diff_pos1 = chunk->pos1;
int similar_part_length = diff_pos1 - pos1;
int diff_pos2 = pos2 + similar_part_length;
ASSERT_EQ(diff_pos2, chunk->pos2);
for (int j = 0; j < similar_part_length; j++) {
ASSERT(pos1 + j < len1);
ASSERT(pos2 + j < len2);
ASSERT_EQ(s1[pos1 + j], s2[pos2 + j]);
}
diff_parameter += chunk->len1 + chunk->len2;
pos1 = diff_pos1 + chunk->len1;
pos2 = diff_pos2 + chunk->len2;
}
{
// After last chunk.
int similar_part_length = len1 - pos1;
ASSERT_EQ(similar_part_length, len2 - pos2);
USE(len2);
for (int j = 0; j < similar_part_length; j++) {
ASSERT(pos1 + j < len1);
ASSERT(pos2 + j < len2);
ASSERT_EQ(s1[pos1 + j], s2[pos2 + j]);
}
}
if (expected_diff_parameter != -1) {
ASSERT_EQ(expected_diff_parameter, diff_parameter);
}
}
void CompareStrings(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
CompareStringsOneWay(s1, s2, expected_diff_parameter);
CompareStringsOneWay(s2, s1, expected_diff_parameter);
}
} // Anonymous namespace.
// --- T h e A c t u a l T e s t s
TEST(LiveEditDiffer) {
CompareStrings("zz1zzz12zz123zzz", "zzzzzzzzzz", 6);
CompareStrings("zz1zzz12zz123zzz", "zz0zzz0zz0zzz", 9);
CompareStrings("123456789", "987654321", 16);
CompareStrings("zzz", "yyy", 6);
CompareStrings("zzz", "zzz12", 2);
CompareStrings("zzz", "21zzz", 2);
CompareStrings("cat", "cut", 2);
CompareStrings("ct", "cut", 1);
CompareStrings("cat", "ct", 1);
CompareStrings("cat", "cat", 0);
CompareStrings("", "", 0);
CompareStrings("cat", "", 3);
CompareStrings("a cat", "a capybara", 7);
CompareStrings("abbabababababaaabbabababababbabbbbbbbababa",
"bbbbabababbbabababbbabababababbabbababa");
}

59
deps/v8/test/cctest/test-profile-generator.cc

@ -2,7 +2,7 @@
//
// Tests of profiles generator and utilities.
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
#include "profile-generator-inl.h"
@ -17,12 +17,13 @@ using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
using i::ProfileGenerator;
using i::SampleRateCalculator;
using i::TickSample;
using i::Vector;
TEST(ProfileNodeFindOrAddChild) {
ProfileNode node(NULL);
ProfileNode node(NULL, NULL);
CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
@ -424,7 +425,7 @@ TEST(RecordTickSample) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("");
CpuProfile* profile = profiles.StopProfiling("", 1);
CHECK_NE(NULL, profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
@ -443,4 +444,54 @@ TEST(RecordTickSample) {
CHECK_EQ(entry1, node4->entry());
}
#endif // ENABLE_CPP_PROFILES_PROCESSOR
TEST(SampleRateCalculator) {
const double kSamplingIntervalMs = i::Logger::kSamplingIntervalMs;
// Verify that ticking exactly in query intervals results in the
// initial sampling interval.
double time = 0.0;
SampleRateCalculator calc1;
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
SampleRateCalculator calc2;
time = 0.0;
CHECK_EQ(kSamplingIntervalMs, calc2.ticks_per_ms());
calc2.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc2.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.5;
calc2.UpdateMeasurements(time);
// (1.0 + 2.0) / 2
CHECK_EQ(kSamplingIntervalMs * 1.5, calc2.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.75;
calc2.UpdateMeasurements(time);
// (1.0 + 2.0 + 2.0) / 3
CHECK_EQ(kSamplingIntervalMs * 1.66666, calc2.ticks_per_ms());
SampleRateCalculator calc3;
time = 0.0;
CHECK_EQ(kSamplingIntervalMs, calc3.ticks_per_ms());
calc3.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc3.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 2;
calc3.UpdateMeasurements(time);
// (1.0 + 0.5) / 2
CHECK_EQ(kSamplingIntervalMs * 0.75, calc3.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 1.5;
calc3.UpdateMeasurements(time);
// (1.0 + 0.5 + 0.5) / 3
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
}
#endif // ENABLE_LOGGING_AND_PROFILING

