diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index bfe58a2c37..5c5ae4e1b8 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -17,3 +17,4 @@ Paolo Giarrusso Rafal Krypa Rene Rebe Ryan Dahl +Patrick Gansterer diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index b07e7cc15e..a78755b63a 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,15 @@ +2009-09-09: Version 1.3.10 + + Fixed profiler on Mac in 64-bit mode. + + Optimized creation of objects from simple constructor functions on + ARM. + + Fixed a number of debugger issues. + + Reduced the amount of memory consumed by V8. + + 2009-09-02: Version 1.3.9 Optimized stack guard checks on ARM. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 71673c0f0a..ddd0190af7 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -258,6 +258,10 @@ V8_EXTRA_FLAGS = { 'all': { 'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800'] }, + 'library:shared': { + 'CPPDEFINES': ['BUILDING_V8_SHARED'], + 'LIBS': ['winmm', 'ws2_32'] + }, 'arch:ia32': { 'WARNINGFLAGS': ['/W3'] }, diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 346050d5d3..2789bad216 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -979,8 +979,9 @@ class V8EXPORT String : public Primitive { public: explicit Utf8Value(Handle obj); ~Utf8Value(); - char* operator*() const { return str_; } - int length() { return length_; } + char* operator*() { return str_; } + const char* operator*() const { return str_; } + int length() const { return length_; } private: char* str_; int length_; @@ -1001,8 +1002,9 @@ class V8EXPORT String : public Primitive { public: explicit AsciiValue(Handle obj); ~AsciiValue(); - char* operator*() const { return str_; } - int length() { return length_; } + char* operator*() { return str_; } + const char* operator*() const { return str_; } + int length() const { return length_; } private: char* str_; int length_; @@ -1022,8 +1024,9 @@ class V8EXPORT String : public Primitive { public: explicit Value(Handle obj); ~Value(); - uint16_t* operator*() const { return str_; } - int length() { return length_; } + uint16_t* operator*() { return str_; } + const uint16_t* operator*() const { return str_; } + int length() const { return length_; } private: uint16_t* str_; int length_; diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index fee3fab431..a1cbf1ba29 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -56,9 +56,9 @@ SOURCES = { ], 'arch:arm': [ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc', - 'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc', - 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc', - 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc', + 'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc', + 'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc', + 'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc', 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc' diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 679e038d3a..1128d3e022 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -427,7 +427,7 @@ void Context::Enter() { i::Handle env = Utils::OpenHandle(this); thread_local.EnterContext(env); - thread_local.SaveContext(i::GlobalHandles::Create(i::Top::context())); + thread_local.SaveContext(i::Top::context()); i::Top::set_context(*env); } @@ -441,9 +441,8 @@ void Context::Exit() { } // Content of 'last_context' could be NULL. 
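// NOTE: illustrative sketch (editorial annotation, not part of the upstream
// patch). The v8.h hunks earlier in this diff pair a const operator*()
// overload with the mutable one and mark length() const, so the String value
// wrappers can be used through const references without handing out a
// writable pointer. The same pattern in a standalone class (hypothetical
// name, not V8 API):

#include <cstring>

class ScopedCString {
 public:
  explicit ScopedCString(const char* s)
      : length_(static_cast<int>(strlen(s))), str_(new char[length_ + 1]) {
    memcpy(str_, s, length_ + 1);
  }
  ~ScopedCString() { delete[] str_; }
  char* operator*() { return str_; }              // mutable buffer access
  const char* operator*() const { return str_; }  // read-only for const objects
  int length() const { return length_; }
 private:
  int length_;
  char* str_;
};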
- i::Handle last_context = thread_local.RestoreContext(); - i::Top::set_context(static_cast(*last_context)); - i::GlobalHandles::Destroy(last_context.location()); + i::Context* last_context = thread_local.RestoreContext(); + i::Top::set_context(last_context); } @@ -3700,19 +3699,21 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) { } -void HandleScopeImplementer::Iterate( - ObjectVisitor* v, - List* blocks, - v8::ImplementationUtilities::HandleScopeData* handle_data) { +void HandleScopeImplementer::IterateThis(ObjectVisitor* v) { // Iterate over all handles in the blocks except for the last. - for (int i = blocks->length() - 2; i >= 0; --i) { - Object** block = blocks->at(i); + for (int i = Blocks()->length() - 2; i >= 0; --i) { + Object** block = Blocks()->at(i); v->VisitPointers(block, &block[kHandleBlockSize]); } // Iterate over live handles in the last block (if any). - if (!blocks->is_empty()) { - v->VisitPointers(blocks->last(), handle_data->next); + if (!Blocks()->is_empty()) { + v->VisitPointers(Blocks()->last(), handle_scope_data_.next); + } + + if (!saved_contexts_.is_empty()) { + Object** start = reinterpret_cast(&saved_contexts_.first()); + v->VisitPointers(start, start + saved_contexts_.length()); } } @@ -3720,18 +3721,15 @@ void HandleScopeImplementer::Iterate( void HandleScopeImplementer::Iterate(ObjectVisitor* v) { v8::ImplementationUtilities::HandleScopeData* current = v8::ImplementationUtilities::CurrentHandleScope(); - Iterate(v, thread_local.Blocks(), current); + thread_local.handle_scope_data_ = *current; + thread_local.IterateThis(v); } char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) { HandleScopeImplementer* thread_local = reinterpret_cast(storage); - List* blocks_of_archived_thread = thread_local->Blocks(); - v8::ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread = - &thread_local->handle_scope_data_; - Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread); - + thread_local->IterateThis(v); return storage + ArchiveSpacePerThread(); } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index ca8f523c94..9ae6307b4d 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -352,8 +352,8 @@ class HandleScopeImplementer { // contexts have been entered. inline Handle LastEnteredContext(); - inline void SaveContext(Handle context); - inline Handle RestoreContext(); + inline void SaveContext(Context* context); + inline Context* RestoreContext(); inline bool HasSavedContexts(); inline List* Blocks() { return &blocks; } @@ -368,14 +368,12 @@ class HandleScopeImplementer { // Used as a stack to keep track of entered contexts. List > entered_contexts_; // Used as a stack to keep track of saved contexts. - List > saved_contexts_; + List saved_contexts_; bool ignore_out_of_memory; // This is only used for threading support. 
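// NOTE: illustrative sketch, not part of the patch. SaveContext now stores a
// raw Context* instead of creating and destroying a global handle on every
// Enter/Exit; the cost is that saved_contexts_ must be reported to the GC
// explicitly, which is what the new VisitPointers call in IterateThis() does.
// The root-reporting idea with hypothetical types:

#include <vector>

struct Object;

struct ObjectVisitor {
  virtual ~ObjectVisitor() {}
  virtual void VisitPointers(Object** start, Object** end) = 0;
};

struct SavedContexts {
  std::vector<Object*> list;  // raw pointers are invisible to the GC on their own

  void Iterate(ObjectVisitor* v) {
    if (!list.empty()) {
      Object** start = &list[0];
      v->VisitPointers(start, start + list.size());  // expose as a root range
    }
  }
};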
v8::ImplementationUtilities::HandleScopeData handle_scope_data_; - static void Iterate(ObjectVisitor* v, - List* blocks, - v8::ImplementationUtilities::HandleScopeData* handle_data); + void IterateThis(ObjectVisitor* v); char* RestoreThreadHelper(char* from); char* ArchiveThreadHelper(char* to); @@ -386,12 +384,12 @@ class HandleScopeImplementer { static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page -void HandleScopeImplementer::SaveContext(Handle context) { +void HandleScopeImplementer::SaveContext(Context* context) { saved_contexts_.Add(context); } -Handle HandleScopeImplementer::RestoreContext() { +Context* HandleScopeImplementer::RestoreContext() { return saved_contexts_.RemoveLast(); } diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index daf2378eb2..920110f92e 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -88,23 +88,200 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Enter a construct frame. __ EnterConstructFrame(); - // Preserve the two incoming parameters + // Preserve the two incoming parameters on the stack. __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // smi-tagged arguments count - __ push(r1); // constructor function + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Use r7 for holding undefined which is used in several places below. + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + + // Try to allocate the object without transitioning into C code. If any of the + // preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; +#ifdef ENABLE_DEBUGGER_SUPPORT + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(nz, &rt_call); +#endif + + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + // r7: undefined + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see comments + // in Runtime_NewObject in runtime.cc). In which case the initial map's + // instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + // r7: undefined + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); + + // Now allocate the JSObject on the heap. + // r1: constructor function + // r2: initial map + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. 
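// NOTE: illustrative sketch, not part of the patch. AllocateObjectInNewSpace,
// which the new inline construction path above leans on, is bump-pointer
// allocation: advance a shared "top" pointer and bail out to the GC when
// "limit" would be crossed. In plain C++ (hypothetical globals, sizes in
// bytes for simplicity):

#include <cstddef>
#include <cstdint>

extern uintptr_t new_space_top;    // next free address
extern uintptr_t new_space_limit;  // end of the space

// Returns the object start, or 0 when the caller must run a GC (the generated
// ARM code branches to the gc_required label instead).
inline uintptr_t AllocateRaw(size_t size_in_bytes) {
  uintptr_t result = new_space_top;
  if (result + size_in_bytes > new_space_limit) return 0;  // gc_required
  new_space_top = result + size_in_bytes;                  // publish new top
  return result;
}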
+ // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with undefined. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + // r7: undefined + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + { Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r5, Operand(r6)); + __ b(lt, &loop); + } + + // Add the object tag to make the JSObject real, so that we can continue and + // jump into the continuation code at any time from now on. Any failures + // need to undo the allocation, so that the heap is in a consistent state + // and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with allocated + // object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields and + // in-object properties. + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ and_(r6, + r0, + Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8)); + __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8)); + __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8)); + __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC); + + // Done if no extra properties are to be allocated. + __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + // r7: undefined + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateObjectInNewSpace(r0, + r5, + r6, + r2, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset); + __ str(r3, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. + // r1: constructor function + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + // r7: undefined + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. 
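// NOTE: illustrative sketch, not part of the patch. The str/cmp/b(lt) loop
// above is a plain "store until the end pointer" fill over raw words. Its C++
// equivalent:

#include <cstdint>

inline void FillWords(uintptr_t* start, uintptr_t* end, uintptr_t value) {
  // Mirrors: loop { str value, [p], #4 (post-index) } while (p < end)
  for (uintptr_t* p = start; p < end; ++p) *p = value;
}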
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, Operand(r6)); + __ b(lt, &loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject + // r1: constructor function + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated + // r1: constructor function + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Allocate the new receiver object. + // Allocate the new receiver object using the runtime call. + // r1: constructor function + __ bind(&rt_call); __ push(r1); // argument for Runtime_NewObject __ CallRuntime(Runtime::kNewObject, 1); - __ push(r0); // save the receiver + __ mov(r4, r0); + + // Receiver for constructor call allocated. + // r4: JSObject + __ bind(&allocated); + __ push(r4); // Push the function and the allocated receiver from the stack. // sp[0]: receiver (newly allocated object) // sp[1]: constructor function // sp[2]: number of arguments (smi-tagged) __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // function - __ push(r0); // receiver + __ push(r1); // Constructor function. + __ push(r4); // Receiver. // Reload the number of arguments from the stack. // r1: constructor function @@ -194,6 +371,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); + __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2); __ Jump(lr); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index b94aa10bd2..7b3662d4fc 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -4950,12 +4950,12 @@ static void AllocateHeapNumber( Register scratch2) { // Another scratch register. // Allocate an object in the heap for the heap number and tag it as a heap // object. - __ AllocateObjectInNewSpace(HeapNumber::kSize, + __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize, result, scratch1, scratch2, need_gc, - true); + TAG_OBJECT); // Get heap number map and store it in the allocated object. __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); @@ -5623,7 +5623,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) { // argument, so give it a Smi. __ mov(r0, Operand(Smi::FromInt(0))); __ push(r0); - __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1); + __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1); __ StubReturn(1); } @@ -5678,6 +5678,13 @@ void UnarySubStub::Generate(MacroAssembler* masm) { } +int CEntryStub::MinorKey() { + ASSERT(result_size_ <= 2); + // Result returned in r0 or r0+r1 by default. + return 0; +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { // r0 holds the exception. @@ -6195,7 +6202,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // by calling the runtime system. 
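// NOTE: illustrative sketch, not part of the patch. The AllocateHeapNumber
// change below replaces a bare bool with an AllocationFlags value, so call
// sites read TAG_OBJECT / RESULT_CONTAINS_TOP instead of an opaque
// true/false. The flag-set idiom (flag names appear in the patch, the values
// and helper here are hypothetical):

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,          // return the result with the heap-object tag
  RESULT_CONTAINS_TOP = 1 << 1  // result register already holds allocation top
};

inline bool WantsTag(int flags) {
  return (flags & TAG_OBJECT) != 0;  // the same test the macro assembler emits
}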
__ bind(&slow); __ push(r1); - __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1); + __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1); } @@ -6216,7 +6223,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3); + __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1); } diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc new file mode 100644 index 0000000000..964bfe14f0 --- /dev/null +++ b/deps/v8/src/arm/constants-arm.cc @@ -0,0 +1,92 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "constants-arm.h" + + +namespace assembler { +namespace arm { + +namespace v8i = v8::internal; + + +// These register names are defined in a way to match the native disassembler +// formatting. See for example the command "objdump -d ". +const char* Registers::names_[kNumRegisters] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc", +}; + + +// List of alias names which can be used when referring to ARM registers. +const Registers::RegisterAlias Registers::aliases_[] = { + {10, "sl"}, + {11, "r11"}, + {12, "r12"}, + {13, "r13"}, + {14, "r14"}, + {15, "r15"}, + {kNoRegister, NULL} +}; + + +const char* Registers::Name(int reg) { + const char* result; + if ((0 <= reg) && (reg < kNumRegisters)) { + result = names_[reg]; + } else { + result = "noreg"; + } + return result; +} + + +int Registers::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. 
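// NOTE: illustrative usage, not part of the patch (the alias scan that
// completes Registers::Number follows below). Expected behaviour of the new
// helpers, read off the tables above:
//
//   Registers::Name(11)        -> "fp"         (canonical table)
//   Registers::Number("fp")    -> 11           (canonical name)
//   Registers::Number("sl")    -> 10           (via the alias table)
//   Registers::Number("bogus") -> kNoRegister  (no match anywhere)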
+ int i = 0; + while (aliases_[i].reg != kNoRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].reg; + } + i++; + } + + // No register with the reguested name found. + return kNoRegister; +} + + +} } // namespace assembler::arm diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index f0311dfc17..2f2b709508 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -52,6 +52,13 @@ namespace assembler { namespace arm { +// Number of registers in normal ARM mode. +static const int kNumRegisters = 16; + +// PC is register 15. +static const int kPCRegister = 15; +static const int kNoRegister = -1; + // Defines constants and accessor classes to assemble, disassemble and // simulate ARM instructions. // @@ -269,6 +276,27 @@ class Instr { }; +// Helper functions for converting between register numbers and names. +class Registers { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int reg; + const char *name; + }; + + private: + static const char* names_[kNumRegisters]; + static const RegisterAlias aliases_[]; +}; + + + } } // namespace assembler::arm #endif // V8_ARM_CONSTANTS_ARM_H_ diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index bcfab6c809..e14284136c 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -179,12 +179,6 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { } -void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) { - // Generate nothing as this handling of debug break return is not done this - // way on ARM - yet. -} - - void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { // Generate nothing as CodeStub CallFunction is not used on ARM. } diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 2638409e85..64314837d6 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -57,6 +57,7 @@ #include "v8.h" +#include "constants-arm.h" #include "disasm.h" #include "macro-assembler.h" #include "platform.h" @@ -898,16 +899,6 @@ namespace disasm { namespace v8i = v8::internal; -static const int kMaxRegisters = 16; - -// These register names are defined in a way to match the native disassembler -// formatting. See for example the command "objdump -d ". -static const char* reg_names[kMaxRegisters] = { - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc", -}; - - const char* NameConverter::NameOfAddress(byte* addr) const { static v8::internal::EmbeddedVector tmp_buffer; v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr); @@ -921,13 +912,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const { const char* NameConverter::NameOfCPURegister(int reg) const { - const char* result; - if ((0 <= reg) && (reg < kMaxRegisters)) { - result = reg_names[reg]; - } else { - result = "noreg"; - } - return result; + return assembler::arm::Registers::Name(reg); } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 848d04b540..d230b4546f 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -391,7 +391,7 @@ void CallIC::Generate(MacroAssembler* masm, __ mov(r0, Operand(2)); __ mov(r1, Operand(f)); - CEntryStub stub; + CEntryStub stub(1); __ CallStub(&stub); // Move result to r1 and leave the internal frame. 
@@ -503,7 +503,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ stm(db_w, sp, r2.bit() | r3.bit()); // Perform tail call to the entry. - __ TailCallRuntime(f, 2); + __ TailCallRuntime(f, 2, 1); } @@ -543,7 +543,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ ldm(ia, sp, r2.bit() | r3.bit()); __ stm(db_w, sp, r2.bit() | r3.bit()); - __ TailCallRuntime(f, 2); + __ TailCallRuntime(f, 2, 1); } @@ -599,7 +599,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ldm(ia, sp, r0.bit() | r1.bit()); __ stm(db_w, sp, r0.bit() | r1.bit()); // Do tail-call to runtime routine. - __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2); + __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1); // Fast case: Do the load. __ bind(&fast); @@ -626,7 +626,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, __ ldm(ia, sp, r2.bit() | r3.bit()); __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit()); - __ TailCallRuntime(f, 3); + __ TailCallRuntime(f, 3, 1); } @@ -684,7 +684,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit()); // Do tail-call to runtime routine. - __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3); + __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one @@ -761,7 +761,7 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) { // Perform tail call to the entry. __ TailCallRuntime( - ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1); } @@ -798,7 +798,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) { // Perform tail call to the entry. __ TailCallRuntime( - ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1); } @@ -814,7 +814,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit()); // Perform tail call to the entry. 
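// NOTE: illustrative sketch, not part of the patch. Every TailCallRuntime
// site in these hunks now passes an explicit result size of 1: the runtime
// call returns a single tagged value in r0. The patch's CEntryStub comment
// notes that two-word results come back in r0+r1, which is how a 64-bit
// integer return behaves on 32-bit ARM:

#include <stdint.h>

// Two tagged words packed into one 64-bit value: on 32-bit ARM a uint64_t
// return is delivered in r0 (low word) and r1 (high word), so the calling
// stub must know both registers are live.
inline uint64_t MakePair(uint32_t first, uint32_t second) {
  return static_cast<uint64_t>(first) | (static_cast<uint64_t>(second) << 32);
}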
- __ TailCallRuntime(f, 3); + __ TailCallRuntime(f, 3, 1); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2ca4898682..de2db90085 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -56,6 +56,7 @@ MacroAssembler::MacroAssembler(void* buffer, int size) #if defined(USE_THUMB_INTERWORK) #if !defined(__ARM_ARCH_5T__) && \ !defined(__ARM_ARCH_5TE__) && \ + !defined(__ARM_ARCH_6__) && \ !defined(__ARM_ARCH_7A__) && \ !defined(__ARM_ARCH_7__) // add tests for other versions above v5t as required @@ -773,7 +774,7 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size, Register scratch1, Register scratch2, Label* gc_required, - bool tag_allocated_object) { + AllocationFlags flags) { ASSERT(!result.is(scratch1)); ASSERT(!scratch1.is(scratch2)); @@ -782,7 +783,18 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size, ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(); mov(scratch1, Operand(new_space_allocation_top)); - ldr(result, MemOperand(scratch1)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { + ldr(result, MemOperand(scratch1)); + } else { +#ifdef DEBUG + // Assert that result actually contains top on entry. scratch2 is used + // immediately below so this use of scratch2 does not cause difference with + // respect to register content between debug and release mode. + ldr(scratch2, MemOperand(scratch1)); + cmp(result, scratch2); + Check(eq, "Unexpected allocation top"); +#endif + } // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. @@ -790,7 +802,7 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size, ExternalReference::new_space_allocation_limit_address(); mov(scratch2, Operand(new_space_allocation_limit)); ldr(scratch2, MemOperand(scratch2)); - add(result, result, Operand(object_size)); + add(result, result, Operand(object_size * kPointerSize)); cmp(result, Operand(scratch2)); b(hi, gc_required); @@ -798,19 +810,98 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size, str(result, MemOperand(scratch1)); // Tag and adjust back to start of new object. - if (tag_allocated_object) { - sub(result, result, Operand(object_size - kHeapObjectTag)); + if ((flags & TAG_OBJECT) != 0) { + sub(result, result, Operand((object_size * kPointerSize) - + kHeapObjectTag)); + } else { + sub(result, result, Operand(object_size * kPointerSize)); + } +} + + +void MacroAssembler::AllocateObjectInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { + ASSERT(!result.is(scratch1)); + ASSERT(!scratch1.is(scratch2)); + + // Load address of new object into result and allocation top address into + // scratch1. + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(); + mov(scratch1, Operand(new_space_allocation_top)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { + ldr(result, MemOperand(scratch1)); } else { - sub(result, result, Operand(object_size)); +#ifdef DEBUG + // Assert that result actually contains top on entry. scratch2 is used + // immediately below so this use of scratch2 does not cause difference with + // respect to register content between debug and release mode. + ldr(scratch2, MemOperand(scratch1)); + cmp(result, scratch2); + Check(eq, "Unexpected allocation top"); +#endif + } + + // Calculate new top and bail out if new space is exhausted. 
Use result + // to calculate the new top. Object size is in words so a shift is required to + // get the number of bytes + ExternalReference new_space_allocation_limit = + ExternalReference::new_space_allocation_limit_address(); + mov(scratch2, Operand(new_space_allocation_limit)); + ldr(scratch2, MemOperand(scratch2)); + add(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + cmp(result, Operand(scratch2)); + b(hi, gc_required); + + // Update allocation top. result temporarily holds the new top, + str(result, MemOperand(scratch1)); + + // Adjust back to start of new object. + sub(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + + // Tag object if requested. + if ((flags & TAG_OBJECT) != 0) { + add(result, result, Operand(kHeapObjectTag)); } } +void MacroAssembler::UndoAllocationInNewSpace(Register object, + Register scratch) { + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(); + + // Make sure the object has no tag before resetting top. + and_(object, object, Operand(~kHeapObjectTagMask)); +#ifdef DEBUG + // Check that the object un-allocated is below the current top. + mov(scratch, Operand(new_space_allocation_top)); + ldr(scratch, MemOperand(scratch)); + cmp(object, scratch); + Check(lt, "Undo allocation of non allocated memory"); +#endif + // Write the address of the object to un-allocate as the current top. + mov(scratch, Operand(new_space_allocation_top)); + str(object, MemOperand(scratch)); +} + + void MacroAssembler::CompareObjectType(Register function, Register map, Register type_reg, InstanceType type) { ldr(map, FieldMemOperand(function, HeapObject::kMapOffset)); + CompareInstanceType(map, type_reg, type); +} + + +void MacroAssembler::CompareInstanceType(Register map, + Register type_reg, + InstanceType type) { ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); cmp(type_reg, Operand(type)); } @@ -909,7 +1000,8 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { void MacroAssembler::TailCallRuntime(const ExternalReference& ext, - int num_arguments) { + int num_arguments, + int result_size) { // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we // should remove this need and make the runtime routine entry code @@ -925,7 +1017,7 @@ void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) { ASSERT((reinterpret_cast(builtin.address()) & 1) == 1); #endif mov(r1, Operand(builtin)); - CEntryStub stub; + CEntryStub stub(1); Jump(stub.GetCode(), RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 4bbd9802f3..f45cce51a9 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -38,34 +38,11 @@ namespace internal { const Register cp = { 8 }; // JavaScript context pointer -// Helper types to make boolean flag easier to read at call-site. 
-enum InvokeFlag { - CALL_FUNCTION, - JUMP_FUNCTION -}; - enum InvokeJSFlags { CALL_JS, JUMP_JS }; -enum ExitJSFlag { - RETURN, - DO_NOT_RETURN -}; - -enum CodeLocation { - IN_JAVASCRIPT, - IN_JS_ENTRY, - IN_C_ENTRY -}; - -enum HandlerType { - TRY_CATCH_HANDLER, - TRY_FINALLY_HANDLER, - JS_ENTRY_HANDLER -}; - // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { @@ -190,16 +167,28 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Allocation support - // Allocate an object in new space. If the new space is exhausted control - // continues at the gc_required label. The allocated object is returned in - // result. If the flag tag_allocated_object is true the result is tagged as - // as a heap object. + // Allocate an object in new space. The object_size is specified in words (not + // bytes). If the new space is exhausted control continues at the gc_required + // label. The allocated object is returned in result. If the flag + // tag_allocated_object is true the result is tagged as as a heap object. void AllocateObjectInNewSpace(int object_size, Register result, Register scratch1, Register scratch2, Label* gc_required, - bool tag_allocated_object); + AllocationFlags flags); + void AllocateObjectInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + + // Undo allocation in new space. The object passed and objects allocated after + // it will no longer be allocated. The caller must make sure that no pointers + // are left to the object(s) no longer allocated as they would be invalid when + // allocation is undone. + void UndoAllocationInNewSpace(Register object, Register scratch); // --------------------------------------------------------------------------- // Support functions. @@ -220,12 +209,21 @@ class MacroAssembler: public Assembler { // It leaves the map in the map register (unless the type_reg and map register // are the same register). It leaves the heap object in the heap_object // register unless the heap_object register is the same register as one of the - // other // registers. + // other registers. void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type); + // Compare instance type in a map. map contains a valid map object whose + // object type should be compared with the given type. This both + // sets the flags and leaves the object type in the type_reg register. It + // leaves the heap object in the heap_object register unless the heap_object + // register is the same register as type_reg. + void CompareInstanceType(Register map, + Register type_reg, + InstanceType type); + inline void BranchOnSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); @@ -261,7 +259,9 @@ class MacroAssembler: public Assembler { // Tail call of a runtime routine (jump). // Like JumpToBuiltin, but also takes care of passing the number // of parameters. - void TailCallRuntime(const ExternalReference& ext, int num_arguments); + void TailCallRuntime(const ExternalReference& ext, + int num_arguments, + int result_size); // Jump to the builtin routine. 
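// NOTE: illustrative sketch, not part of the patch. UndoAllocationInNewSpace,
// declared further below, works because new space is a bump-pointer arena:
// undoing the most recent allocation is just moving "top" back to the
// object's start. Continuing the hypothetical globals from the earlier
// allocation sketch:

#include <cstdint>

extern uintptr_t new_space_top;

inline void UndoAllocation(uintptr_t object_start) {
  // Only valid for the most recent allocation; anything allocated after
  // object_start is wiped out too, as the header comment warns.
  new_space_top = object_start;
}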
void JumpToBuiltin(const ExternalReference& builtin); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 252d7839fb..2e75a61a84 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -216,25 +216,29 @@ void RegExpMacroAssemblerARM::CheckCharacters(Vector str, int cp_offset, Label* on_failure, bool check_end_of_string) { - int byte_length = str.length() * char_size(); - int byte_offset = cp_offset * char_size(); - if (check_end_of_string) { - // Check that there are at least str.length() characters left in the input. - __ cmp(end_of_input_address(), Operand(-(byte_offset + byte_length))); - BranchOrBacktrack(gt, on_failure); - } - if (on_failure == NULL) { - // Instead of inlining a backtrack, (re)use the global backtrack target. + // Instead of inlining a backtrack for each test, (re)use the global + // backtrack target. on_failure = &backtrack_label_; } + if (check_end_of_string) { + // Is last character of required match inside string. + CheckPosition(cp_offset + str.length() - 1, on_failure); + } + __ add(r0, end_of_input_address(), Operand(current_input_offset())); + if (cp_offset != 0) { + int byte_offset = cp_offset * char_size(); + __ add(r0, r0, Operand(byte_offset)); + } + + // r0 : Address of characters to match against str. int stored_high_byte = 0; for (int i = 0; i < str.length(); i++) { if (mode_ == ASCII) { __ ldrb(r1, MemOperand(r0, char_size(), PostIndex)); - // str[i] is known to be an ASCII character. + ASSERT(str[i] <= String::kMaxAsciiCharCode); __ cmp(r1, Operand(str[i])); } else { __ ldrh(r1, MemOperand(r0, char_size(), PostIndex)); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index e258e5a686..70dfcd2a9d 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -70,7 +70,8 @@ class Debugger { Simulator* sim_; - bool GetValue(char* desc, int32_t* value); + int32_t GetRegisterValue(int regnum); + bool GetValue(const char* desc, int32_t* value); // Set or delete a breakpoint. Returns true if successful. 
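// NOTE: illustrative sketch, not part of the patch. The reworked
// Debugger::GetValue below accepts either a register name (resolved through
// the new Registers::Number helper) or a numeric literal. The parse order in
// standalone form (LookupRegister is hypothetical):

#include <cstdio>

int LookupRegister(const char* name);  // returns -1 when not a register name

bool ParseValue(const char* desc, int* value, const int* regs) {
  int regnum = LookupRegister(desc);
  if (regnum >= 0) {
    *value = regs[regnum];  // register contents take precedence
    return true;
  }
  // "%i" accepts decimal, 0x-prefixed hex and octal, like SScanF below.
  return sscanf(desc, "%i", value) == 1;
}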
bool SetBreakpoint(Instr* breakpc); @@ -132,41 +133,19 @@ void Debugger::Stop(Instr* instr) { #endif -static const char* reg_names[] = { "r0", "r1", "r2", "r3", - "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", - "r12", "r13", "r14", "r15", - "pc", "lr", "sp", "ip", - "fp", "sl", ""}; - -static int reg_nums[] = { 0, 1, 2, 3, - 4, 5, 6, 7, - 8, 9, 10, 11, - 12, 13, 14, 15, - 15, 14, 13, 12, - 11, 10}; - - -static int RegNameToRegNum(char* name) { - int reg = 0; - while (*reg_names[reg] != 0) { - if (strcmp(reg_names[reg], name) == 0) { - return reg_nums[reg]; - } - reg++; +int32_t Debugger::GetRegisterValue(int regnum) { + if (regnum == kPCRegister) { + return sim_->get_pc(); + } else { + return sim_->get_register(regnum); } - return -1; } -bool Debugger::GetValue(char* desc, int32_t* value) { - int regnum = RegNameToRegNum(desc); - if (regnum >= 0) { - if (regnum == 15) { - *value = sim_->get_pc(); - } else { - *value = sim_->get_register(regnum); - } +bool Debugger::GetValue(const char* desc, int32_t* value) { + int regnum = Registers::Number(desc); + if (regnum != kNoRegister) { + *value = GetRegisterValue(regnum); return true; } else { return SScanF(desc, "%i", value) == 1; @@ -246,7 +225,7 @@ void Debugger::Debug() { v8::internal::EmbeddedVector buffer; dasm.InstructionDecode(buffer, reinterpret_cast(sim_->get_pc())); - PrintF(" 0x%x %s\n", sim_->get_pc(), buffer.start()); + PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start()); last_pc = sim_->get_pc(); } char* line = ReadLine("sim> "); @@ -270,13 +249,20 @@ void Debugger::Debug() { } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { if (args == 2) { int32_t value; - if (GetValue(arg1, &value)) { - PrintF("%s: %d 0x%x\n", arg1, value, value); + if (strcmp(arg1, "all") == 0) { + for (int i = 0; i < kNumRegisters; i++) { + value = GetRegisterValue(i); + PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value); + } } else { - PrintF("%s unrecognized\n", arg1); + if (GetValue(arg1, &value)) { + PrintF("%s: 0x%08x %d \n", arg1, value, value); + } else { + PrintF("%s unrecognized\n", arg1); + } } } else { - PrintF("print value\n"); + PrintF("print \n"); } } else if ((strcmp(cmd, "po") == 0) || (strcmp(cmd, "printobject") == 0)) { @@ -284,16 +270,18 @@ void Debugger::Debug() { int32_t value; if (GetValue(arg1, &value)) { Object* obj = reinterpret_cast(value); - USE(obj); PrintF("%s: \n", arg1); -#if defined(DEBUG) +#ifdef DEBUG obj->PrintLn(); -#endif // defined(DEBUG) +#else + obj->ShortPrint(); + PrintF("\n"); +#endif } else { PrintF("%s unrecognized\n", arg1); } } else { - PrintF("printobject value\n"); + PrintF("printobject \n"); } } else if (strcmp(cmd, "disasm") == 0) { disasm::NameConverter converter; @@ -325,7 +313,7 @@ void Debugger::Debug() { while (cur < end) { dasm.InstructionDecode(buffer, cur); - PrintF(" 0x%x %s\n", cur, buffer.start()); + PrintF(" 0x%08x %s\n", cur, buffer.start()); cur += Instr::kInstrSize; } } else if (strcmp(cmd, "gdb") == 0) { @@ -343,7 +331,7 @@ void Debugger::Debug() { PrintF("%s unrecognized\n", arg1); } } else { - PrintF("break addr\n"); + PrintF("break
\n"); } } else if (strcmp(cmd, "del") == 0) { if (!DeleteBreakpoint(NULL)) { @@ -362,6 +350,30 @@ void Debugger::Debug() { } else { PrintF("Not at debugger stop."); } + } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { + PrintF("cont\n"); + PrintF(" continue execution (alias 'c')\n"); + PrintF("stepi\n"); + PrintF(" step one instruction (alias 'si')\n"); + PrintF("print \n"); + PrintF(" print register content (alias 'p')\n"); + PrintF(" use register name 'all' to print all registers\n"); + PrintF("printobject \n"); + PrintF(" print an object from a register (alias 'po')\n"); + PrintF("flags\n"); + PrintF(" print flags\n"); + PrintF("disasm []\n"); + PrintF("disasm [[
] ]\n"); + PrintF(" disassemble code, default is 10 instructions from pc\n"); + PrintF("gdb\n"); + PrintF(" enter gdb\n"); + PrintF("break
\n"); + PrintF(" set a break point on the address\n"); + PrintF("del\n"); + PrintF(" delete the breakpoint\n"); + PrintF("unstop\n"); + PrintF(" ignore the stop instruction at the current location"); + PrintF(" from now on\n"); } else { PrintF("Unknown command: %s\n", cmd); } @@ -576,7 +588,7 @@ int Simulator::ReadW(int32_t addr, Instr* instr) { intptr_t* ptr = reinterpret_cast(addr); return *ptr; } - PrintF("Unaligned read at %x\n", addr); + PrintF("Unaligned read at 0x%08x\n", addr); UNIMPLEMENTED(); return 0; } @@ -588,7 +600,7 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -598,7 +610,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) { uint16_t* ptr = reinterpret_cast(addr); return *ptr; } - PrintF("Unaligned unsigned halfword read at %x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); return 0; } @@ -609,7 +621,7 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) { int16_t* ptr = reinterpret_cast(addr); return *ptr; } - PrintF("Unaligned signed halfword read at %x\n", addr); + PrintF("Unaligned signed halfword read at 0x%08x\n", addr); UNIMPLEMENTED(); return 0; } @@ -621,7 +633,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned unsigned halfword write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -632,7 +644,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned halfword write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -671,7 +683,7 @@ uintptr_t Simulator::StackLimit() const { // Unsupported instructions use Format to print an error and stop execution. void Simulator::Format(Instr* instr, const char* format) { - PrintF("Simulator found unsupported instruction:\n 0x%x: %s\n", + PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n", instr, format); UNIMPLEMENTED(); } @@ -1726,7 +1738,8 @@ void Simulator::DecodeUnconditional(Instr* instr) { uint16_t halfword = ReadH(addr, instr); set_register(rd, halfword); } else { - UNIMPLEMENTED(); + Debugger dbg(this); + dbg.Stop(instr); } } @@ -1741,7 +1754,7 @@ void Simulator::InstructionDecode(Instr* instr) { v8::internal::EmbeddedVector buffer; dasm.InstructionDecode(buffer, reinterpret_cast(instr)); - PrintF(" 0x%x %s\n", instr, buffer.start()); + PrintF(" 0x%08x %s\n", instr, buffer.start()); } if (instr->ConditionField() == special_condition) { DecodeUnconditional(instr); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 745b541e54..9e44cfa510 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -478,7 +478,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference load_callback_property = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 5); + __ TailCallRuntime(load_callback_property, 5, 1); } @@ -514,7 +514,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Do tail-call to the runtime system. 
ExternalReference load_ic_property = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); - __ TailCallRuntime(load_ic_property, 5); + __ TailCallRuntime(load_ic_property, 5, 1); } @@ -884,7 +884,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference store_callback_property = ExternalReference(IC_Utility(IC::kStoreCallbackProperty)); - __ TailCallRuntime(store_callback_property, 4); + __ TailCallRuntime(store_callback_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -936,7 +936,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallRuntime(store_ic_property, 3); + __ TailCallRuntime(store_ic_property, 3, 1); // Handle store cache miss. __ bind(&miss); @@ -1344,7 +1344,138 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, Object* ConstructStubCompiler::CompileConstructStub( SharedFunctionInfo* shared) { - // Not implemented yet - just jump to generic stub. + // ----------- S t a t e ------------- + // -- r0 : argc + // -- r1 : constructor + // -- lr : return address + // -- [sp] : last argument + // ----------------------------------- + Label generic_stub_call; + + // Use r7 for holding undefined which is used in several places below. + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + +#ifdef ENABLE_DEBUGGER_SUPPORT + // Check to see whether there are any break points in the function code. If + // there are jump to the generic constructor stub which calls the actual + // code for the function thereby hitting the break points. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); + __ cmp(r2, r7); + __ b(ne, &generic_stub_call); +#endif + + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + // r7: undefined + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &generic_stub_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &generic_stub_call); + +#ifdef DEBUG + // Cannot construct functions this way. + // r0: argc + // r1: constructor function + // r2: initial map + // r7: undefined + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ Check(ne, "Function constructed by construct stub."); +#endif + + // Now allocate the JSObject in new space. + // r0: argc + // r1: constructor function + // r2: initial map + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateObjectInNewSpace(r3, + r4, + r5, + r6, + &generic_stub_call, + NO_ALLOCATION_FLAGS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. 
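// NOTE: illustrative sketch, not part of the patch. The loop further below
// specializes the constructor at compile time: each `this.x = <arg>`
// assignment becomes either a stack load (argument was passed) or the
// undefined value (argument missing). The per-slot decision in scalar form
// (hypothetical types):

typedef void* Value;

inline Value InitSlot(int arg_number, int argc, const Value* args,
                      Value undefined) {
  // Mirrors the cmp argc / b(le, &not_passed) sequence in the generated code.
  return (arg_number < argc) ? args[arg_number] : undefined;
}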
+ // r0: argc + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Calculate the location of the first argument. The stack contains only the + // argc arguments. + __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); + + // Fill all the in-object properties with undefined. + // r0: argc + // r1: first argument + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + // r7: undefined + // Fill the initialized properties with a constant value or a passed argument + // depending on the this.x = ...; assignment in the function. + for (int i = 0; i < shared->this_property_assignments_count(); i++) { + if (shared->IsThisPropertyAssignmentArgument(i)) { + Label not_passed, next; + // Check if the argument assigned to the property is actually passed. + int arg_number = shared->GetThisPropertyAssignmentArgument(i); + __ cmp(r0, Operand(arg_number)); + __ b(le, ¬_passed); + // Argument passed - find it on the stack. + __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize)); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + __ b(&next); + __ bind(¬_passed); + // Set the property to undefined. + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + __ bind(&next); + } else { + // Set the property to the constant value. + Handle constant(shared->GetThisPropertyAssignmentConstant(i)); + __ mov(r2, Operand(constant)); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + } + } + + // Fill the unused in-object property fields with undefined. + for (int i = shared->this_property_assignments_count(); + i < shared->CalculateInObjectProperties(); + i++) { + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + } + + // r0: argc + // r4: JSObject (not tagged) + // Move argc to r1 and the JSObject to return to r0 and tag it. + __ mov(r1, r0); + __ mov(r0, r4); + __ orr(r0, r0, Operand(kHeapObjectTag)); + + // r0: JSObject + // r1: argc + // Remove caller arguments and receiver from the stack and return. + __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); + __ add(sp, sp, Operand(kPointerSize)); + __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2); + __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2); + __ Jump(lr); + + // Jump to the generic stub in case the specialized code cannot handle the + // construction. + __ bind(&generic_stub_call); Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); Handle generic_construct_stub(code); __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index a2c45626be..c1daa57b1f 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -474,7 +474,7 @@ void Genesis::CreateRoots(v8::Handle global_template, // Please note that the prototype property for function instances must be // writable. 
Handle function_map_descriptors = - ComputeFunctionInstanceDescriptor(false, true); + ComputeFunctionInstanceDescriptor(false, false); fm->set_instance_descriptors(*function_map_descriptors); // Allocate the function map first and then patch the prototype later diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 4262dd2a82..195fe54bef 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -609,11 +609,6 @@ static void Generate_Return_DebugBreak(MacroAssembler* masm) { } -static void Generate_Return_DebugBreakEntry(MacroAssembler* masm) { - Debug::GenerateReturnDebugBreakEntry(masm); -} - - static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) { Debug::GenerateStubNoRegistersDebugBreak(masm); } diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 0f4a610b83..8df767a925 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -90,7 +90,6 @@ namespace internal { // Define list of builtins used by the debugger implemented in assembly. #define BUILTIN_LIST_DEBUG_A(V) \ V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \ - V(Return_DebugBreakEntry, BUILTIN, DEBUG_BREAK) \ V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \ V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \ diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h index 4ecbaf4cf4..b302e5beee 100644 --- a/deps/v8/src/checks.h +++ b/deps/v8/src/checks.h @@ -95,38 +95,6 @@ static inline void CheckNonEqualsHelper(const char* file, } } -#ifdef V8_TARGET_ARCH_X64 -// Helper function used by the CHECK_EQ function when given intptr_t -// arguments. Should not be called directly. -static inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - intptr_t expected, - const char* value_source, - intptr_t value) { - if (expected != value) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i", - expected_source, value_source, expected, value); - } -} - - -// Helper function used by the CHECK_NE function when given intptr_t -// arguments. Should not be called directly. -static inline void CheckNonEqualsHelper(const char* file, - int line, - const char* unexpected_source, - intptr_t unexpected, - const char* value_source, - intptr_t value) { - if (unexpected == value) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i", - unexpected_source, value_source, value); - } -} -#endif // V8_TARGET_ARCH_X64 - // Helper function used by the CHECK function when given string // arguments. Should not be called directly. 
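// NOTE: illustrative sketch, not part of the patch. The codegen.h change that
// follows makes CEntryStub's constructor take result_size explicitly and
// routes it through MinorKey(), so stubs whose generated code depends on the
// result size are cached as distinct code objects. Minimal keyed-cache shape
// (hypothetical):

#include <map>

struct StubDescriptor {
  int major;
  int minor;  // e.g. derived from result_size_
  int Key() const { return (major << 8) | minor; }
};

inline int GetOrCompile(const StubDescriptor& s, std::map<int, int>* cache) {
  std::map<int, int>::iterator it = cache->find(s.Key());
  if (it != cache->end()) return it->second;  // cache hit: reuse the code
  int generated = s.Key();                    // stand-in for generated code
  (*cache)[s.Key()] = generated;
  return generated;
}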
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 8e516c0b04..9a00ae2b65 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -517,7 +517,10 @@ const char* RuntimeStub::GetName() { void RuntimeStub::Generate(MacroAssembler* masm) { - masm->TailCallRuntime(ExternalReference(id_), num_arguments_); + Runtime::Function* f = Runtime::FunctionForId(id_); + masm->TailCallRuntime(ExternalReference(f), + num_arguments_, + f->result_size); } diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index d6967b7aff..d03f4b60b0 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -286,7 +286,7 @@ class CompareStub: public CodeStub { class CEntryStub : public CodeStub { public: - CEntryStub() { } + explicit CEntryStub(int result_size) : result_size_(result_size) { } void Generate(MacroAssembler* masm) { GenerateBody(masm, false); } @@ -302,10 +302,14 @@ class CEntryStub : public CodeStub { void GenerateThrowTOS(MacroAssembler* masm); void GenerateThrowUncatchable(MacroAssembler* masm, UncatchableExceptionType type); - private: + // Number of pointers/values returned. + int result_size_; + Major MajorKey() { return CEntry; } - int MinorKey() { return 0; } + // Minor key must differ if different result_size_ values means different + // code is generated. + int MinorKey(); const char* GetName() { return "CEntryStub"; } }; @@ -313,7 +317,7 @@ class CEntryStub : public CodeStub { class CEntryDebugBreakStub : public CEntryStub { public: - CEntryDebugBreakStub() { } + CEntryDebugBreakStub() : CEntryStub(1) { } void Generate(MacroAssembler* masm) { GenerateBody(masm, true); } diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 7082280856..e4658b1cee 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -159,8 +159,7 @@ Handle Shell::Write(const Arguments& args) { printf(" "); } v8::String::Utf8Value str(args[i]); - const char* cstr = ToCString(str); - printf("%s", cstr); + fwrite(*str, sizeof(**str), str.length(), stdout); } return Undefined(); } @@ -180,15 +179,15 @@ Handle Shell::Read(const Arguments& args) { Handle Shell::ReadLine(const Arguments& args) { - char line_buf[256]; - if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) { - return ThrowException(String::New("Error reading line")); + i::SmartPointer line(i::ReadLine("")); + if (*line == NULL) { + return Null(); } - int len = strlen(line_buf); - if (line_buf[len - 1] == '\n') { + size_t len = strlen(*line); + if (len > 0 && line[len - 1] == '\n') { --len; } - return String::New(line_buf, len); + return String::New(*line, len); } diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index da5be1f3d3..14b50603c9 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -102,7 +102,8 @@ Debug.ScriptCompilationType = { Host: 0, Debug.ScopeType = { Global: 0, Local: 1, With: 2, - Closure: 3 }; + Closure: 3, + Catch: 4 }; // Current debug state. @@ -900,6 +901,10 @@ function formatScope_(scope) { result += 'With, '; result += '#' + scope.object.ref + '#'; break; + case Debug.ScopeType.Catch: + result += 'Catch, '; + result += '#' + scope.object.ref + '#'; + break; case Debug.ScopeType.Closure: result += 'Closure'; break; diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index e341022aa4..f3e11ae751 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -75,6 +75,9 @@ BreakLocationIterator::BreakLocationIterator(Handle debug_info, BreakLocatorType type) { debug_info_ = debug_info; type_ = type; + // Get the stub early to avoid possible GC during iterations. 
We may need + // this stub to detect debugger calls generated from debugger statements. + debug_break_stub_ = RuntimeStub(Runtime::kDebugBreak, 0).GetCode(); reloc_iterator_ = NULL; reloc_iterator_original_ = NULL; Reset(); // Initialize the rest of the member variables. @@ -126,6 +129,10 @@ void BreakLocationIterator::Next() { return; } if (code->kind() == Code::STUB) { + if (IsDebuggerStatement()) { + break_point_++; + return; + } if (type_ == ALL_BREAK_LOCATIONS) { if (Debug::IsBreakStub(code)) { break_point_++; @@ -238,7 +245,7 @@ void BreakLocationIterator::SetBreakPoint(Handle break_point_object) { if (!HasBreakPoint()) { SetDebugBreak(); } - ASSERT(IsDebugBreak()); + ASSERT(IsDebugBreak() || IsDebuggerStatement()); // Set the break point information. DebugInfo::SetBreakPoint(debug_info_, code_position(), position(), statement_position(), @@ -258,6 +265,11 @@ void BreakLocationIterator::ClearBreakPoint(Handle break_point_object) { void BreakLocationIterator::SetOneShot() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is a real break point here no more to do. if (HasBreakPoint()) { ASSERT(IsDebugBreak()); @@ -270,6 +282,11 @@ void BreakLocationIterator::SetOneShot() { void BreakLocationIterator::ClearOneShot() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is a real break point here no more to do. if (HasBreakPoint()) { ASSERT(IsDebugBreak()); @@ -283,6 +300,11 @@ void BreakLocationIterator::ClearOneShot() { void BreakLocationIterator::SetDebugBreak() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is already a break point here just return. This might happen if // the same code is flooded with break points twice. Flooding the same // function twice might happen when stepping in a function with an exception @@ -303,6 +325,11 @@ void BreakLocationIterator::SetDebugBreak() { void BreakLocationIterator::ClearDebugBreak() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + if (RelocInfo::IsJSReturn(rmode())) { // Restore the frame exit code. ClearDebugBreakAtReturn(); @@ -317,10 +344,10 @@ void BreakLocationIterator::ClearDebugBreak() { void BreakLocationIterator::PrepareStepIn() { HandleScope scope; - // Step in can only be prepared if currently positioned on an IC call or - // construct call. + // Step in can only be prepared if currently positioned on an IC call, + // construct call or CallFunction stub call. Address target = rinfo()->target_address(); - Code* code = Code::GetCodeFromTargetAddress(target); + Handle code(Code::GetCodeFromTargetAddress(target)); if (code->is_call_stub()) { // Step in through IC call is handled by the runtime system. Therefore make // sure that the any current IC is cleared and the runtime system is @@ -334,11 +361,29 @@ void BreakLocationIterator::PrepareStepIn() { rinfo()->set_target_address(stub->entry()); } } else { +#ifdef DEBUG + // All the following stuff is needed only for assertion checks so the code + // is wrapped in ifdef. 
+ Handle maybe_call_function_stub = code; + if (IsDebugBreak()) { + Address original_target = original_rinfo()->target_address(); + maybe_call_function_stub = + Handle(Code::GetCodeFromTargetAddress(original_target)); + } + bool is_call_function_stub = + (maybe_call_function_stub->kind() == Code::STUB && + maybe_call_function_stub->major_key() == CodeStub::CallFunction); + // Step in through construct call requires no changes to the running code. // Step in through getters/setters should already be prepared as well // because caller of this function (Debug::PrepareStep) is expected to // flood the top frame's function with one shot breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()); + // Step in through CallFunction stub should also be prepared by the caller of + // this function (Debug::PrepareStep), which should flood the target function + // with breakpoints. + ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub() + || is_call_function_stub); +#endif } } @@ -409,6 +454,21 @@ void BreakLocationIterator::ClearDebugBreakAtIC() { } +bool BreakLocationIterator::IsDebuggerStatement() { + if (RelocInfo::IsCodeTarget(rmode())) { + Address target = original_rinfo()->target_address(); + Code* code = Code::GetCodeFromTargetAddress(target); + if (code->kind() == Code::STUB) { + CodeStub::Major major_key = code->major_key(); + if (major_key == CodeStub::Runtime) { + return (*debug_break_stub_ == code); + } + } + } + return false; +} + + Object* BreakLocationIterator::BreakPointObjects() { return debug_info_->GetBreakPointObjects(code_position()); } @@ -458,6 +518,7 @@ void Debug::ThreadInit() { thread_local_.step_count_ = 0; thread_local_.last_fp_ = 0; thread_local_.step_into_fp_ = 0; + thread_local_.step_out_fp_ = 0; thread_local_.after_break_target_ = 0; thread_local_.debugger_entry_ = NULL; thread_local_.pending_interrupts_ = 0; @@ -502,7 +563,6 @@ bool Debug::break_on_exception_ = false; bool Debug::break_on_uncaught_exception_ = true; Handle Debug::debug_context_ = Handle(); -Code* Debug::debug_break_return_entry_ = NULL; Code* Debug::debug_break_return_ = NULL; @@ -583,11 +643,6 @@ void ScriptCache::HandleWeakScript(v8::Persistent obj, void* data) { void Debug::Setup(bool create_heap_objects) { ThreadInit(); if (create_heap_objects) { - // Get code to handle entry to debug break on return. - debug_break_return_entry_ = - Builtins::builtin(Builtins::Return_DebugBreakEntry); - ASSERT(debug_break_return_entry_->IsCode()); - // Get code to handle debug break on return. debug_break_return_ = Builtins::builtin(Builtins::Return_DebugBreak); @@ -749,7 +804,6 @@ void Debug::PreemptionWhileInDebugger() { void Debug::Iterate(ObjectVisitor* v) { - v->VisitPointer(bit_cast(&(debug_break_return_entry_))); v->VisitPointer(bit_cast(&(debug_break_return_))); } @@ -804,11 +858,18 @@ Object* Debug::Break(Arguments args) { break_points_hit = CheckBreakPoints(break_point_objects); } - // Notify debugger if a real break point is triggered or if performing single - // stepping with no more steps to perform. Otherwise do another step. - if (!break_points_hit->IsUndefined() || - (thread_local_.last_step_action_ != StepNone && - thread_local_.step_count_ == 0)) { + // If step out is active, skip everything until the frame where we need to step + // out to is reached, unless a real breakpoint is hit. + if (Debug::StepOutActive() && frame->fp() != Debug::step_out_fp() && + break_points_hit->IsUndefined()) { + // Step count should always be 0 for StepOut.
+ ASSERT(thread_local_.step_count_ == 0); + } else if (!break_points_hit->IsUndefined() || + (thread_local_.last_step_action_ != StepNone && + thread_local_.step_count_ == 0)) { + // Notify debugger if a real break point is triggered or if performing + // single stepping with no more steps to perform. Otherwise do another step. + + // Clear all current stepping setup. ClearStepping(); @@ -1044,7 +1105,13 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { // Remember this step action and count. thread_local_.last_step_action_ = step_action; - thread_local_.step_count_ = step_count; + if (step_action == StepOut) { + // For step out the target frame will be found on the stack, so there is no + // need to set a step counter for it. It's expected to always be 0 for StepOut. + thread_local_.step_count_ = 0; + } else { + thread_local_.step_count_ = step_count; + } // Get the frame where the execution has stopped and skip the debug frame if // any. The debug frame will only be present if execution was stopped due to @@ -1092,6 +1159,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { bool is_call_target = false; bool is_load_or_store = false; bool is_inline_cache_stub = false; + Handle call_function_stub; if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) { Address target = it.rinfo()->target_address(); Code* code = Code::GetCodeFromTargetAddress(target); @@ -1102,19 +1170,51 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { is_inline_cache_stub = true; is_load_or_store = !is_call_target; } + + // Check if target code is CallFunction stub. + Code* maybe_call_function_stub = code; + // If there is a breakpoint at this line, look at the original code to + // check if it is a CallFunction stub. + if (it.IsDebugBreak()) { + Address original_target = it.original_rinfo()->target_address(); + maybe_call_function_stub = + Code::GetCodeFromTargetAddress(original_target); + } + if (maybe_call_function_stub->kind() == Code::STUB && + maybe_call_function_stub->major_key() == CodeStub::CallFunction) { + // Save a reference to the code as we may need it to find out the arguments + // count for 'step in' later. + call_function_stub = Handle(maybe_call_function_stub); + } } // If this is the last break code target step out is the only possibility. if (it.IsExit() || step_action == StepOut) { + if (step_action == StepOut) { + // Skip step_count frames starting with the current one. + while (step_count-- > 0 && !frames_it.done()) { + frames_it.Advance(); + } + } else { + ASSERT(it.IsExit()); + frames_it.Advance(); + } + // Skip builtin functions on the stack. + while (!frames_it.done() && + JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) { + frames_it.Advance(); + } // Step out: If there is a JavaScript caller frame, we need to // flood it with breakpoints. - frames_it.Advance(); if (!frames_it.done()) { // Fill the function to return to with one-shot break points. JSFunction* function = JSFunction::cast(frames_it.frame()->function()); FloodWithOneShot(Handle(function->shared())); + // Set target frame pointer. + ActivateStepOut(frames_it.frame()); } - } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode())) + } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) || + !call_function_stub.is_null()) || step_action == StepNext || step_action == StepMin) { // Step next or step min.
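The step-out support added in the hunks above reduces to one frame-pointer test: ActivateStepOut records the fp of the frame the debugger should eventually stop in, and Debug::Break then ignores every break until execution is back in exactly that frame, unless a real breakpoint fires first. A condensed restatement of that test (the helper is hypothetical; the calls are the ones introduced by this patch):

    // True when a break should be swallowed because a step-out to an outer
    // frame is still in progress.
    static bool SkipBreakForStepOut(JavaScriptFrame* frame,
                                    Handle<Object> break_points_hit) {
      return Debug::StepOutActive() &&               // a step-out is pending
             frame->fp() != Debug::step_out_fp() &&  // target frame not reached
             break_points_hit->IsUndefined();        // no real breakpoint hit
    }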
@@ -1126,6 +1226,45 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { debug_info->code()->SourceStatementPosition(frame->pc()); thread_local_.last_fp_ = frame->fp(); } else { + // If it's a CallFunction stub, ensure the target function is compiled and + // flood it with one shot breakpoints. + if (!call_function_stub.is_null()) { + // Find out the number of arguments from the stub minor key. + // Reverse lookup required as the minor key cannot be retrieved + // from the code object. + Handle obj( + Heap::code_stubs()->SlowReverseLookup(*call_function_stub)); + ASSERT(*obj != Heap::undefined_value()); + ASSERT(obj->IsSmi()); + // Get the STUB key and extract major and minor key. + uint32_t key = Smi::cast(*obj)->value(); + // Argc in the stub is the number of arguments passed - not the + // expected arguments of the called function. + int call_function_arg_count = CodeStub::MinorKeyFromKey(key); + ASSERT(call_function_stub->major_key() == + CodeStub::MajorKeyFromKey(key)); + + // Find target function on the expression stack. + // Expression stack looks like this (top to bottom): + // argN + // ... + // arg0 + // Receiver + // Function to call + int expressions_count = frame->ComputeExpressionsCount(); + ASSERT(expressions_count - 2 - call_function_arg_count >= 0); + Object* fun = frame->GetExpression( + expressions_count - 2 - call_function_arg_count); + if (fun->IsJSFunction()) { + Handle js_function(JSFunction::cast(fun)); + // Don't step into builtins. + if (!js_function->IsBuiltin()) { + // This will also compile the target function if it's not compiled yet. + FloodWithOneShot(Handle(js_function->shared())); + } + } + } + // Fill the current function with one-shot break points even for step in on // a call target as the function called might be a native function for // which step in will not stop. It also prepares for stepping in @@ -1328,6 +1467,7 @@ void Debug::ClearStepping() { // Clear the various stepping setup. ClearOneShot(); ClearStepIn(); + ClearStepOut(); ClearStepNext(); // Clear multiple step counter. @@ -1355,6 +1495,7 @@ void Debug::ClearOneShot() { void Debug::ActivateStepIn(StackFrame* frame) { + ASSERT(!StepOutActive()); thread_local_.step_into_fp_ = frame->fp(); } @@ -1364,6 +1505,17 @@ void Debug::ClearStepIn() { } +void Debug::ActivateStepOut(StackFrame* frame) { + ASSERT(!StepInActive()); + thread_local_.step_out_fp_ = frame->fp(); +} + + +void Debug::ClearStepOut() { + thread_local_.step_out_fp_ = 0; +} + + void Debug::ClearStepNext() { thread_local_.last_step_action_ = StepNone; thread_local_.last_statement_position_ = RelocInfo::kNoPosition; @@ -1455,26 +1607,25 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) { Address addr = frame->pc() - Assembler::kPatchReturnSequenceLength; // Check if the location is at JS exit. - bool at_js_exit = false; + bool at_js_return = false; + bool break_at_js_return_active = false; RelocIterator it(debug_info->code()); while (!it.done()) { if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) { - at_js_exit = (it.rinfo()->pc() == - addr - Assembler::kPatchReturnSequenceAddressOffset); + at_js_return = (it.rinfo()->pc() == + addr - Assembler::kPatchReturnSequenceAddressOffset); + break_at_js_return_active = it.rinfo()->IsCallInstruction(); } it.next(); } // Handle the jump to continue execution after break point depending on the // break location. - if (at_js_exit) { - // First check if the call in the code is still the debug break return - // entry code. If it is the break point is still active.
If not the break - point was removed during break point processing. - if (Assembler::target_address_at(addr) == - debug_break_return_entry()->entry()) { - // Break point still active. Jump to the corresponding place in the - // original code. + if (at_js_return) { + // If the break point at return is still active, jump to the corresponding + // place in the original code. If not, the break point was removed during + // break point processing. + if (break_at_js_return_active) { addr += original_code->instruction_start() - code->instruction_start(); } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 5b0273aa22..d6b2c088d3 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -119,6 +119,8 @@ class BreakLocationIterator { return reloc_iterator_original_->rinfo()->rmode(); } + bool IsDebuggerStatement(); + protected: bool RinfoDone() const; void RinfoNext(); @@ -128,6 +130,7 @@ class BreakLocationIterator { int position_; int statement_position_; Handle debug_info_; + Handle debug_break_stub_; RelocIterator* reloc_iterator_; RelocIterator* reloc_iterator_original_; @@ -279,6 +282,9 @@ class Debug { static Address step_in_fp() { return thread_local_.step_into_fp_; } static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; } + static bool StepOutActive() { return thread_local_.step_out_fp_ != 0; } + static Address step_out_fp() { return thread_local_.step_out_fp_; } + static EnterDebugger* debugger_entry() { return thread_local_.debugger_entry_; } @@ -329,10 +335,8 @@ class Debug { return &registers_[r]; } - // Address of the debug break return entry code. - static Code* debug_break_return_entry() { return debug_break_return_entry_; } - - // Support for getting the address of the debug break on return code. + // Access to the debug break on return code. + static Code* debug_break_return() { return debug_break_return_; } static Code** debug_break_return_address() { return &debug_break_return_; } @@ -379,7 +383,6 @@ class Debug { static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm); static void GenerateConstructCallDebugBreak(MacroAssembler* masm); static void GenerateReturnDebugBreak(MacroAssembler* masm); - static void GenerateReturnDebugBreakEntry(MacroAssembler* masm); static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm); // Called from stub-cache.cc. @@ -390,6 +393,8 @@ class Debug { static void ClearOneShot(); static void ActivateStepIn(StackFrame* frame); static void ClearStepIn(); + static void ActivateStepOut(StackFrame* frame); + static void ClearStepOut(); static void ClearStepNext(); // Returns whether the compile succeeded. static bool EnsureCompiled(Handle shared); @@ -442,6 +447,10 @@ class Debug { // Frame pointer for frame from which step in was performed. Address step_into_fp_; + // Frame pointer for the frame where the debugger should be called when the + // current step out action is completed. + Address step_out_fp_; + // Storage location for jump when exiting debug break calls. Address after_break_target_; @@ -457,9 +466,6 @@ class Debug { static ThreadLocal thread_local_; static void ThreadInit(); - // Code object for debug break return entry code. - static Code* debug_break_return_entry_; - // Code to call for handling debug break on return.
static Code* debug_break_return_; diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index c29815e550..949dd80c3b 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -77,11 +77,11 @@ int Heap::semispace_size_ = 512*KB; int Heap::old_generation_size_ = 128*MB; int Heap::initial_semispace_size_ = 128*KB; #elif defined(V8_TARGET_ARCH_X64) -int Heap::semispace_size_ = 8*MB; +int Heap::semispace_size_ = 16*MB; int Heap::old_generation_size_ = 1*GB; int Heap::initial_semispace_size_ = 1*MB; #else -int Heap::semispace_size_ = 4*MB; +int Heap::semispace_size_ = 8*MB; int Heap::old_generation_size_ = 512*MB; int Heap::initial_semispace_size_ = 512*KB; #endif @@ -1319,7 +1319,7 @@ bool Heap::CreateApiObjects() { void Heap::CreateCEntryStub() { - CEntryStub stub; + CEntryStub stub(1); set_c_entry_code(*stub.GetCode()); } @@ -2795,7 +2795,9 @@ STRUCT_LIST(MAKE_CASE) bool Heap::IdleNotification() { - static const int kIdlesBeforeCollection = 7; + static const int kIdlesBeforeScavenge = 4; + static const int kIdlesBeforeMarkSweep = 7; + static const int kIdlesBeforeMarkCompact = 8; static int number_idle_notifications = 0; static int last_gc_count = gc_count_; @@ -2808,19 +2810,22 @@ bool Heap::IdleNotification() { last_gc_count = gc_count_; } - if (number_idle_notifications >= kIdlesBeforeCollection) { - // The first time through we collect without forcing compaction. - // The second time through we force compaction and quit. - bool force_compaction = - number_idle_notifications > kIdlesBeforeCollection; - CollectAllGarbage(force_compaction); + if (number_idle_notifications == kIdlesBeforeScavenge) { + CollectGarbage(0, NEW_SPACE); + new_space_.Shrink(); last_gc_count = gc_count_; - if (force_compaction) { - // Shrink new space. - new_space_.Shrink(); - number_idle_notifications = 0; - finished = true; - } + + } else if (number_idle_notifications == kIdlesBeforeMarkSweep) { + CollectAllGarbage(false); + new_space_.Shrink(); + last_gc_count = gc_count_; + + } else if (number_idle_notifications == kIdlesBeforeMarkCompact) { + CollectAllGarbage(true); + new_space_.Shrink(); + last_gc_count = gc_count_; + number_idle_notifications = 0; + finished = true; } // Uncommit unused memory in new space. @@ -3185,63 +3190,49 @@ bool Heap::Setup(bool create_heap_objects) { if (!ConfigureHeapDefault()) return false; } - // Setup memory allocator and allocate an initial chunk of memory. The - // initial chunk is double the size of the new space to ensure that we can - // find a pair of semispaces that are contiguous and aligned to their size. + // Setup memory allocator and reserve a chunk of memory for new + // space. The chunk is double the size of the new space to ensure + // that we can find a pair of semispaces that are contiguous and + // aligned to their size. if (!MemoryAllocator::Setup(MaxCapacity())) return false; - void* chunk - = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_); + void* chunk = + MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_); if (chunk == NULL) return false; - // Put the initial chunk of the old space at the start of the initial - // chunk, then the two new space semispaces, then the initial chunk of - // code space. Align the pair of semispaces to their size, which must be - // a power of 2. + // Align the pair of semispaces to their size, which must be a power + // of 2. ASSERT(IsPowerOf2(young_generation_size_)); - Address code_space_start = reinterpret_cast
(chunk); - Address new_space_start = RoundUp(code_space_start, young_generation_size_); - Address old_space_start = new_space_start + young_generation_size_; - int code_space_size = new_space_start - code_space_start; - int old_space_size = young_generation_size_ - code_space_size; - - // Initialize new space. + Address new_space_start = + RoundUp(reinterpret_cast(chunk), young_generation_size_); if (!new_space_.Setup(new_space_start, young_generation_size_)) return false; - // Initialize old space, set the maximum capacity to the old generation - // size. It will not contain code. + // Initialize old pointer space. old_pointer_space_ = new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE); if (old_pointer_space_ == NULL) return false; - if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) { - return false; - } + if (!old_pointer_space_->Setup(NULL, 0)) return false; + + // Initialize old data space. old_data_space_ = new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE); if (old_data_space_ == NULL) return false; - if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1), - old_space_size >> 1)) { - return false; - } + if (!old_data_space_->Setup(NULL, 0)) return false; // Initialize the code space, set its maximum capacity to the old // generation size. It needs executable memory. code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE); if (code_space_ == NULL) return false; - if (!code_space_->Setup(code_space_start, code_space_size)) return false; + if (!code_space_->Setup(NULL, 0)) return false; // Initialize map space. map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE); if (map_space_ == NULL) return false; - // Setting up a paged space without giving it a virtual memory range big - // enough to hold at least a page will cause it to allocate. if (!map_space_->Setup(NULL, 0)) return false; // Initialize global property cell space. cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE); if (cell_space_ == NULL) return false; - // Setting up a paged space without giving it a virtual memory range big - // enough to hold at least a page will cause it to allocate. if (!cell_space_->Setup(NULL, 0)) return false; // The large object code space may contain code or data. We set the memory @@ -3563,7 +3554,7 @@ namespace { class JSConstructorProfile BASE_EMBEDDED { public: JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {} - void CollectStats(JSObject* obj); + void CollectStats(HeapObject* obj); void PrintStats(); // Used by ZoneSplayTree::ForEach. void Call(String* name, const NumberAndSizeInfo& number_and_size); @@ -3608,33 +3599,36 @@ int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) { void JSConstructorProfile::Call(String* name, const NumberAndSizeInfo& number_and_size) { - SmartPointer s_name; - if (name != NULL) { - s_name = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - } + ASSERT(name != NULL); + SmartPointer s_name( + name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)); LOG(HeapSampleJSConstructorEvent(*s_name, number_and_size.number(), number_and_size.bytes())); } -void JSConstructorProfile::CollectStats(JSObject* obj) { - String* constructor_func = NULL; - if (obj->map()->constructor()->IsJSFunction()) { - JSFunction* constructor = JSFunction::cast(obj->map()->constructor()); - SharedFunctionInfo* sfi = constructor->shared(); - String* name = String::cast(sfi->name()); - constructor_func = name->length() > 0 ? 
name : sfi->inferred_name(); - } else if (obj->IsJSFunction()) { - constructor_func = Heap::function_class_symbol(); +void JSConstructorProfile::CollectStats(HeapObject* obj) { + String* constructor = NULL; + int size; + if (obj->IsString()) { + constructor = Heap::String_symbol(); + size = obj->Size(); + } else if (obj->IsJSObject()) { + JSObject* js_obj = JSObject::cast(obj); + constructor = js_obj->constructor_name(); + size = CalculateJSObjectNetworkSize(js_obj); + } else { + return; } + JSObjectsInfoTree::Locator loc; - if (!js_objects_info_tree_.Find(constructor_func, &loc)) { - js_objects_info_tree_.Insert(constructor_func, &loc); + if (!js_objects_info_tree_.Find(constructor, &loc)) { + js_objects_info_tree_.Insert(constructor, &loc); } NumberAndSizeInfo number_and_size = loc.value(); number_and_size.increment_number(1); - number_and_size.increment_bytes(CalculateJSObjectNetworkSize(obj)); + number_and_size.increment_bytes(size); loc.set_value(number_and_size); } @@ -3676,9 +3670,7 @@ void HeapProfiler::WriteSample() { while (iterator.has_next()) { HeapObject* obj = iterator.next(); CollectStats(obj, info); - if (obj->IsJSObject()) { - js_cons_profile.CollectStats(JSObject::cast(obj)); - } + js_cons_profile.CollectStats(obj); } // Lump all the string types together. diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 55dc92dd92..7793e49265 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -129,11 +129,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // eax: initial map __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); __ shl(edi, kPointerSizeLog2); - // Make sure that the maximum heap object size will never cause us - // problem here, because it is always greater than the maximum - // instance size that can be represented in a byte. - ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize); - __ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false); + __ AllocateObjectInNewSpace(edi, + ebx, + edi, + no_reg, + &rt_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields. // eax: initial map // ebx: JSObject @@ -188,8 +189,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ebx: JSObject // edi: start of next object (will be start of FixedArray) // edx: number of elements in properties array - ASSERT(Heap::MaxObjectSizeInPagedSpace() > - (FixedArray::kHeaderSize + 255*kPointerSize)); __ AllocateObjectInNewSpace(FixedArray::kHeaderSize, times_pointer_size, edx, @@ -197,7 +196,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { ecx, no_reg, &undo_allocation, - true); + RESULT_CONTAINS_TOP); // Initialize the FixedArray. // ebx: JSObject @@ -245,10 +244,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { } // Allocate the new receiver object using the runtime call. - // edi: function (constructor) __ bind(&rt_call); // Must restore edi (constructor) before calling runtime. 
__ mov(edi, Operand(esp, 0)); + // edi: function (constructor) __ push(edi); __ CallRuntime(Runtime::kNewObject, 1); __ mov(ebx, Operand(eax)); // store result in ebx diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index a9face1d70..400a3607b7 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -6886,7 +6886,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { __ j(above_equal, &string1); // First and second argument are strings. - __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2); + __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1); // Only first argument is a string. __ bind(&string1); @@ -6954,12 +6954,11 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm, scratch1, scratch2, need_gc, - false); + TAG_OBJECT); - // Set the map and tag the result. - __ mov(Operand(result, HeapObject::kMapOffset), + // Set the map. + __ mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(Factory::heap_number_map())); - __ or_(Operand(result), Immediate(kHeapObjectTag)); } @@ -7176,7 +7175,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { __ pop(ebx); // Return address. __ push(edx); __ push(ebx); - __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1); + __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1); } @@ -7201,7 +7200,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3); + __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1); } @@ -7437,7 +7436,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) { __ push(eax); // Do tail-call to runtime routine. - __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1); + __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1); } @@ -7469,6 +7468,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { } +int CEntryStub::MinorKey() { + ASSERT(result_size_ <= 2); + // Result returned in eax, or eax+edx if result_size_ is 2. + return 0; +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { // eax holds the exception. diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index 4ef0862af1..7e0dfd1489 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -36,9 +36,7 @@ namespace internal { #ifdef ENABLE_DEBUGGER_SUPPORT -// A debug break in the frame exit code is identified by a call instruction. bool BreakLocationIterator::IsDebugBreakAtReturn() { - // Opcode E8 is call. return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -49,7 +47,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() { void BreakLocationIterator::SetDebugBreakAtReturn() { ASSERT(Debug::kIa32JSReturnSequenceLength >= Debug::kIa32CallInstructionLength); - rinfo()->PatchCodeWithCall(Debug::debug_break_return_entry()->entry(), + rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(), Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength); } @@ -61,11 +59,11 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() { } -// Check whether the JS frame exit code has been patched with a debug break. +// A debug break in the frame exit code is identified by the JS frame exit code +// having been patched with a call instruction. 
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); - // Opcode E8 is call. - return (*(rinfo->pc()) == 0xE8); + return rinfo->IsCallInstruction(); } @@ -194,17 +192,6 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { } -void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) { - // OK to clobber ebx as we are returning from a JS function through the code - // generated by CodeGenerator::GenerateReturnSequence() - ExternalReference debug_break_return = - ExternalReference(Debug_Address::DebugBreakReturn()); - __ mov(ebx, Operand::StaticVariable(debug_break_return)); - __ add(Operand(ebx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(ebx)); -} - - void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc). // ----------- S t a t e ------------- diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index e39808b2ee..9a2753d495 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -404,7 +404,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ push(eax); __ push(ecx); // Do tail-call to runtime routine. - __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3); + __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1); // Check whether the elements is a pixel array. // eax: value @@ -667,7 +667,7 @@ void CallIC::Generate(MacroAssembler* masm, __ push(ebx); // Call the entry. - CEntryStub stub; + CEntryStub stub(1); __ mov(eax, Immediate(2)); __ mov(ebx, Immediate(f)); __ CallStub(&stub); @@ -799,7 +799,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ push(ebx); // return address // Perform tail call to the entry. - __ TailCallRuntime(f, 2); + __ TailCallRuntime(f, 2, 1); } @@ -927,7 +927,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ push(ebx); // return address // Perform tail call to the entry. - __ TailCallRuntime(f, 2); + __ TailCallRuntime(f, 2, 1); } @@ -967,7 +967,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) { // Perform tail call to the entry. __ TailCallRuntime( - ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1); } @@ -987,7 +987,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ push(ebx); // Perform tail call to the entry. - __ TailCallRuntime(f, 3); + __ TailCallRuntime(f, 3, 1); } @@ -1010,7 +1010,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ push(ecx); // Do tail-call to runtime routine. - __ TailCallRuntime(f, 3); + __ TailCallRuntime(f, 3, 1); } @@ -1032,7 +1032,7 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) { // Do tail-call to runtime routine. __ TailCallRuntime( - ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1); } #undef __ diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 754b74abef..79b308944f 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -620,18 +620,22 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } -void MacroAssembler::LoadAllocationTopHelper( - Register result, - Register result_end, - Register scratch, - bool result_contains_top_on_entry) { +void MacroAssembler::LoadAllocationTopHelper(Register result, + Register result_end, + Register scratch, + AllocationFlags flags) { ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(); // Just return if allocation top is already known. - if (result_contains_top_on_entry) { + if ((flags & RESULT_CONTAINS_TOP) != 0) { // No use of scratch if allocation top is provided. ASSERT(scratch.is(no_reg)); +#ifdef DEBUG + // Assert that result actually contains top on entry. + cmp(result, Operand::StaticVariable(new_space_allocation_top)); + Check(equal, "Unexpected allocation top"); +#endif return; } @@ -659,20 +663,17 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end, } } -void MacroAssembler::AllocateObjectInNewSpace( - int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { + +void MacroAssembler::AllocateObjectInNewSpace(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -683,25 +684,26 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } -void MacroAssembler::AllocateObjectInNewSpace( - int header_size, - ScaleFactor element_size, - Register element_count, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(int header_size, + ScaleFactor element_size, + Register element_count, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -712,24 +714,24 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. 
+ if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } -void MacroAssembler::AllocateObjectInNewSpace( - Register object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(Register object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); - + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -743,6 +745,11 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } @@ -889,7 +896,8 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { void MacroAssembler::TailCallRuntime(const ExternalReference& ext, - int num_arguments) { + int num_arguments, + int result_size) { // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we // should remove this need and make the runtime routine entry code @@ -902,7 +910,7 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext, void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) { // Set the entry point and jump to the C entry runtime stub. mov(ebx, Immediate(ext)); - CEntryStub ces; + CEntryStub ces(1); jmp(ces.GetCode(), RelocInfo::CODE_TARGET); } @@ -1162,8 +1170,9 @@ void MacroAssembler::Abort(const char* msg) { } +#ifdef ENABLE_DEBUGGER_SUPPORT CodePatcher::CodePatcher(byte* address, int size) - : address_(address), size_(size), masm_(address, size + Assembler::kGap) { + : address_(address), size_(size), masm_(address, size + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. @@ -1179,6 +1188,7 @@ CodePatcher::~CodePatcher() { ASSERT(masm_.pc_ == address_ + size_); ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } +#endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index f10ec16aa2..fa61183e58 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -37,25 +37,6 @@ namespace internal { class JumpTarget; -// Helper types to make flags easier to read at call sites. -enum InvokeFlag { - CALL_FUNCTION, - JUMP_FUNCTION -}; - -enum CodeLocation { - IN_JAVASCRIPT, - IN_JS_ENTRY, - IN_C_ENTRY -}; - -enum HandlerType { - TRY_CATCH_HANDLER, - TRY_FINALLY_HANDLER, - JS_ENTRY_HANDLER -}; - - // MacroAssembler implements a collection of frequently used macros. 
class MacroAssembler: public Assembler { public: @@ -201,7 +182,7 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(int header_size, ScaleFactor element_size, @@ -210,14 +191,14 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(Register object_size, Register result, Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); // Undo allocation in new space. The object passed and objects allocated after // it will no longer be allocated. Make sure that no pointers are left to the @@ -275,7 +256,9 @@ class MacroAssembler: public Assembler { // Tail call of a runtime routine (jump). // Like JumpToBuiltin, but also takes care of passing the number // of arguments. - void TailCallRuntime(const ExternalReference& ext, int num_arguments); + void TailCallRuntime(const ExternalReference& ext, + int num_arguments, + int result_size); // Jump to the builtin routine. void JumpToBuiltin(const ExternalReference& ext); @@ -350,11 +333,12 @@ class MacroAssembler: public Assembler { void LoadAllocationTopHelper(Register result, Register result_end, Register scratch, - bool result_contains_top_on_entry); + AllocationFlags flags); void UpdateAllocationTopHelper(Register result_end, Register scratch); }; +#ifdef ENABLE_DEBUGGER_SUPPORT // The code patcher is used to patch (typically) small parts of code e.g. for // debugging and other types of instrumentation. When using the code patcher // the exact number of bytes specified must be emitted. Is not legal to emit @@ -373,6 +357,7 @@ class CodePatcher { int size_; // Number of bytes of the expected patch size. MacroAssembler masm_; // Macro assembler used to generate the code. }; +#endif // ENABLE_DEBUGGER_SUPPORT // ----------------------------------------------------------------------------- diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 049c57e4f1..58a3ce5209 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -302,7 +302,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm, __ mov(eax, Immediate(5)); __ mov(ebx, Immediate(ref)); - CEntryStub stub; + CEntryStub stub(1); __ CallStub(&stub); } @@ -467,7 +467,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(ref, 5); + __ TailCallRuntime(ref, 5, 1); __ bind(&cleanup); __ pop(scratch1); @@ -489,7 +489,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { ExternalReference ref = ExternalReference( IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); - __ TailCallRuntime(ref, 5); + __ TailCallRuntime(ref, 5, 1); } private: @@ -593,7 +593,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { __ mov(eax, Immediate(5)); __ mov(ebx, Immediate(ref)); - CEntryStub stub; + CEntryStub stub(1); __ CallStub(&stub); __ LeaveInternalFrame(); @@ -789,7 +789,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object, // Do tail-call to the runtime system. 
ExternalReference load_callback_property = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 5); + __ TailCallRuntime(load_callback_property, 5, 1); } @@ -1237,7 +1237,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference store_callback_property = ExternalReference(IC_Utility(IC::kStoreCallbackProperty)); - __ TailCallRuntime(store_callback_property, 4); + __ TailCallRuntime(store_callback_property, 4, 1); // Handle store cache miss. __ bind(&miss); @@ -1290,7 +1290,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallRuntime(store_ic_property, 3); + __ TailCallRuntime(store_ic_property, 3, 1); // Handle store cache miss. __ bind(&miss); @@ -1783,10 +1783,12 @@ Object* ConstructStubCompiler::CompileConstructStub( // ebx: initial map __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); __ shl(ecx, kPointerSizeLog2); - // Make sure that the maximum heap object size will never cause us - // problems here. - ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize); - __ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false); + __ AllocateObjectInNewSpace(ecx, + edx, + ecx, + no_reg, + &generic_stub_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields and add the heap tag. // ebx: initial map diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h index b6c06d8461..dd7ea1c9ca 100644 --- a/deps/v8/src/list.h +++ b/deps/v8/src/list.h @@ -62,9 +62,8 @@ class List { return data_[i]; } inline T& at(int i) const { return operator[](i); } - inline T& last() const { - return at(length_ - 1); - } + inline T& last() const { return at(length_ - 1); } + inline T& first() const { return at(0); } INLINE(bool is_empty() const) { return length_ == 0; } INLINE(int length() const) { return length_; } diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 56808202ad..6bbefbceb1 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -890,9 +890,7 @@ void Logger::HeapSampleJSConstructorEvent(const char* constructor, if (!Log::IsEnabled() || !FLAG_log_gc) return; LogMessageBuilder msg; msg.Append("heap-js-cons-item,%s,%d,%d\n", - constructor != NULL ? - (constructor[0] != '\0' ? constructor : "(anonymous)") : - "(no_constructor)", + constructor[0] != '\0' ? constructor : "(anonymous)", number, bytes); msg.WriteToLogFile(); #endif diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 983802e6a7..5631decba9 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -28,6 +28,40 @@ #ifndef V8_MACRO_ASSEMBLER_H_ #define V8_MACRO_ASSEMBLER_H_ + +// Helper types to make boolean flags easier to read at call sites. +enum InvokeFlag { + CALL_FUNCTION, + JUMP_FUNCTION +}; + + +enum CodeLocation { + IN_JAVASCRIPT, + IN_JS_ENTRY, + IN_C_ENTRY +}; + + +enum HandlerType { + TRY_CATCH_HANDLER, + TRY_FINALLY_HANDLER, + JS_ENTRY_HANDLER +}; + + +// Flags used for the AllocateObjectInNewSpace functions. +enum AllocationFlags { + // No special flags. + NO_ALLOCATION_FLAGS = 0, + // Return the pointer to the allocated memory already tagged as a heap object. + TAG_OBJECT = 1 << 0, + // The content of the result register already contains the allocation top in + // new space.
+ RESULT_CONTAINS_TOP = 1 << 1 +}; + + #if V8_TARGET_ARCH_IA32 #include "assembler.h" #include "ia32/assembler-ia32.h" diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index d139093a17..e682fe29f9 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -41,6 +41,7 @@ namespace internal { bool MarkCompactCollector::force_compaction_ = false; bool MarkCompactCollector::compacting_collection_ = false; +bool MarkCompactCollector::compact_on_next_gc_ = false; int MarkCompactCollector::previous_marked_count_ = 0; GCTracer* MarkCompactCollector::tracer_ = NULL; @@ -104,35 +105,15 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { // variable. tracer_ = tracer; - static const int kFragmentationLimit = 50; // Percent. #ifdef DEBUG ASSERT(state_ == IDLE); state_ = PREPARE_GC; #endif ASSERT(!FLAG_always_compact || !FLAG_never_compact); - compacting_collection_ = FLAG_always_compact || force_compaction_; - - // We compact the old generation if it gets too fragmented (ie, we could - // recover an expected amount of space by reclaiming the waste and free - // list blocks). We always compact when the flag --gc-global is true - // because objects do not get promoted out of new space on non-compacting - // GCs. - if (!compacting_collection_) { - int old_gen_recoverable = 0; - int old_gen_used = 0; - - OldSpaces spaces; - while (OldSpace* space = spaces.next()) { - old_gen_recoverable += space->Waste() + space->AvailableFree(); - old_gen_used += space->Size(); - } - int old_gen_fragmentation = - static_cast((old_gen_recoverable * 100.0) / old_gen_used); - if (old_gen_fragmentation > kFragmentationLimit) { - compacting_collection_ = true; - } - } + compacting_collection_ = + FLAG_always_compact || force_compaction_ || compact_on_next_gc_; + compact_on_next_gc_ = false; if (FLAG_never_compact) compacting_collection_ = false; if (FLAG_collect_maps) CreateBackPointers(); @@ -173,6 +154,31 @@ void MarkCompactCollector::Finish() { // GC, because it relies on the new address of certain old space // objects (empty string, illegal builtin). StubCache::Clear(); + + // If we've just compacted old space there's no reason to check the + // fragmentation limit. Just return. + if (HasCompacted()) return; + + // We compact the old generation on the next GC if it has gotten too + // fragmented (ie, we could recover an expected amount of space by + // reclaiming the waste and free list blocks). + static const int kFragmentationLimit = 15; // Percent. + static const int kFragmentationAllowed = 1 * MB; // Absolute. + int old_gen_recoverable = 0; + int old_gen_used = 0; + + OldSpaces spaces; + while (OldSpace* space = spaces.next()) { + old_gen_recoverable += space->Waste() + space->AvailableFree(); + old_gen_used += space->Size(); + } + + int old_gen_fragmentation = + static_cast((old_gen_recoverable * 100.0) / old_gen_used); + if (old_gen_fragmentation > kFragmentationLimit && + old_gen_recoverable > kFragmentationAllowed) { + compact_on_next_gc_ = true; + } } diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index 0bd212e0c1..2da2b1f757 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -130,6 +130,9 @@ class MarkCompactCollector: public AllStatic { // Global flag indicating whether spaces were compacted on the last GC. static bool compacting_collection_; + // Global flag indicating whether spaces will be compacted on the next GC. 
+ static bool compact_on_next_gc_; + // The number of objects left marked at the end of the last completed full // GC (expected to be zero). static int previous_marked_count_; diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js index 76ae75bf69..ee3dd647a6 100644 --- a/deps/v8/src/mirror-delay.js +++ b/deps/v8/src/mirror-delay.js @@ -201,7 +201,8 @@ PropertyAttribute.DontDelete = DONT_DELETE; ScopeType = { Global: 0, Local: 1, With: 2, - Closure: 3 }; + Closure: 3, + Catch: 4 }; // Mirror hierarchy: diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index ef4aae5311..9fc9b1d4ef 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -769,11 +769,14 @@ void JSRegExp::JSRegExpVerify() { FixedArray* arr = FixedArray::cast(data()); Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex); - ASSERT(ascii_data->IsTheHole() - || (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray())); + // TheHole: Not compiled yet. + // JSObject: Compilation error. + // Code/ByteArray: Compiled code. + ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() || + (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray())); Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex); - ASSERT(uc16_data->IsTheHole() - || (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray())); + ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() || + (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray())); ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi()); ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi()); break; diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 9ea131fa7b..583af7c58a 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -1186,7 +1186,9 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) { String* JSObject::class_name() { - if (IsJSFunction()) return Heap::function_class_symbol(); + if (IsJSFunction()) { + return Heap::function_class_symbol(); + } if (map()->constructor()->IsJSFunction()) { JSFunction* constructor = JSFunction::cast(map()->constructor()); return String::cast(constructor->shared()->instance_class_name()); @@ -1196,6 +1198,20 @@ String* JSObject::class_name() { } +String* JSObject::constructor_name() { + if (IsJSFunction()) { + return Heap::function_class_symbol(); + } + if (map()->constructor()->IsJSFunction()) { + JSFunction* constructor = JSFunction::cast(map()->constructor()); + String* name = String::cast(constructor->shared()->name()); + return name->length() > 0 ? name : constructor->shared()->inferred_name(); + } + // If the constructor is not present, return "Object". + return Heap::Object_symbol(); +} + + void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) { // Iterate over all fields in the body. Assumes all are Object*. IteratePointers(v, kPropertiesOffset, object_size); diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 3f6f5fff52..d9edce7796 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -1392,6 +1392,10 @@ class JSObject: public HeapObject { // Returns the class name ([[Class]] property in the specification). String* class_name(); + // Returns the constructor name (the name, possibly the inferred name, of the + // function that was used to instantiate the object). + String* constructor_name(); + // Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor(); InterceptorInfo* GetIndexedInterceptor(); @@ -2634,8 +2638,8 @@ class Code: public HeapObject { // the layout of the code object into account. int ExecutableSize() { // Check that the assumptions about the layout of the code object holds. - ASSERT_EQ(instruction_start() - address(), - static_cast(Code::kHeaderSize)); + ASSERT_EQ(static_cast(instruction_start() - address()), + Code::kHeaderSize); return instruction_size() + Code::kHeaderSize; } @@ -2891,8 +2895,12 @@ class Map: public HeapObject { // Byte offsets within kInstanceSizesOffset. static const int kInstanceSizeOffset = kInstanceSizesOffset + 0; - static const int kInObjectPropertiesOffset = kInstanceSizesOffset + 1; - static const int kPreAllocatedPropertyFieldsOffset = kInstanceSizesOffset + 2; + static const int kInObjectPropertiesByte = 1; + static const int kInObjectPropertiesOffset = + kInstanceSizesOffset + kInObjectPropertiesByte; + static const int kPreAllocatedPropertyFieldsByte = 2; + static const int kPreAllocatedPropertyFieldsOffset = + kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte; // The byte at position 3 is not in use at the moment. // Byte offsets within kInstanceAttributesOffset attributes. @@ -3097,9 +3105,7 @@ class SharedFunctionInfo: public HeapObject { inline bool is_expression(); inline void set_is_expression(bool value); - // Is this function a top-level function. Used for accessing the - // caller of functions. Top-level functions (scripts, evals) are - // returned as null; see JSFunction::GetCallerAccessor(...). + // Is this function a top-level function (scripts, evals). inline bool is_toplevel(); inline void set_is_toplevel(bool value); @@ -3528,9 +3534,13 @@ class JSRegExp: public JSObject { static const int kAtomDataSize = kAtomPatternIndex + 1; - // Irregexp compiled code or bytecode for ASCII. + // Irregexp compiled code or bytecode for ASCII. If compilation + // fails, this field holds an exception object that should be + // thrown if the regexp is used again. static const int kIrregexpASCIICodeIndex = kDataIndex; - // Irregexp compiled code or bytecode for UC16. + // Irregexp compiled code or bytecode for UC16. If compilation + // fails, this field holds an exception object that should be + // thrown if the regexp is used again. static const int kIrregexpUC16CodeIndex = kDataIndex + 1; // Maximal number of registers used by either ASCII or UC16. // Only used to check that there is enough stack space diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 92d72f8524..44d283b36d 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -141,7 +141,9 @@ void* OS::Allocate(const size_t requested, void OS::Free(void* buf, const size_t length) { // TODO(1240712): munmap has a return value which is ignored here.
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 6ec5070f91..fe4c31f515 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -56,6 +56,8 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "top.h"
+#include "v8threads.h"
 
 namespace v8 {
 namespace internal {
@@ -145,7 +147,9 @@ void* OS::Allocate(const size_t requested,
 
 void OS::Free(void* address, const size_t size) {
   // TODO(1240712): munmap has a return value which is ignored here.
-  munmap(address, size);
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
 }
 
 
@@ -360,7 +364,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
               kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
@@ -580,6 +584,7 @@ Semaphore* OS::CreateSemaphore(int count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 static Sampler* active_sampler_ = NULL;
+static pthread_t vm_thread_ = 0;
 
 
 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
@@ -608,6 +613,30 @@ enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
 #endif
 
 
+// A function that determines if a signal handler is called in the context
+// of a VM thread.
+//
+// The problem is that a SIGPROF signal can be delivered to an arbitrary
+// thread (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2).
+// So, if the signal is being handled in the context of a non-VM thread,
+// it means that the VM thread is running, and trying to sample its stack can
+// cause a crash.
+static inline bool IsVmThread() {
+  // In the case of a single VM thread, this check is enough.
+  if (pthread_equal(pthread_self(), vm_thread_)) return true;
+  // If there are multiple threads that use the VM, they must have a thread
+  // id stored in TLS. To verify that the thread is really executing the VM,
+  // we check Top's data. Given that ThreadManager::RestoreThread first
+  // restores ThreadLocalTop from TLS, and only then erases the TLS value,
+  // reading Top::thread_id() should not be affected by races.
+  if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
+      ThreadManager::CurrentId() == Top::thread_id()) {
+    return true;
+  }
+  return false;
+}
+
+
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
@@ -640,7 +669,8 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
     sample.fp = mcontext.arm_fp;
 #endif
 #endif
-    active_sampler_->SampleStack(&sample);
+    if (IsVmThread())
+      active_sampler_->SampleStack(&sample);
   }
 
   // We always sample the VM state.
@@ -678,6 +708,8 @@ void Sampler::Start() {
   // platforms.
   if (active_sampler_ != NULL) return;
 
+  vm_thread_ = pthread_self();
+
   // Request profiling signals.
   struct sigaction sa;
   sa.sa_sigaction = ProfilerSignalHandler;
@@ -713,6 +745,7 @@ void Sampler::Stop() {
   active_ = false;
 }
 
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
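The sampler change above guards SampleStack() because the kernel delivers SIGPROF to an arbitrary thread of the process, not necessarily the one running V8. A self-contained sketch of the same guard, assuming a single VM thread and omitting V8's multi-threaded TLS check:

// Standalone illustration, not V8's sampler: remember the VM thread when
// profiling starts and only sample when SIGPROF lands on that thread.
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/time.h>

static pthread_t vm_thread;

static void ProfilerHandler(int) {
  // SIGPROF may land on any thread; gate any stack walking on an
  // async-signal-safe identity check.
  if (!pthread_equal(pthread_self(), vm_thread)) return;
  // ... read pc/sp/fp from the signal context and record a tick here ...
}

int main() {
  vm_thread = pthread_self();

  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = ProfilerHandler;
  sigaction(SIGPROF, &sa, NULL);

  // Request a SIGPROF tick every 10 ms of consumed CPU time.
  struct itimerval timer = { {0, 10000}, {0, 10000} };
  setitimer(ITIMER_PROF, &timer, NULL);

  for (volatile long i = 0; i < 200000000L; ++i) {}  // burn CPU, collect ticks
  return 0;
}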
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index c0810649fe..596b0fb040 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -141,7 +141,9 @@ void* OS::Allocate(const size_t requested,
 
 void OS::Free(void* address, const size_t size) {
   // TODO(1240712): munmap has a return value which is ignored here.
-  munmap(address, size);
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
 }
 
 
@@ -211,8 +213,17 @@ void OS::LogSharedLibraryAddresses() {
   for (unsigned int i = 0; i < images_count; ++i) {
     const mach_header* header = _dyld_get_image_header(i);
     if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header),
+        SEG_TEXT,
+        SECT_TEXT,
+        &size);
+#else
     unsigned int size;
     char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
     if (code_ptr == NULL) continue;
     const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
     const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
@@ -309,7 +320,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
               kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index f772d32d9d..95776e54af 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -4556,22 +4556,25 @@ static Object* Runtime_LookupContext(Arguments args) {
 }
 
 
-// A mechanism to return pairs of Object*'s. This is somewhat
-// compiler-dependent as it assumes that a 64-bit value (a long long)
-// is returned via two registers (edx:eax on ia32). Both the ia32 and
-// arm platform support this; it is mostly an issue of "coaxing" the
-// compiler to do the right thing.
-//
-// TODO(1236026): This is a non-portable hack that should be removed.
+// A mechanism to return a pair of Object pointers in registers (if possible).
+// How this is achieved is calling convention-dependent.
+// All currently supported 32-bit targets use cdecl-variant conventions where
+// a 64-bit value is returned in two 32-bit registers (edx:eax on ia32,
+// r1:r0 on ARM).
+// In the AMD64 calling convention a struct of two pointers is returned in
+// rdx:rax. In the Win64 calling convention such a struct is returned in
+// memory allocated by the caller and passed as a hidden first parameter.
 #ifdef V8_HOST_ARCH_64_BIT
-// Tested with GCC, not with MSVC.
 struct ObjectPair {
   Object* x;
   Object* y;
 };
+
 static inline ObjectPair MakePair(Object* x, Object* y) {
   ObjectPair result = {x, y};
-  return result;  // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
+  // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
+  // In Win64 they are assigned to a hidden first argument.
+  return result;
 }
 #else
 typedef uint64_t ObjectPair;
@@ -4582,8 +4585,6 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
 #endif
 
 
-
-
 static inline Object* Unhole(Object* x, PropertyAttributes attributes) {
   ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
   USE(attributes);
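The rewritten ObjectPair comment above describes two results coming back from a runtime call in a register pair. On 32-bit targets the pair is a packed uint64_t; the sketch below restates that packing with plain 32-bit values rather than V8's Object* pointers, which only fit because pointers are 32 bits wide on those targets:

// Sketch of the 32-bit packing scheme: two 32-bit values travel as one
// uint64_t, which cdecl-style conventions return in edx:eax on ia32
// (r1:r0 on ARM). Illustrative only.
#include <stdint.h>
#include <stdio.h>

typedef uint64_t ValuePair;

static inline ValuePair MakePair(uint32_t x, uint32_t y) {
  // Low word ends up in eax, high word in edx on ia32.
  return static_cast<ValuePair>(x) | (static_cast<ValuePair>(y) << 32);
}

static inline uint32_t PairFirst(ValuePair p) {
  return static_cast<uint32_t>(p);
}

static inline uint32_t PairSecond(ValuePair p) {
  return static_cast<uint32_t>(p >> 32);
}

int main() {
  ValuePair p = MakePair(0x11111111u, 0x22222222u);
  printf("%x %x\n", PairFirst(p), PairSecond(p));  // 11111111 22222222
  return 0;
}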
@@ -4612,7 +4613,7 @@ static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) {
 
 static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
   HandleScope scope;
-  ASSERT(args.length() == 2);
+  ASSERT_EQ(2, args.length());
 
   if (!args[0]->IsContext() || !args[1]->IsString()) {
     return MakePair(Top::ThrowIllegalOperation(), NULL);
@@ -6341,7 +6342,12 @@ class ScopeIterator {
     ScopeTypeGlobal = 0,
     ScopeTypeLocal,
     ScopeTypeWith,
-    ScopeTypeClosure
+    ScopeTypeClosure,
+    // Every catch block contains an implicit with block (its parameter is
+    // a JSContextExtensionObject) that extends the current scope with a
+    // variable holding the exception object. Such with blocks are treated
+    // as scopes of their own type.
+    ScopeTypeCatch
   };
 
   explicit ScopeIterator(JavaScriptFrame* frame)
@@ -6417,7 +6423,14 @@ class ScopeIterator {
       return ScopeTypeClosure;
     }
     ASSERT(context_->has_extension());
-    ASSERT(!context_->extension()->IsJSContextExtensionObject());
+    // The current scope is either an explicit with statement or a with
+    // statement implicitly generated for a catch block.
+    // If the extension object here is a JSContextExtensionObject, the
+    // current with statement is one from a catch block; otherwise it is
+    // a regular with statement.
+    if (context_->extension()->IsJSContextExtensionObject()) {
+      return ScopeTypeCatch;
+    }
     return ScopeTypeWith;
   }
 
@@ -6432,6 +6445,7 @@ class ScopeIterator {
         return MaterializeLocalScope(frame_);
         break;
       case ScopeIterator::ScopeTypeWith:
+      case ScopeIterator::ScopeTypeCatch:
        // Return the with object.
        return Handle<JSObject>(CurrentContext()->extension());
        break;
@@ -6488,6 +6502,14 @@ class ScopeIterator {
         break;
       }
 
+      case ScopeIterator::ScopeTypeCatch: {
+        PrintF("Catch:\n");
+        Handle<JSObject> extension =
+            Handle<JSObject>(CurrentContext()->extension());
+        extension->Print();
+        break;
+      }
+
       case ScopeIterator::ScopeTypeClosure: {
         PrintF("Closure:\n");
         CurrentContext()->Print();
@@ -6799,8 +6821,20 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle