From 615d8906226ffc56238b44141cdb3374f47e805a Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Mon, 10 May 2010 09:58:20 -0700 Subject: [PATCH] Upgrade V8 to 2.2.9 --- deps/v8/AUTHORS | 1 + deps/v8/ChangeLog | 13 + deps/v8/SConstruct | 3 +- deps/v8/include/v8-profiler.h | 2 +- deps/v8/include/v8.h | 104 +- deps/v8/samples/process.cc | 2 +- deps/v8/src/SConscript | 2 + deps/v8/src/api.cc | 125 +- deps/v8/src/api.h | 12 + deps/v8/src/arm/assembler-arm.cc | 29 + deps/v8/src/arm/assembler-arm.h | 14 + deps/v8/src/arm/builtins-arm.cc | 32 +- deps/v8/src/arm/codegen-arm.cc | 182 +- deps/v8/src/arm/constants-arm.h | 4 + deps/v8/src/arm/disasm-arm.cc | 47 + deps/v8/src/arm/full-codegen-arm.cc | 59 +- deps/v8/src/arm/ic-arm.cc | 263 +- deps/v8/src/arm/macro-assembler-arm.cc | 54 +- deps/v8/src/arm/macro-assembler-arm.h | 28 +- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 25 +- deps/v8/src/arm/simulator-arm.cc | 108 +- deps/v8/src/arm/simulator-arm.h | 4 + deps/v8/src/arm/stub-cache-arm.cc | 53 +- deps/v8/src/arm/virtual-frame-arm.cc | 50 +- deps/v8/src/arm/virtual-frame-arm.h | 7 +- deps/v8/src/assembler.cc | 10 - deps/v8/src/assembler.h | 3 - deps/v8/src/bootstrapper.cc | 27 + deps/v8/src/bootstrapper.h | 4 - deps/v8/src/builtins.cc | 3 +- deps/v8/src/conversions.cc | 36 +- deps/v8/src/dateparser.cc | 10 +- deps/v8/src/debug-agent.cc | 6 +- deps/v8/src/debug.cc | 18 +- deps/v8/src/dtoa.cc | 77 + deps/v8/src/dtoa.h | 81 + deps/v8/src/fast-dtoa.cc | 11 +- deps/v8/src/fast-dtoa.h | 3 +- deps/v8/src/fixed-dtoa.cc | 405 + deps/v8/src/fixed-dtoa.h | 55 + deps/v8/src/flags.cc | 14 +- deps/v8/src/frames.h | 1 + deps/v8/src/globals.h | 12 + deps/v8/src/heap.cc | 6 + deps/v8/src/heap.h | 19 +- deps/v8/src/ia32/codegen-ia32.cc | 286 +- deps/v8/src/ia32/codegen-ia32.h | 3 +- deps/v8/src/ia32/macro-assembler-ia32.h | 11 + deps/v8/src/ia32/stub-cache-ia32.cc | 26 +- deps/v8/src/ic.cc | 14 +- deps/v8/src/ic.h | 6 +- deps/v8/src/log.cc | 7 +- deps/v8/src/macro-assembler.h | 11 - deps/v8/src/messages.js | 7 +- deps/v8/src/objects-inl.h | 8 +- deps/v8/src/objects.cc | 19 +- deps/v8/src/objects.h | 7 +- deps/v8/src/platform-freebsd.cc | 9 +- deps/v8/src/platform-linux.cc | 9 +- deps/v8/src/platform-macos.cc | 10 +- deps/v8/src/platform-solaris.cc | 9 +- deps/v8/src/platform-win32.cc | 20 +- deps/v8/src/runtime.cc | 41 +- deps/v8/src/serialize.cc | 22 +- deps/v8/src/stub-cache.cc | 32 + deps/v8/src/stub-cache.h | 74 +- deps/v8/src/top.cc | 87 +- deps/v8/src/top.h | 5 +- deps/v8/src/utils.h | 3 + deps/v8/src/v8natives.js | 3 +- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/assembler-x64-inl.h | 2 +- deps/v8/src/x64/codegen-x64.cc | 422 +- deps/v8/src/x64/macro-assembler-x64.cc | 11 +- deps/v8/src/x64/macro-assembler-x64.h | 18 +- deps/v8/src/x64/stub-cache-x64.cc | 26 +- deps/v8/test/cctest/SConscript | 2 + deps/v8/test/cctest/gay-fixed.cc | 100049 +++++++++++++++ deps/v8/test/cctest/gay-fixed.h | 47 + deps/v8/test/cctest/gay-shortest.cc | 10 +- deps/v8/test/cctest/gay-shortest.h | 4 +- deps/v8/test/cctest/test-api.cc | 231 + deps/v8/test/cctest/test-debug.cc | 44 + deps/v8/test/cctest/test-fast-dtoa.cc | 37 +- deps/v8/test/cctest/test-fixed-dtoa.cc | 512 + .../test/cctest/test-macro-assembler-x64.cc | 118 +- deps/v8/test/mjsunit/instanceof-2.js | 329 + .../test/mjsunit/property-load-across-eval.js | 50 + deps/v8/test/mjsunit/regress/regress-696.js | 36 + deps/v8/test/mjsunit/regress/regress-697.js | 34 + deps/v8/test/mjsunit/smi-ops.js | 7 + deps/v8/tools/gyp/v8.gyp | 4 + 92 files changed, 103888 insertions(+), 860 
deletions(-) create mode 100644 deps/v8/src/dtoa.cc create mode 100644 deps/v8/src/dtoa.h create mode 100644 deps/v8/src/fixed-dtoa.cc create mode 100644 deps/v8/src/fixed-dtoa.h create mode 100644 deps/v8/test/cctest/gay-fixed.cc create mode 100644 deps/v8/test/cctest/gay-fixed.h create mode 100644 deps/v8/test/cctest/test-fixed-dtoa.cc create mode 100644 deps/v8/test/mjsunit/instanceof-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-696.js create mode 100644 deps/v8/test/mjsunit/regress/regress-697.js diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index be34cd4f3e..7e8f1f8d52 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -18,6 +18,7 @@ Jan de Mooij Jay Freeman Joel Stanley John Jozwiak +Kun Zhang Matt Hanselman Martyn Capewell Paolo Giarrusso diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index b28c6bf23c..c2d4e46a63 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,16 @@ +2010-05-10: Version 2.2.9 + + Allow Object.create to be called with a function (issue 697). + + Fixed bug with Date.parse returning a non-NaN value when called on a + non-date string (issue 696). + + Allow unaligned memory accesses on ARM targets that support it (by + Subrato K De of CodeAurora). + + C++ API for retrieving JavaScript stack trace information. + + 2010-05-05: Version 2.2.8 Performance improvements in the x64 and ARM backends. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index becf31dbfc..022d7411da 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -84,6 +84,7 @@ ANDROID_FLAGS = ['-march=armv7-a', '-finline-limit=64', '-DCAN_USE_VFP_INSTRUCTIONS=1', '-DCAN_USE_ARMV7_INSTRUCTIONS=1', + '-DCAN_USE_UNALIGNED_ACCESSES=1', '-MD'] ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include', @@ -203,7 +204,7 @@ LIBRARY_FLAGS = { 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'] }, 'simulator:arm': { - 'CCFLAGS': ['-m32'], + 'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'], 'LINKFLAGS': ['-m32'] }, 'armvariant:thumb2': { diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index eca6548687..f1b8ffbbdc 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -109,7 +109,7 @@ class V8EXPORT CpuProfileNode { /** Retrieves a child node by index. */ const CpuProfileNode* GetChild(int index) const; - static const int kNoLineNumberInfo = 0; + static const int kNoLineNumberInfo = Message::kNoLineNumberInfo; }; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index c07ba1f0cf..eb12de80b0 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -126,6 +126,8 @@ template <class T> class Persistent; class FunctionTemplate; class ObjectTemplate; class Data; +class StackTrace; +class StackFrame; namespace internal { @@ -691,6 +693,106 @@ class V8EXPORT Message { // TODO(1245381): Print to a string instead of on a FILE. static void PrintCurrentStackTrace(FILE* out); + + static const int kNoLineNumberInfo = 0; + static const int kNoColumnInfo = 0; +}; + + +/** + * Representation of a JavaScript stack trace. The information collected is a + * snapshot of the execution stack and the information remains valid after + * execution continues. + */ +class V8EXPORT StackTrace { + public: + /** + * Flags that determine what information is captured for each + * StackFrame when grabbing the current stack trace.
+ */ + enum StackTraceOptions { + kLineNumber = 1, + kColumnOffset = 1 << 1 | kLineNumber, + kScriptName = 1 << 2, + kFunctionName = 1 << 3, + kIsEval = 1 << 4, + kIsConstructor = 1 << 5, + kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName, + kDetailed = kOverview | kIsEval | kIsConstructor + }; + + /** + * Returns a StackFrame at a particular index. + */ + Local<StackFrame> GetFrame(uint32_t index) const; + + /** + * Returns the number of StackFrames. + */ + int GetFrameCount() const; + + /** + * Returns StackTrace as a v8::Array that contains StackFrame objects. + */ + Local<Array> AsArray(); + + /** + * Grab a snapshot of the current JavaScript execution stack. + * + * \param frame_limit The maximum number of stack frames we want to capture. + * \param options Enumerates the set of things we will capture for each + * StackFrame. + */ + static Local<StackTrace> CurrentStackTrace( + int frame_limit, + StackTraceOptions options = kOverview); +}; + + +/** + * A single JavaScript stack frame. + */ +class V8EXPORT StackFrame { + public: + /** + * Returns the number, 1-based, of the line for the associated function call. + * This method will return Message::kNoLineNumberInfo if it is unable to + * retrieve the line number, or if kLineNumber was not passed as an option + * when capturing the StackTrace. + */ + int GetLineNumber() const; + + /** + * Returns the 1-based column offset on the line for the associated function + * call. + * This method will return Message::kNoColumnInfo if it is unable to retrieve + * the column number, or if kColumnOffset was not passed as an option when + * capturing the StackTrace. + */ + int GetColumn() const; + + /** + * Returns the name of the resource that contains the script for the + * function for this StackFrame. + */ + Local<String> GetScriptName() const; + + /** + * Returns the name of the function associated with this stack frame. + */ + Local<String> GetFunctionName() const; + + /** + * Returns whether or not the associated function is compiled via a call to + * eval(). + */ + bool IsEval() const; + + /** + * Returns whether or not the associated function is called as a + * constructor via "new". + */ + bool IsConstructor() const; }; @@ -2122,7 +2224,7 @@ class V8EXPORT ResourceConstraints { }; -bool SetResourceConstraints(ResourceConstraints* constraints); +bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints); // --- E x c e p t i o n s --- diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index 511e21a03d..9233c0dfa2 100644 --- a/deps/v8/samples/process.cc +++ b/deps/v8/samples/process.cc @@ -294,7 +294,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) { // Fetch the template for creating JavaScript map wrappers. // It only has to be created once, which we do on demand.
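
As a usage sketch (not part of this patch), the stack-trace API declared in the v8.h hunk above can be exercised like this, under the 2.x embedding conventions (pre-isolate HandleScope, an already-entered Context). The function name, the frame limit of 10, and the output format are illustrative:

  #include <v8.h>
  #include <cstdio>

  // Walks the live JavaScript stack and prints one line per frame.
  void DumpCurrentStack() {
    v8::HandleScope scope;
    v8::Local<v8::StackTrace> trace =
        v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
    for (int i = 0; i < trace->GetFrameCount(); i++) {
      v8::Local<v8::StackFrame> frame =
          trace->GetFrame(static_cast<uint32_t>(i));
      v8::String::Utf8Value name(frame->GetFunctionName());
      v8::String::Utf8Value script(frame->GetScriptName());
      std::printf("#%d %s (%s:%d:%d)%s\n", i,
                  *name ? *name : "<anonymous>",
                  *script ? *script : "<unknown>",
                  frame->GetLineNumber(), frame->GetColumn(),
                  frame->IsEval() ? " [eval]" : "");
    }
  }
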
- if (request_template_.IsEmpty()) { + if (map_template_.IsEmpty()) { Handle<ObjectTemplate> raw_template = MakeMapTemplate(); map_template_ = Persistent<ObjectTemplate>::New(raw_template); } diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 5add9999d1..b68f6d1d23 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -58,6 +58,7 @@ SOURCES = { debug.cc disassembler.cc diy-fp.cc + dtoa.cc execution.cc factory.cc flags.cc @@ -68,6 +69,7 @@ SOURCES = { func-name-inferrer.cc global-handles.cc fast-dtoa.cc + fixed-dtoa.cc handles.cc hashmap.cc heap-profiler.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 4709a156b4..a4c38b72c2 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1438,7 +1438,7 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name, int Message::GetLineNumber() const { - ON_BAILOUT("v8::Message::GetLineNumber()", return -1); + ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo); ENTER_V8; HandleScope scope; EXCEPTION_PREAMBLE(); @@ -1470,7 +1470,7 @@ int Message::GetEndPosition() const { int Message::GetStartColumn() const { - if (IsDeadCheck("v8::Message::GetStartColumn()")) return 0; + if (IsDeadCheck("v8::Message::GetStartColumn()")) return kNoColumnInfo; ENTER_V8; HandleScope scope; i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this); @@ -1485,7 +1485,7 @@ int Message::GetStartColumn() const { int Message::GetEndColumn() const { - if (IsDeadCheck("v8::Message::GetEndColumn()")) return 0; + if (IsDeadCheck("v8::Message::GetEndColumn()")) return kNoColumnInfo; ENTER_V8; HandleScope scope; i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this); @@ -1525,6 +1525,118 @@ void Message::PrintCurrentStackTrace(FILE* out) { } + +// --- S t a c k T r a c e --- + +Local<StackFrame> StackTrace::GetFrame(uint32_t index) const { + if (IsDeadCheck("v8::StackTrace::GetFrame()")) return Local<StackFrame>(); + ENTER_V8; + HandleScope scope; + i::Handle<i::JSArray> self = Utils::OpenHandle(this); + i::Handle<i::JSObject> obj(i::JSObject::cast(self->GetElement(index))); + return scope.Close(Utils::StackFrameToLocal(obj)); +} + + +int StackTrace::GetFrameCount() const { + if (IsDeadCheck("v8::StackTrace::GetFrameCount()")) return -1; + ENTER_V8; + return i::Smi::cast(Utils::OpenHandle(this)->length())->value(); +} + + +Local<Array> StackTrace::AsArray() { + if (IsDeadCheck("v8::StackTrace::AsArray()")) return Local<Array>(); + ENTER_V8; + return Utils::ToLocal(Utils::OpenHandle(this)); +} + + +Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit, + StackTraceOptions options) { + if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) return Local<StackTrace>(); + ENTER_V8; + return i::Top::CaptureCurrentStackTrace(frame_limit, options); +} + + +// --- S t a c k F r a m e --- + +int StackFrame::GetLineNumber() const { + if (IsDeadCheck("v8::StackFrame::GetLineNumber()")) { + return Message::kNoLineNumberInfo; + } + ENTER_V8; + i::HandleScope scope; + i::Handle<i::JSObject> self = Utils::OpenHandle(this); + i::Handle<i::Object> line = GetProperty(self, "lineNumber"); + if (!line->IsSmi()) { + return Message::kNoLineNumberInfo; + } + return i::Smi::cast(*line)->value(); +} + + +int StackFrame::GetColumn() const { + if (IsDeadCheck("v8::StackFrame::GetColumn()")) { + return Message::kNoColumnInfo; + } + ENTER_V8; + i::HandleScope scope; + i::Handle<i::JSObject> self = Utils::OpenHandle(this); + i::Handle<i::Object> column = GetProperty(self, "column"); + if (!column->IsSmi()) { + return Message::kNoColumnInfo; + } + return i::Smi::cast(*column)->value(); +} + + +Local<String> StackFrame::GetScriptName() const { + if (IsDeadCheck("v8::StackFrame::GetScriptName()")) return Local<String>(); + ENTER_V8; + HandleScope scope; + i::Handle<i::JSObject>
self = Utils::OpenHandle(this); + i::Handle name = GetProperty(self, "scriptName"); + if (!name->IsString()) { + return Local(); + } + return scope.Close(Local::Cast(Utils::ToLocal(name))); +} + + +Local StackFrame::GetFunctionName() const { + if (IsDeadCheck("v8::StackFrame::GetFunctionName()")) return Local(); + ENTER_V8; + HandleScope scope; + i::Handle self = Utils::OpenHandle(this); + i::Handle name = GetProperty(self, "functionName"); + if (!name->IsString()) { + return Local(); + } + return scope.Close(Local::Cast(Utils::ToLocal(name))); +} + + +bool StackFrame::IsEval() const { + if (IsDeadCheck("v8::StackFrame::IsEval()")) return false; + ENTER_V8; + i::HandleScope scope; + i::Handle self = Utils::OpenHandle(this); + i::Handle is_eval = GetProperty(self, "isEval"); + return is_eval->IsTrue(); +} + + +bool StackFrame::IsConstructor() const { + if (IsDeadCheck("v8::StackFrame::IsConstructor()")) return false; + ENTER_V8; + i::HandleScope scope; + i::Handle self = Utils::OpenHandle(this); + i::Handle is_constructor = GetProperty(self, "isConstructor"); + return is_constructor->IsTrue(); +} + + // --- D a t a --- bool Value::IsUndefined() const { @@ -2185,10 +2297,10 @@ Local v8::Object::ObjectProtoToString() { int postfix_len = i::StrLength(postfix); int buf_len = prefix_len + str_len + postfix_len; - char* buf = i::NewArray(buf_len); + i::ScopedVector buf(buf_len); // Write prefix. - char* ptr = buf; + char* ptr = buf.start(); memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize); ptr += prefix_len; @@ -2200,8 +2312,7 @@ Local v8::Object::ObjectProtoToString() { memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize); // Copy the buffer into a heap-allocated string and return it. - Local result = v8::String::New(buf, buf_len); - i::DeleteArray(buf); + Local result = v8::String::New(buf.start(), buf_len); return result; } } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 7b88112c04..e7b13949a9 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -192,6 +192,10 @@ class Utils { v8::internal::Handle obj); static inline Local MessageToLocal( v8::internal::Handle obj); + static inline Local StackTraceToLocal( + v8::internal::Handle obj); + static inline Local StackFrameToLocal( + v8::internal::Handle obj); static inline Local NumberToLocal( v8::internal::Handle obj); static inline Local IntegerToLocal( @@ -227,6 +231,10 @@ class Utils { OpenHandle(const Function* data); static inline v8::internal::Handle OpenHandle(const Message* message); + static inline v8::internal::Handle + OpenHandle(const StackTrace* stack_trace); + static inline v8::internal::Handle + OpenHandle(const StackFrame* stack_frame); static inline v8::internal::Handle OpenHandle(const v8::Context* context); static inline v8::internal::Handle @@ -275,6 +283,8 @@ MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch) MAKE_TO_LOCAL(MessageToLocal, Object, Message) +MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace) +MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame) MAKE_TO_LOCAL(NumberToLocal, Object, Number) MAKE_TO_LOCAL(IntegerToLocal, Object, Integer) MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32) @@ -305,6 +315,8 @@ MAKE_OPEN_HANDLE(Function, JSFunction) MAKE_OPEN_HANDLE(Message, JSObject) MAKE_OPEN_HANDLE(Context, Context) MAKE_OPEN_HANDLE(External, Proxy) +MAKE_OPEN_HANDLE(StackTrace, JSArray) +MAKE_OPEN_HANDLE(StackFrame, JSObject) #undef MAKE_OPEN_HANDLE diff --git 
a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 7990368e66..f1f59ced7f 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1157,6 +1157,35 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { } +void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) { + ASSERT(src.rm().is(no_reg)); +#ifdef CAN_USE_ARMV7_INSTRUCTIONS + addrmod3(cond | B7 | B6 | B4, dst, src); +#else + ldr(dst, src, cond); + MemOperand src1(src); + src1.set_offset(src1.offset() + 4); + Register dst1(dst); + dst1.code_ = dst1.code_ + 1; + ldr(dst1, src1, cond); +#endif +} + + +void Assembler::strd(Register src, const MemOperand& dst, Condition cond) { + ASSERT(dst.rm().is(no_reg)); +#ifdef CAN_USE_ARMV7_INSTRUCTIONS + addrmod3(cond | B7 | B6 | B5 | B4, src, dst); +#else + str(src, dst, cond); + MemOperand dst1(dst); + dst1.set_offset(dst1.offset() + 4); + Register src1(src); + src1.code_ = src1.code_ + 1; + str(src1, dst1, cond); +#endif +} + // Load/Store multiple instructions. void Assembler::ldm(BlockAddrMode am, Register base, diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 839ed67375..61b84d434f 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -448,6 +448,18 @@ class MemOperand BASE_EMBEDDED { explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm, AddrMode am = Offset); + void set_offset(int32_t offset) { + ASSERT(rm_.is(no_reg)); + offset_ = offset; + } + + uint32_t offset() { + ASSERT(rm_.is(no_reg)); + return offset_; + } + + Register rm() const {return rm_;} + private: Register rn_; // base Register rm_; // register offset @@ -755,6 +767,8 @@ class Assembler : public Malloced { void strh(Register src, const MemOperand& dst, Condition cond = al); void ldrsb(Register dst, const MemOperand& src, Condition cond = al); void ldrsh(Register dst, const MemOperand& src, Condition cond = al); + void ldrd(Register dst, const MemOperand& src, Condition cond = al); + void strd(Register src, const MemOperand& dst, Condition cond = al); // Load/Store multiple instructions void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 7bb8c4675e..5718cb3ce2 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -107,7 +107,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, // Allocate the JSArray object together with space for a fixed array with the // requested elements. int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity); - __ AllocateInNewSpace(size / kPointerSize, + __ AllocateInNewSpace(size, result, scratch2, scratch3, @@ -191,7 +191,7 @@ static void AllocateJSArray(MacroAssembler* masm, // keeps the code below free of special casing for the empty array. 
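
A quick sketch of what the new ldrd/strd helpers above fall back to when CAN_USE_ARMV7_INSTRUCTIONS is not defined (not part of the patch; lo/hi stand in for the consecutive register pair dst and dst+1, mem for the resolved MemOperand):

  #include <stdint.h>

  // Doubleword load/store emulated as two word transfers, mirroring the
  // #else branches of Assembler::ldrd and Assembler::strd above.
  static inline void EmulatedLdrd(uint32_t* lo, uint32_t* hi,
                                  const uint32_t* mem) {
    *lo = mem[0];  // ldr dst,   [base, #offset]
    *hi = mem[1];  // ldr dst+1, [base, #offset + 4]
  }

  static inline void EmulatedStrd(uint32_t lo, uint32_t hi, uint32_t* mem) {
    mem[0] = lo;   // str src,   [base, #offset]
    mem[1] = hi;   // str src+1, [base, #offset + 4]
  }
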
int size = JSArray::kSize + FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size / kPointerSize, + __ AllocateInNewSpace(size, result, elements_array_end, scratch1, @@ -208,12 +208,13 @@ static void AllocateJSArray(MacroAssembler* masm, __ add(elements_array_end, elements_array_end, Operand(array_size, ASR, kSmiTagSize)); - __ AllocateInNewSpace(elements_array_end, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + __ AllocateInNewSpace( + elements_array_end, + result, + scratch1, + scratch2, + gc_required, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); // Allocated the JSArray. Now initialize the fields except for the elements // array. @@ -561,7 +562,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // r2: initial map // r7: undefined __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); // Allocated the JSObject, now initialize the fields. Map is set to initial // map and properties and elements are set to empty fixed array. @@ -632,12 +633,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // r5: start of next object // r7: undefined __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace(r0, - r5, - r6, - r2, - &undo_allocation, - RESULT_CONTAINS_TOP); + __ AllocateInNewSpace( + r0, + r5, + r6, + r2, + &undo_allocation, + static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); // Initialize the FixedArray. // r1: constructor diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 30860a1f99..5509830b30 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -191,7 +191,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { frame_->AllocateStackSlots(); VirtualFrame::SpilledScope spilled_scope(frame_); - int heap_slots = scope()->num_heap_slots(); + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { // Allocate local context. // Get outer context and create a new context based on it. @@ -1486,8 +1486,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, // Then process it as a normal function call. __ ldr(r0, MemOperand(sp, 3 * kPointerSize)); __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); - __ str(r1, MemOperand(sp, 3 * kPointerSize)); + __ strd(r0, MemOperand(sp, 2 * kPointerSize)); CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); frame_->CallStub(&call_function, 3); @@ -2279,8 +2278,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); - __ ldr(r0, frame_->ElementAt(0)); // load the current count - __ ldr(r1, frame_->ElementAt(1)); // load the length + // Load the current count to r0, load the length to r1. + __ ldrd(r0, frame_->ElementAt(0)); __ cmp(r0, r1); // compare to the array length node->break_target()->Branch(hs); @@ -2787,7 +2786,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { frame_->SpillAll(); Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads. 
+ // This rules out argument loads because eval forces arguments + // access to be through the arguments object. if (potential_slot != NULL) { __ ldr(r0, ContextSlotOperandCheckExtensions(potential_slot, @@ -3473,7 +3473,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { if (node->is_compound()) { // For a compound assignment the right-hand side is a binary operation // between the current property value and the actual right-hand side. - // Load of the current value leaves receiver and key on the stack. + // Duplicate receiver and key for loading the current property value. + frame_->Dup2(); EmitKeyedLoad(); frame_->EmitPush(r0); @@ -3702,9 +3703,56 @@ void CodeGenerator::VisitCall(Call* node) { } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { // ---------------------------------- - // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj + // JavaScript examples: + // + // with (obj) foo(1, 2, 3) // foo is in obj + // + // function f() {}; + // function g() { + // eval(...); + // f(); // f could be in extension object + // } // ---------------------------------- + // JumpTargets do not yet support merging frames so the frame must be + // spilled when jumping to these targets. + JumpTarget slow; + JumpTarget done; + + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (var->mode() == Variable::DYNAMIC_GLOBAL) { + LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow); + frame_->EmitPush(r0); + LoadGlobalReceiver(r1); + done.Jump(); + + } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = var->local_if_not_shadowed()->slot(); + // Only generate the fast case for locals that rewrite to slots. + // This rules out argument loads because eval forces arguments + // access to be through the arguments object. + if (potential_slot != NULL) { + __ ldr(r0, + ContextSlotOperandCheckExtensions(potential_slot, + r1, + r2, + &slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r0, ip); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } + frame_->EmitPush(r0); + LoadGlobalReceiver(r1); + done.Jump(); + } + } + + slow.Bind(); // Load the function frame_->EmitPush(cp); __ mov(r0, Operand(var->name())); @@ -3716,7 +3764,9 @@ void CodeGenerator::VisitCall(Call* node) { frame_->EmitPush(r0); // function frame_->EmitPush(r1); // receiver - // Call the function. + done.Bind(); + // Call the function. At this point, everything is spilled but the + // function and receiver are in r0 and r1. CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); frame_->EmitPush(r0); @@ -3767,19 +3817,23 @@ void CodeGenerator::VisitCall(Call* node) { // ------------------------------------------- LoadAndSpill(property->obj()); + if (!property->is_synthetic()) { + // Duplicate receiver for later use. + __ ldr(r0, MemOperand(sp, 0)); + frame_->EmitPush(r0); + } LoadAndSpill(property->key()); EmitKeyedLoad(); - frame_->Drop(); // key // Put the function below the receiver. if (property->is_synthetic()) { // Use the global receiver. - frame_->Drop(); - frame_->EmitPush(r0); + frame_->EmitPush(r0); // Function. 
LoadGlobalReceiver(r0); } else { - frame_->EmitPop(r1); // receiver - frame_->EmitPush(r0); // function - frame_->EmitPush(r1); // receiver + // Switch receiver and function. + frame_->EmitPop(r1); // Receiver. + frame_->EmitPush(r0); // Function. + frame_->EmitPush(r1); // Receiver. } // Call the function. @@ -4359,12 +4413,13 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); __ add(r2, r5, Operand(objects_size)); - __ AllocateInNewSpace(r2, // In: Size, in words. - r0, // Out: Start of allocation (tagged). - r3, // Scratch register. - r4, // Scratch register. - &slowcase, - TAG_OBJECT); + __ AllocateInNewSpace( + r2, // In: Size, in words. + r0, // Out: Start of allocation (tagged). + r3, // Scratch register. + r4, // Scratch register. + &slowcase, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); // r0: Start of allocated area, object-tagged. // r1: Number of elements in array, as smi. // r5: Number of elements, untagged. @@ -5388,8 +5443,7 @@ void DeferredReferenceGetKeyedValue::Generate() { // The rest of the instructions in the deferred code must be together. { Assembler::BlockConstPoolScope block_const_pool(masm_); - // Call keyed load IC. It has all arguments on the stack and the key in r0. - __ ldr(r0, MemOperand(sp, 0)); + // Call keyed load IC. It has the arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the @@ -5522,12 +5576,13 @@ void CodeGenerator::EmitKeyedLoad() { __ IncrementCounter(&Counters::keyed_load_inline, 1, frame_->scratch0(), frame_->scratch1()); - // Load the receiver and key from the stack. - frame_->SpillAllButCopyTOSToR1R0(); + // Load the key and receiver from the stack to r0 and r1. + frame_->PopToR1R0(); Register receiver = r0; Register key = r1; VirtualFrame::SpilledScope spilled(frame_); + // The deferred code expects key and receiver in r0 and r1. DeferredReferenceGetKeyedValue* deferred = new DeferredReferenceGetKeyedValue(); @@ -5721,6 +5776,9 @@ void Reference::GetValue() { Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); + if (!persist_after_get_) { + cgen_->UnloadReference(this); + } break; } @@ -5730,23 +5788,26 @@ void Reference::GetValue() { ASSERT(!is_global || var->is_global()); cgen_->EmitNamedLoad(GetName(), is_global); cgen_->frame()->EmitPush(r0); + if (!persist_after_get_) { + cgen_->UnloadReference(this); + } break; } case KEYED: { + if (persist_after_get_) { + cgen_->frame()->Dup2(); + } ASSERT(property != NULL); cgen_->EmitKeyedLoad(); cgen_->frame()->EmitPush(r0); + if (!persist_after_get_) set_unloaded(); break; } default: UNREACHABLE(); } - - if (!persist_after_get_) { - cgen_->UnloadReference(this); - } } @@ -5806,7 +5867,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ pop(r3); // Attempt to allocate new JSFunction in new space. - __ AllocateInNewSpace(JSFunction::kSize / kPointerSize, + __ AllocateInNewSpace(JSFunction::kSize, r0, r1, r2, @@ -5847,7 +5908,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { int length = slots_ + Context::MIN_CONTEXT_SLOTS; // Attempt to allocate the context in new space. 
- __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize), + __ AllocateInNewSpace(FixedArray::SizeFor(length), r0, r1, r2, @@ -5915,7 +5976,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size / kPointerSize, + __ AllocateInNewSpace(size, r0, r1, r2, @@ -6248,8 +6309,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, ConvertToDoubleStub stub1(r3, r2, r7, r6); __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); // Load rhs to a double in r0, r1. - __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); - __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ pop(lr); } @@ -6284,8 +6344,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, } else { __ push(lr); // Load lhs to a double in r2, r3. - __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); - __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); + __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); // Convert rhs to a double in r0, r1. __ mov(r7, Operand(r0)); ConvertToDoubleStub stub2(r1, r0, r7, r6); @@ -6449,10 +6508,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, __ sub(r7, r1, Operand(kHeapObjectTag)); __ vldr(d7, r7, HeapNumber::kValueOffset); } else { - __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); - __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); - __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); - __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); + __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); } __ jmp(both_loaded_as_doubles); } @@ -6829,8 +6886,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ vldr(d7, r7, HeapNumber::kValueOffset); } else { // Calling convention says that second double is in r2 and r3. - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); + __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); } __ jmp(&finished_loading_r0); __ bind(&r0_is_smi); @@ -6882,8 +6938,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ vldr(d6, r7, HeapNumber::kValueOffset); } else { // Calling convention says that first double is in r0 and r1. - __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); - __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); + __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); } __ jmp(&finished_loading_r1); __ bind(&r1_is_smi); @@ -6954,8 +7009,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases( __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); #else // Double returned in registers 0 and 1. - __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); - __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); + __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); #endif __ mov(r0, Operand(r5)); // And we are done. @@ -8206,6 +8260,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // Get the prototype of the function (r4 is result, r2 is scratch). __ ldr(r1, MemOperand(sp, 0)); + // r1 is function, r3 is map. + + // Look up the function and the map in the instanceof cache. 
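
A sketch of the caching scheme this comment introduces (not part of the patch; the real cache lives in the heap root list as kInstanceofCacheFunction/Map/AnswerRootIndex, and the names below are illustrative):

  // One-entry cache keyed on the (constructor, object map) pair; a hit
  // skips the prototype-chain walk entirely.
  struct InstanceofCache {
    void* function;
    void* map;
    bool answer;
  };

  static InstanceofCache cache = { 0, 0, false };

  static bool CachedInstanceof(void* function, void* map,
                               bool (*prototype_walk)(void*, void*)) {
    if (cache.function == function && cache.map == map) {
      return cache.answer;                         // cache hit
    }
    cache.function = function;                     // remember the pair ...
    cache.map = map;
    cache.answer = prototype_walk(function, map);  // ... and the result
    return cache.answer;
  }
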
+ Label miss; + __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); + __ cmp(r1, ip); + __ b(ne, &miss); + __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); + __ cmp(r3, ip); + __ b(ne, &miss); + __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); + + __ bind(&miss); __ TryGetFunctionPrototype(r1, r4, r2, &slow); // Check that the function prototype is a JS object. @@ -8215,6 +8285,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); __ b(gt, &slow); + __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); + // Register mapping: r3 is object map and r4 is function prototype. // Get prototype of object into r2. __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); @@ -8232,12 +8305,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ bind(&is_instance); __ mov(r0, Operand(Smi::FromInt(0))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); __ pop(); __ pop(); __ mov(pc, Operand(lr)); // Return. __ bind(&is_not_instance); __ mov(r0, Operand(Smi::FromInt(1))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); __ pop(); __ pop(); __ mov(pc, Operand(lr)); // Return. @@ -8324,8 +8399,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { __ str(r3, MemOperand(sp, 1 * kPointerSize)); // Try the new space allocation. Start out with computing the size - // of the arguments object and the elements array (in words, not - // bytes because AllocateInNewSpace expects words). + // of the arguments object and the elements array in words. Label add_arguments_object; __ bind(&try_allocate); __ cmp(r1, Operand(0)); @@ -8336,7 +8410,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); // Do the allocation of both objects in one go. - __ AllocateInNewSpace(r1, r0, r2, r3, &runtime, TAG_OBJECT); + __ AllocateInNewSpace( + r1, + r0, + r2, + r3, + &runtime, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); // Get the arguments boilerplate from the current (global) context. int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); @@ -8501,9 +8581,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // string length. A negative value will be greater (unsigned comparison). 
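
The idiom behind the fix below (a sketch, not part of the patch): reinterpreting a possibly-negative index as unsigned folds both range checks into one compare, because a negative int32 becomes a huge uint32:

  #include <stdint.h>

  // Equivalent C++ check: rejects index < 0 and index >= length with a
  // single unsigned comparison, which is why the branch must use the
  // unsigned condition ls rather than the signed le.
  static inline bool IndexInBounds(int32_t index, int32_t length) {
    return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
  }
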
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &runtime); + __ b(ne, &runtime); __ cmp(r3, Operand(r0)); - __ b(le, &runtime); + __ b(ls, &runtime); // r2: Number of capture registers // subject: Subject string diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 5eed13ff9d..57c5c1c00c 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -72,6 +72,10 @@ # define CAN_USE_THUMB_INSTRUCTIONS 1 #endif +#if CAN_USE_UNALIGNED_ACCESSES +#define V8_TARGET_CAN_READ_UNALIGNED 1 +#endif + // Using blx may yield better code, so use it when required or when available #if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS) #define USE_BLX 1 diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 4ba309467b..4051096fca 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -418,6 +418,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) { ASSERT(STRING_STARTS_WITH(format, "memop")); if (instr->HasL()) { Print("ldr"); + } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) { + if (instr->Bits(7, 4) == 0xf) { + Print("strd"); + } else { + Print("ldrd"); + } } else { Print("str"); } @@ -614,6 +620,47 @@ void Decoder::DecodeType01(Instr* instr) { } else { Unknown(instr); // not used by V8 } + } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) { + // ldrd, strd + switch (instr->PUField()) { + case 0: { + if (instr->Bit(22) == 0) { + Format(instr, "'memop'cond's 'rd, ['rn], -'rm"); + } else { + Format(instr, "'memop'cond's 'rd, ['rn], #-'off8"); + } + break; + } + case 1: { + if (instr->Bit(22) == 0) { + Format(instr, "'memop'cond's 'rd, ['rn], +'rm"); + } else { + Format(instr, "'memop'cond's 'rd, ['rn], #+'off8"); + } + break; + } + case 2: { + if (instr->Bit(22) == 0) { + Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w"); + } else { + Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w"); + } + break; + } + case 3: { + if (instr->Bit(22) == 0) { + Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w"); + } else { + Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w"); + } + break; + } + default: { + // The PU field is a 2-bit field. + UNREACHABLE(); + break; + } + } } else { // extra load/store instructions switch (instr->PUField()) { diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index e9bdfe55f7..6680af9a97 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -738,15 +738,10 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, // Load the key. __ mov(r0, Operand(key_literal->handle())); - // Push both as arguments to ic. - __ Push(r1, r0); - - // Call keyed load IC. It has all arguments on the stack and the key in r0. + // Call keyed load IC. It has arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - - // Drop key and object left on the stack by IC, and push the result. - DropAndApply(2, context, r0); + Apply(context, r0); } } @@ -935,8 +930,16 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } break; case KEYED_PROPERTY: - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + // We need the key and receiver on both the stack and in r0 and r1. 
+ if (expr->is_compound()) { + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kAccumulator); + __ ldr(r1, MemOperand(sp, 0)); + __ push(r0); + } else { + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kStack); + } break; } @@ -1005,8 +1008,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); - // Call keyed load IC. It has all arguments on the stack and the key in r0. - __ ldr(r0, MemOperand(sp, 0)); + // Call keyed load IC. It has arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); } @@ -1171,10 +1173,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) { // Drop receiver left on the stack by IC. DropAndApply(1, context_, r0); } else { - VisitForValue(expr->key(), kStack); + VisitForValue(expr->key(), kAccumulator); + __ pop(r1); EmitKeyedPropertyLoad(expr); - // Drop key and receiver left on the stack by IC. - DropAndApply(2, context_, r0); + Apply(context_, r0); } } @@ -1246,24 +1248,31 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Call to a keyed property, use keyed load IC followed by function // call. VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + VisitForValue(prop->key(), kAccumulator); // Record source code position for IC call. SetSourcePosition(prop->position()); - // Call keyed load IC. It has all arguments on the stack and the key in - // r0. - __ ldr(r0, MemOperand(sp, 0)); + if (prop->is_synthetic()) { + __ pop(r1); // We do not need to keep the receiver. + } else { + __ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on. + } + Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - // Load receiver object into r1. if (prop->is_synthetic()) { + // Push result (function). + __ push(r0); + // Push Global receiver. __ ldr(r1, CodeGenerator::GlobalObject()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); + __ push(r1); } else { - __ ldr(r1, MemOperand(sp, kPointerSize)); + // Pop receiver. + __ pop(r1); + // Push result (function). + __ push(r0); + __ push(r1); } - // Overwrite (object, key) with (function, receiver). 
- __ str(r0, MemOperand(sp, kPointerSize)); - __ str(r1, MemOperand(sp)); EmitCallWithStub(expr); } } else { @@ -1552,7 +1561,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == NAMED_PROPERTY) { EmitNamedPropertyLoad(prop); } else { - VisitForValue(prop->key(), kStack); + VisitForValue(prop->key(), kAccumulator); + __ ldr(r1, MemOperand(sp, 0)); + __ push(r0); EmitKeyedPropertyLoad(prop); } } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 5b1915f639..34ba5e5f78 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -683,11 +683,9 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- - __ ldr(r1, MemOperand(sp, kPointerSize)); __ Push(r1, r0); ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); @@ -699,11 +697,9 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- - __ ldr(r1, MemOperand(sp, kPointerSize)); __ Push(r1, r0); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); @@ -714,18 +710,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow, fast, check_pixel_array, check_number_dictionary; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); + Register key = r0; + Register receiver = r1; // Check that the object isn't a smi. - __ BranchOnSmi(r1, &slow); + __ BranchOnSmi(receiver, &slow); // Get the map of the receiver. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check bit field. __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); __ tst(r3, Operand(kSlowCaseBitFieldMask)); @@ -740,60 +735,65 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ b(lt, &slow); // Check that the key is a smi. - __ BranchOnNotSmi(r0, &slow); - // Save key in r2 in case we want it for the number dictionary case. - __ mov(r2, r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ BranchOnNotSmi(key, &slow); + // Untag key into r2.. + __ mov(r2, Operand(key, ASR, kSmiTagSize)); // Get the elements array of the object. - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r3, ip); __ b(ne, &check_pixel_array); // Check that the key (index) is within bounds. - __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); - __ cmp(r0, r3); + __ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset)); + __ cmp(r2, r3); __ b(hs, &slow); // Fast case: Do the load. 
- __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); + __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); + __ cmp(r2, ip); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ b(eq, &slow); + __ mov(r0, r2); __ Ret(); // Check whether the elements is a pixel array. + // r0: key + // r2: untagged index + // r3: elements map + // r4: elements __ bind(&check_pixel_array); __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); __ cmp(r3, ip); __ b(ne, &check_number_dictionary); - __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset)); - __ cmp(r0, ip); + __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset)); + __ cmp(r2, ip); __ b(hs, &slow); - __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset)); - __ ldrb(r0, MemOperand(ip, r0)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi. + __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset)); + __ ldrb(r2, MemOperand(ip, r2)); + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi. __ Ret(); __ bind(&check_number_dictionary); // Check whether the elements is a number dictionary. - // r0: untagged index - // r1: elements - // r2: key + // r0: key + // r2: untagged index + // r3: elements map + // r4: elements __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r3, ip); __ b(ne, &slow); - GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4); + GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5); + __ mov(r0, r2); __ Ret(); - // Slow case: Push extra copies of the arguments (2). + // Slow case, key and receiver still in r0 and r1. __ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); - __ ldr(r0, MemOperand(sp, 0)); + __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3); GenerateRuntimeGetProperty(masm); } @@ -802,8 +802,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; Label index_not_smi; @@ -811,9 +810,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Label slow_char_code; Label got_char_code; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - Register object = r1; Register index = r0; Register code = r2; @@ -913,25 +909,21 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow, failed_allocation; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - - // r0: key - // r1: receiver object + Register key = r0; + Register receiver = r1; // Check that the object isn't a smi - __ BranchOnSmi(r1, &slow); + __ BranchOnSmi(receiver, &slow); // Check that the key is a smi. - __ BranchOnNotSmi(r0, &slow); + __ BranchOnNotSmi(key, &slow); // Check that the object is a JS object. Load map into r2. 
- __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE); + __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); __ b(lt, &slow); // Check that the receiver does not require access checks. We need @@ -943,53 +935,51 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Check that the elements array is the appropriate type of // ExternalArray. - // r0: index (as a smi) - // r1: JSObject - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); __ cmp(r2, ip); __ b(ne, &slow); // Check that the index is in range. - __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset)); - __ cmp(r1, Operand(r0, ASR, kSmiTagSize)); + __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); + __ cmp(ip, Operand(key, ASR, kSmiTagSize)); // Unsigned comparison catches both negative and too-large values. __ b(lo, &slow); - // r0: index (smi) - // r1: elements array - __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset)); - // r1: base pointer of external storage + // r3: elements array + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + // r3: base pointer of external storage // We are not untagging smi key and instead work with it // as if it was premultiplied by 2. ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); + Register value = r2; switch (array_type) { case kExternalByteArray: - __ ldrsb(r0, MemOperand(r1, r0, LSR, 1)); + __ ldrsb(value, MemOperand(r3, key, LSR, 1)); break; case kExternalUnsignedByteArray: - __ ldrb(r0, MemOperand(r1, r0, LSR, 1)); + __ ldrb(value, MemOperand(r3, key, LSR, 1)); break; case kExternalShortArray: - __ ldrsh(r0, MemOperand(r1, r0, LSL, 0)); + __ ldrsh(value, MemOperand(r3, key, LSL, 0)); break; case kExternalUnsignedShortArray: - __ ldrh(r0, MemOperand(r1, r0, LSL, 0)); + __ ldrh(value, MemOperand(r3, key, LSL, 0)); break; case kExternalIntArray: case kExternalUnsignedIntArray: - __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + __ ldr(value, MemOperand(r3, key, LSL, 1)); break; case kExternalFloatArray: if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ add(r0, r1, Operand(r0, LSL, 1)); - __ vldr(s0, r0, 0); + __ add(r2, r3, Operand(key, LSL, 1)); + __ vldr(s0, r2, 0); } else { - __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + __ ldr(value, MemOperand(r3, key, LSL, 1)); } break; default: @@ -998,37 +988,36 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, } // For integer array types: - // r0: value + // r2: value // For floating-point array type // s0: value (if VFP3 is supported) - // r0: value (if VFP3 is not supported) + // r2: value (if VFP3 is not supported) if (array_type == kExternalIntArray) { // For the Int and UnsignedInt array types, we need to see whether // the value can be represented in a Smi. If not, we need to convert // it to a HeapNumber. Label box_int; - __ cmp(r0, Operand(0xC0000000)); + __ cmp(value, Operand(0xC0000000)); __ b(mi, &box_int); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); __ bind(&box_int); - - __ mov(r1, r0); - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. 
Use r0 for result as key is not needed any more. __ AllocateHeapNumber(r0, r3, r4, &slow); if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r1); + __ vmov(s0, value); __ vcvt_f64_s32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); __ Ret(); } else { - WriteInt32ToHeapNumberStub stub(r1, r0, r3); + WriteInt32ToHeapNumberStub stub(value, r0, r3); __ TailCallStub(&stub); } } else if (array_type == kExternalUnsignedIntArray) { @@ -1038,51 +1027,60 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); Label box_int, done; - __ tst(r0, Operand(0xC0000000)); + __ tst(value, Operand(0xC0000000)); __ b(ne, &box_int); - - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); __ bind(&box_int); - __ vmov(s0, r0); - __ AllocateHeapNumber(r0, r1, r2, &slow); + __ vmov(s0, value); + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all + // registers - also when jumping due to exhausted young space. + __ AllocateHeapNumber(r2, r3, r4, &slow); __ vcvt_f64_u32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); + __ sub(r1, r2, Operand(kHeapObjectTag)); __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); __ Ret(); } else { // Check whether unsigned integer fits into smi. Label box_int_0, box_int_1, done; - __ tst(r0, Operand(0x80000000)); + __ tst(value, Operand(0x80000000)); __ b(ne, &box_int_0); - __ tst(r0, Operand(0x40000000)); + __ tst(value, Operand(0x40000000)); __ b(ne, &box_int_1); - // Tag integer as smi and return it. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); + Register hiword = value; // r2. + Register loword = r3; + __ bind(&box_int_0); // Integer does not have leading zeros. - GenerateUInt2Double(masm, r0, r1, r2, 0); + GenerateUInt2Double(masm, hiword, loword, r4, 0); __ b(&done); __ bind(&box_int_1); // Integer has one leading zero. - GenerateUInt2Double(masm, r0, r1, r2, 1); + GenerateUInt2Double(masm, hiword, loword, r4, 1); + __ bind(&done); - // Integer was converted to double in registers r0:r1. - // Wrap it into a HeapNumber. - __ AllocateHeapNumber(r2, r3, r5, &slow); + // Integer was converted to double in registers hiword:loword. + // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber + // clobbers all registers - also when jumping due to exhausted young + // space. + __ AllocateHeapNumber(r4, r5, r6, &slow); - __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset)); - __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); - - __ mov(r0, r2); + __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); + __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); + __ mov(r0, r4); __ Ret(); } } else if (array_type == kExternalFloatArray) { @@ -1090,40 +1088,52 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // HeapNumber. if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ AllocateHeapNumber(r0, r1, r2, &slow); + // Allocate a HeapNumber for the result. Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. 
+ __ AllocateHeapNumber(r2, r3, r4, &slow); __ vcvt_f64_f32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); + __ sub(r1, r2, Operand(kHeapObjectTag)); __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); __ Ret(); } else { - __ AllocateHeapNumber(r3, r1, r2, &slow); + // Allocate a HeapNumber for the result. Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. + __ AllocateHeapNumber(r3, r4, r5, &slow); // VFP is not available, do manual single to double conversion. - // r0: floating point value (binary32) + // r2: floating point value (binary32) + // r3: heap number for result - // Extract mantissa to r1. - __ and_(r1, r0, Operand(kBinary32MantissaMask)); + // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to + // the slow case from here. + __ and_(r0, value, Operand(kBinary32MantissaMask)); - // Extract exponent to r2. - __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits)); - __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + // Extract exponent to r1. OK to clobber r1 now as there are no jumps to + // the slow case from here. + __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); + __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); Label exponent_rebiased; - __ teq(r2, Operand(0x00)); + __ teq(r1, Operand(0x00)); __ b(eq, &exponent_rebiased); - __ teq(r2, Operand(0xff)); - __ mov(r2, Operand(0x7ff), LeaveCC, eq); + __ teq(r1, Operand(0xff)); + __ mov(r1, Operand(0x7ff), LeaveCC, eq); __ b(eq, &exponent_rebiased); // Rebias exponent. - __ add(r2, - r2, + __ add(r1, + r1, Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); __ bind(&exponent_rebiased); - __ and_(r0, r0, Operand(kBinary32SignMask)); - __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord)); + __ and_(r2, value, Operand(kBinary32SignMask)); + value = no_reg; + __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); // Shift mantissa. static const int kMantissaShiftForHiWord = @@ -1132,24 +1142,25 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, static const int kMantissaShiftForLoWord = kBitsPerInt - kMantissaShiftForHiWord; - __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord)); - __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord)); + __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); + __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); + + __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); - __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset)); - __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); __ mov(r0, r3); __ Ret(); } } else { - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); } - // Slow case: Load name and receiver from stack and jump to runtime. + // Slow case, key and receiver still in r0 and r1. 
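
For reference, the manual binary32-to-binary64 widening performed by the non-VFP path above can be sketched in C++ (not part of the patch; like the assembly, it passes denormals through without renormalizing them):

  #include <stdint.h>

  // Split sign/exponent/mantissa, rebias the exponent from 127 to 1023
  // (pinning 0x00 and 0xff), and shift the 23-bit mantissa into the top
  // of the 52-bit double mantissa field.
  static uint64_t Binary32ToBinary64(uint32_t f) {
    uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
    uint32_t exponent = (f >> 23) & 0xff;
    uint64_t mantissa = f & 0x007fffff;
    uint64_t exponent64;
    if (exponent == 0x00) {
      exponent64 = 0;                      // zero (denormals stay unscaled)
    } else if (exponent == 0xff) {
      exponent64 = 0x7ff;                  // infinity / NaN
    } else {
      exponent64 = exponent - 127 + 1023;  // rebias
    }
    return sign | (exponent64 << 52) | (mantissa << (52 - 23));
  }
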
__ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); - __ ldr(r0, MemOperand(sp, 0)); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); GenerateRuntimeGetProperty(masm); } @@ -1158,14 +1169,10 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - // Check that the receiver isn't a smi. __ BranchOnSmi(r1, &slow); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index d97f04b71c..c4b153f82e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -232,6 +232,13 @@ void MacroAssembler::LoadRoot(Register destination, } +void MacroAssembler::StoreRoot(Register source, + Heap::RootListIndex index, + Condition cond) { + str(source, MemOperand(roots, index << kPointerSizeLog2), cond); +} + + void MacroAssembler::RecordWriteHelper(Register object, Register offset, Register scratch) { @@ -926,6 +933,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ASSERT(!result.is(scratch1)); ASSERT(!scratch1.is(scratch2)); + // Make object size into bytes. + if ((flags & SIZE_IN_WORDS) != 0) { + object_size *= kPointerSize; + } + ASSERT_EQ(0, object_size & kObjectAlignmentMask); + // Load address of new object into result and allocation top address into // scratch1. ExternalReference new_space_allocation_top = @@ -948,23 +961,16 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ExternalReference::new_space_allocation_limit_address(); mov(scratch2, Operand(new_space_allocation_limit)); ldr(scratch2, MemOperand(scratch2)); - add(result, result, Operand(object_size * kPointerSize)); + add(result, result, Operand(object_size)); cmp(result, Operand(scratch2)); b(hi, gc_required); - - // Update allocation top. result temporarily holds the new top. - if (FLAG_debug_code) { - tst(result, Operand(kObjectAlignmentMask)); - Check(eq, "Unaligned allocation in new space"); - } str(result, MemOperand(scratch1)); // Tag and adjust back to start of new object. if ((flags & TAG_OBJECT) != 0) { - sub(result, result, Operand((object_size * kPointerSize) - - kHeapObjectTag)); + sub(result, result, Operand(object_size - kHeapObjectTag)); } else { - sub(result, result, Operand(object_size * kPointerSize)); + sub(result, result, Operand(object_size)); } } @@ -1001,7 +1007,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, ExternalReference::new_space_allocation_limit_address(); mov(scratch2, Operand(new_space_allocation_limit)); ldr(scratch2, MemOperand(scratch2)); - add(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + if ((flags & SIZE_IN_WORDS) != 0) { + add(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + } else { + add(result, result, Operand(object_size)); + } cmp(result, Operand(scratch2)); b(hi, gc_required); @@ -1013,7 +1023,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, str(result, MemOperand(scratch1)); // Adjust back to start of new object. 
- sub(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + if ((flags & SIZE_IN_WORDS) != 0) { + sub(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + } else { + sub(result, result, Operand(object_size)); + } // Tag object if requested. if ((flags & TAG_OBJECT) != 0) { @@ -1054,10 +1068,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. add(scratch1, scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); - // AllocateInNewSpace expects the size in words, so we can round down - // to kObjectAlignment and divide by kPointerSize in the same shift. - ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1); - mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2)); + and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate two-byte string in new space. AllocateInNewSpace(scratch1, @@ -1088,10 +1099,7 @@ void MacroAssembler::AllocateAsciiString(Register result, ASSERT(kCharSize == 1); add(scratch1, length, Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); - // AllocateInNewSpace expects the size in words, so we can round down - // to kObjectAlignment and divide by kPointerSize in the same shift. - ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1); - mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2)); + and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate ASCII string in new space. AllocateInNewSpace(scratch1, @@ -1115,7 +1123,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize / kPointerSize, + AllocateInNewSpace(ConsString::kSize, result, scratch1, scratch2, @@ -1135,7 +1143,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize / kPointerSize, + AllocateInNewSpace(ConsString::kSize, result, scratch1, scratch2, @@ -1549,7 +1557,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, Label* gc_required) { // Allocate an object in the heap for the heap number and tag it as a heap // object. - AllocateInNewSpace(HeapNumber::kSize / kPointerSize, + AllocateInNewSpace(HeapNumber::kSize, result, scratch1, scratch2, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 2ec7a39eab..9cf93da341 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -52,6 +52,21 @@ enum InvokeJSFlags { }; +// Flags used for the AllocateInNewSpace functions. +enum AllocationFlags { + // No special flags. + NO_ALLOCATION_FLAGS = 0, + // Return the pointer to the allocated already tagged as a heap object. + TAG_OBJECT = 1 << 0, + // The content of the result register already contains the allocation top in + // new space. + RESULT_CONTAINS_TOP = 1 << 1, + // Specify that the requested size of the space to allocate is specified in + // words instead of bytes. + SIZE_IN_WORDS = 1 << 2 +}; + + // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -85,6 +100,10 @@ class MacroAssembler: public Assembler { void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond = al); + // Store an object to the root table. + void StoreRoot(Register source, + Heap::RootListIndex index, + Condition cond = al); // Check if object is in new space. 
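The hunks above switch AllocateInNewSpace to byte-sized allocations by default, with SIZE_IN_WORDS as an explicit opt-in flag. A small C++ model of the flag handling, using the 32-bit ARM constants the diff itself asserts (kObjectAlignmentMask + 1 == kPointerSize); this is a standalone sketch, not V8 code.

#include <cassert>
#include <cstdio>

// Minimal model of how the AllocationFlags bits above combine: callers may
// pass a size in words, and the allocator normalizes it to bytes before
// bumping the allocation top.
enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,
  RESULT_CONTAINS_TOP = 1 << 1,
  SIZE_IN_WORDS = 1 << 2
};

const int kPointerSize = 4;                       // 32-bit ARM
const int kObjectAlignmentMask = kPointerSize - 1;

int NormalizeToBytes(int object_size, int flags) {
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;                  // words -> bytes
  }
  assert((object_size & kObjectAlignmentMask) == 0);
  return object_size;
}

int main() {
  // A 3-word object passed in words vs. the same 12 bytes passed directly.
  std::printf("%d\n", NormalizeToBytes(3, TAG_OBJECT | SIZE_IN_WORDS));  // 12
  std::printf("%d\n", NormalizeToBytes(12, TAG_OBJECT));                 // 12
  return 0;
}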
@@ -280,7 +299,9 @@ class MacroAssembler: public Assembler { // Allocate an object in new space. The object_size is specified in words (not // bytes). If the new space is exhausted control continues at the gc_required // label. The allocated object is returned in result. If the flag - // tag_allocated_object is true the result is tagged as as a heap object. + // tag_allocated_object is true the result is tagged as a heap object. All + // registers are also clobbered when control continues at the gc_required + // label. void AllocateInNewSpace(int object_size, Register result, Register scratch1, @@ -324,8 +345,9 @@ class MacroAssembler: public Assembler { Register scratch2, Label* gc_required); - // Allocates a heap number or jumps to the need_gc label if the young space - // is full and a scavenge is needed. + // Allocates a heap number or jumps to the gc_required label if the young + // space is full and a scavenge is needed. All registers are also clobbered + // when control continues at the gc_required label. void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 2fdba14af4..64fe5d69c1 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1210,14 +1210,31 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); offset = r0; } - // We assume that we cannot do unaligned loads on ARM, so this function - // must only be used to load a single character at a time. + // The ldr, str, ldrh, strh instructions can do unaligned accesses if the CPU + // and the operating system running on the target allow it. + // If unaligned loads/stores are not supported then this function must only + // be used to load a single character at a time. +#if !V8_TARGET_CAN_READ_UNALIGNED ASSERT(characters == 1); +#endif + if (mode_ == ASCII) { - __ ldrb(current_character(), MemOperand(end_of_input_address(), offset)); + if (characters == 4) { + __ ldr(current_character(), MemOperand(end_of_input_address(), offset)); + } else if (characters == 2) { + __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); + } else { + ASSERT(characters == 1); + __ ldrb(current_character(), MemOperand(end_of_input_address(), offset)); + } } else { ASSERT(mode_ == UC16); - __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); + if (characters == 2) { + __ ldr(current_character(), MemOperand(end_of_input_address(), offset)); + } else { + ASSERT(characters == 1); + __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); + } } } diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 5fe7d5f76e..e4601f3e3f 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -728,6 +728,13 @@ int32_t Simulator::get_register(int reg) const { } +void Simulator::set_dw_register(int dreg, const int* dbl) { + ASSERT((dreg >= 0) && (dreg < num_d_registers)); + registers_[dreg] = dbl[0]; + registers_[dreg + 1] = dbl[1]; +} + + // Raw access to the PC register. void Simulator::set_pc(int32_t value) { pc_modified_ = true; @@ -864,27 +871,42 @@ void Simulator::TrashCallerSaveRegisters() { registers_[12] = 0x50Bad4U; } - -// The ARM cannot do unaligned reads and writes. On some ARM platforms an -// interrupt is caused. On others it does a funky rotation thing.
For now we -// simply disallow unaligned reads, but at some point we may want to move to -// emulating the rotate behaviour. Note that simulator runs have the runtime +// Some operating systems allow unaligned access on ARMv7 targets. We +// assume that unaligned accesses are not allowed unless the v8 build system +// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero. +// The statements below describe the behavior of ARM CPUs that don't +// support unaligned access. +// Some ARM platforms raise an interrupt on detecting unaligned access. +// On others the access does a funky rotation thing. For now we +// simply disallow unaligned reads. Note that simulator runs have the runtime // system running directly on the host system and only generated code is // executed in the simulator. Since the host is typically IA32 we will not -// get the correct ARM-like behaviour on unaligned accesses. +// get the correct ARM-like behaviour on unaligned accesses for those ARM +// targets that don't support unaligned loads and stores. + int Simulator::ReadW(int32_t addr, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); + return *ptr; +#else if ((addr & 3) == 0) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); return *ptr; } - PrintF("Unaligned read at 0x%08x\n", addr); + PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); return 0; +#endif } void Simulator::WriteW(int32_t addr, int value, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); + *ptr = value; + return; +#else if ((addr & 3) == 0) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); *ptr = value; @@ -892,10 +914,15 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) { } PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); +#endif } uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); + return *ptr; +#else if ((addr & 1) == 0) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); return *ptr; @@ -903,10 +930,15 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) { PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); return 0; +#endif } int16_t Simulator::ReadH(int32_t addr, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + return *ptr; +#else if ((addr & 1) == 0) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); return *ptr; @@ -914,10 +946,16 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) { PrintF("Unaligned signed halfword read at 0x%08x\n", addr); UNIMPLEMENTED(); return 0; +#endif } void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); + *ptr = value; + return; +#else if ((addr & 1) == 0) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); *ptr = value; @@ -925,10 +963,16 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) { } PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); +#endif } void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) { +#if V8_TARGET_CAN_READ_UNALIGNED + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + *ptr = value; + return; +#else if ((addr & 1) == 0) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); *ptr = value; @@ -936,6 +980,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) { } PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED();
+#endif } @@ -963,6 +1008,41 @@ void Simulator::WriteB(int32_t addr, int8_t value) { } +int32_t* Simulator::ReadDW(int32_t addr) { +#if V8_TARGET_CAN_READ_UNALIGNED + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + return ptr; +#else + if ((addr & 3) == 0) { + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + return ptr; + } + PrintF("Unaligned read at 0x%08x\n", addr); + UNIMPLEMENTED(); + return 0; +#endif +} + + +void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { +#if V8_TARGET_CAN_READ_UNALIGNED + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + *ptr++ = value1; + *ptr = value2; + return; +#else + if ((addr & 3) == 0) { + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + *ptr++ = value1; + *ptr = value2; + return; + } + PrintF("Unaligned write at 0x%08x\n", addr); + UNIMPLEMENTED(); +#endif +} + + // Returns the limit of the stack area to enable checking for stack overflows. uintptr_t Simulator::StackLimit() const { // Leave a safety margin of 256 bytes to prevent overrunning the stack when @@ -1590,7 +1670,19 @@ void Simulator::DecodeType01(Instr* instr) { } } } - if (instr->HasH()) { + if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) { + ASSERT((rd % 2) == 0); + if (instr->HasH()) { + // The strd instruction. + int32_t value1 = get_register(rd); + int32_t value2 = get_register(rd+1); + WriteDW(addr, value1, value2); + } else { + // The ldrd instruction. + int* rn_data = ReadDW(addr); + set_dw_register(rd, rn_data); + } + } else if (instr->HasH()) { if (instr->HasSign()) { if (instr->HasL()) { int16_t val = ReadH(addr, instr); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 91614ea2d7..61af3aa6e0 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -159,6 +159,7 @@ class Simulator { // instruction. void set_register(int reg, int32_t value); int32_t get_register(int reg) const; + void set_dw_register(int dreg, const int* dbl); // Support for VFP. void set_s_register(int reg, unsigned int value); @@ -252,6 +253,9 @@ class Simulator { inline int ReadW(int32_t addr, Instr* instr); inline void WriteW(int32_t addr, int value, Instr* instr); + int32_t* ReadDW(int32_t addr); + void WriteDW(int32_t addr, int32_t value1, int32_t value2); + // Executing is handled based on the instruction type. void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one void DecodeType2(Instr* instr); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 095631d642..877354ccae 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1121,11 +1121,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. - String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } @@ -1175,11 +1171,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code.
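On the ldrd/strd decoding added to DecodeType01 above: the simulator recognizes the doubleword transfers by bits 7..4 matching the pattern 11x1 (mask 0xd) with the L bit (bit 20) clear, then uses the H bit to tell a store from a load. A standalone C++ sketch of that predicate follows; the concrete instruction words in main() are our own worked examples of standard ARMv5TE encodings, not taken from the patch.

#include <cstdint>
#include <cstdio>

// Spot ldrd/strd the way the simulator does: bits 7..4 match 11x1 and the
// load bit (20) is clear. The destination register pair must start even,
// which the simulator asserts separately.
static bool IsDoublewordTransfer(uint32_t instr) {
  return ((instr >> 4) & 0xd) == 0xd && ((instr >> 20) & 1) == 0;
}

int main() {
  std::printf("%d\n", IsDoublewordTransfer(0xE1C020D0));  // 1: ldrd r2, [r0]
  std::printf("%d\n", IsDoublewordTransfer(0xE1C020F0));  // 1: strd r2, [r0]
  std::printf("%d\n", IsDoublewordTransfer(0xE1D020D0));  // 0: ldrsb (L set)
  return 0;
}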
- String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } @@ -1194,9 +1186,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, // ----------------------------------- SharedFunctionInfo* function_info = function->shared(); if (function_info->HasCustomCallGenerator()) { - CustomCallGenerator generator = - ToCData<CustomCallGenerator>(function_info->function_data()); - Object* result = generator(this, object, holder, function, name, check); + const int id = function_info->custom_call_generator_id(); + Object* result = + CompileCustomCall(id, object, holder, function, name, check); // undefined means bail out to regular compiler. if (!result->IsUndefined()) { return result; } @@ -1334,11 +1326,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. - String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } @@ -1825,8 +1813,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1850,8 +1836,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); Failure* failure = Failure::InternalError(); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, callback, name, &miss, &failure); if (!success) return failure; @@ -1879,8 +1863,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1904,8 +1886,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1915,7 +1896,6 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, LookupResult lookup; LookupPostInterceptor(holder, name, &lookup); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadInterceptor(receiver, holder, &lookup, @@ -1936,8 +1916,7 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1945,7 +1924,6 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadArrayLength(masm(), r1, r2, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1958,8 +1936,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); @@ -1968,7 +1945,6 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { __ cmp(r0, Operand(Handle<String>(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadStringLength(masm(), r1, r2, r3, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); @@ -1984,8 +1960,7 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -2085,7 +2060,7 @@ Object* ConstructStubCompiler::CompileConstructStub( r5, r6, &generic_stub_call, - NO_ALLOCATION_FLAGS); + SIZE_IN_WORDS); // Allocated the JSObject, now initialize the fields. Map is set to initial // map and properties and elements are set to empty fixed array. diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index bf5cff2998..0ec6e203d2 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -323,7 +323,8 @@ void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) { void VirtualFrame::CallKeyedLoadIC() { Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); - SpillAllButCopyTOSToR0(); + PopToR1R0(); + SpillAll(); CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); } @@ -505,21 +506,25 @@ void VirtualFrame::Dup() { break; case R0_TOS: __ mov(r1, r0); + // r0 and r1 contain the same value. Prefer a state with r0 holding TOS. top_of_stack_state_ = R0_R1_TOS; break; case R1_TOS: __ mov(r0, r1); + // r0 and r1 contain the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS; break; case R0_R1_TOS: __ push(r1); __ mov(r1, r0); - // No need to change state as r0 and r1 now contains the same value. + // r0 and r1 contain the same value. Prefer a state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; break; case R1_R0_TOS: __ push(r0); __ mov(r0, r1); - // No need to change state as r0 and r1 now contains the same value. + // r0 and r1 contain the same value. Prefer a state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; break; default: UNREACHABLE(); @@ -528,6 +533,45 @@ } +void VirtualFrame::Dup2() { + if (SpilledScope::is_spilled()) { + __ ldr(ip, MemOperand(sp, kPointerSize)); + __ push(ip); + __ ldr(ip, MemOperand(sp, kPointerSize)); + __ push(ip); + } else { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_TOS: + __ push(r0); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ push(r1); + __ ldr(r0, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R1_R0_TOS; + break; + case R0_R1_TOS: + __ Push(r1, r0); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_R0_TOS: + __ Push(r0, r1); + top_of_stack_state_ = R1_R0_TOS; + break; + default: + UNREACHABLE(); + } + } + element_count_ += 2; +} + + Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { ASSERT(but_not_to_this_one.is(r0) || but_not_to_this_one.is(r1) || diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index 77bc70ec33..b255929e2b 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -316,8 +316,8 @@ class VirtualFrame : public ZoneObject { // Result is returned in r0. void CallStoreIC(Handle<String> name, bool is_contextual); - // Call keyed load IC. Key and receiver are on the stack. Result is returned - // in r0. + // Call keyed load IC. Key and receiver are on the stack. Both are consumed. + // Result is returned in r0. void CallKeyedLoadIC(); // Call keyed store IC. Key and receiver are on the stack and the value is in @@ -355,6 +355,9 @@ class VirtualFrame : public ZoneObject { // Duplicate the top of stack. void Dup(); + // Duplicate the two elements on top of stack. + void Dup2(); + // Flushes all registers, but it puts a copy of the top-of-stack in r0. void SpillAllButCopyTOSToR0(); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ac03c200c2..87f363b0c0 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -670,16 +670,6 @@ ExternalReference ExternalReference::scheduled_exception_address() { } -ExternalReference ExternalReference::compile_array_pop_call() { - return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall)); -} - - -ExternalReference ExternalReference::compile_array_push_call() { - return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall)); -} - - #ifndef V8_INTERPRETED_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state() { diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 03a2f8ea1c..5d03c1f854 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -444,9 +444,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference scheduled_exception_address(); - static ExternalReference compile_array_pop_call(); - static ExternalReference compile_array_push_call(); - Address address() const {return reinterpret_cast<Address>(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 657d0dc3da..df1e98a66b 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -37,6 +37,7 @@ #include "macro-assembler.h" #include "natives.h" #include "snapshot.h" +#include "stub-cache.h" namespace v8 { namespace internal { @@ -228,6 +229,7 @@ class Genesis BASE_EMBEDDED { // Used for creating a context from scratch. void InstallNativeFunctions(); bool InstallNatives(); + void InstallCustomCallGenerators(); void InstallJSFunctionResultCaches(); // Used both for deserialized and from-scratch contexts to add the extensions // provided. @@ -1229,6 +1231,8 @@ bool Genesis::InstallNatives() { InstallNativeFunctions(); + InstallCustomCallGenerators(); + // Install Function.prototype.call and apply. { Handle<String> key = Factory::function_class_symbol(); Handle<JSFunction> function = @@ -1326,6 +1330,29 @@ } +static void InstallCustomCallGenerator(Handle<JSFunction> holder_function, + const char* function_name, + int id) { + Handle<JSObject> proto(JSObject::cast(holder_function->instance_prototype())); + Handle<String> name = Factory::LookupAsciiSymbol(function_name); + Handle<JSFunction> function(JSFunction::cast(proto->GetProperty(*name))); + function->shared()->set_function_data(Smi::FromInt(id)); +} + + +void Genesis::InstallCustomCallGenerators() { + HandleScope scope; +#define INSTALL_CALL_GENERATOR(holder_fun, fun_name, name) \ + { \ + Handle<JSFunction> holder(global_context()->holder_fun##_function()); \ + const int id = CallStubCompiler::k##name##CallGenerator; \ + InstallCustomCallGenerator(holder, #fun_name, id); \ + } + CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR) +#undef INSTALL_CALL_GENERATOR +} + + // Do not forget to update macros.py with named constant // of cache id. #define JSFUNCTION_RESULT_CACHE_LIST(F) \ diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 66b8ff478e..2b789e28ea 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -80,10 +80,6 @@ class Bootstrapper : public AllStatic { // Tells whether bootstrapping is active. static bool IsActive() { return BootstrapperActive::IsActive(); } - // Encoding/decoding support for fixup flags. - class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {}; - class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {}; - // Support for thread preemption.
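The InstallCustomCallGenerators machinery above replaces per-function ExternalReferences with a small integer id stamped into the function's function_data slot as a Smi; CompileCallConstant later reads it back via custom_call_generator_id() and dispatches through CompileCustomCall. The following toy C++ model of that id-based dispatch is our own illustration, with invented types; V8's real registry and handle types differ.

#include <cstdio>

// Each optimizable builtin gets a small integer id. Installation stamps the
// id onto the function; the stub compiler later dispatches on it.
typedef const char* (*CallGenerator)();

static const char* ArrayPushGenerator() { return "specialized Array.push stub"; }
static const char* ArrayPopGenerator()  { return "specialized Array.pop stub"; }

static CallGenerator kGenerators[] = { ArrayPushGenerator, ArrayPopGenerator };

struct Function {
  int custom_call_generator_id;  // plays the role of the Smi in function_data
};

const char* CompileCustomCall(const Function& f) {
  return kGenerators[f.custom_call_generator_id]();
}

int main() {
  Function push = { 0 };
  Function pop = { 1 };
  std::printf("%s\n%s\n", CompileCustomCall(push), CompileCustomCall(pop));
  return 0;
}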
static int ArchiveSpacePerThread(); static char* ArchiveState(char* to); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index e6cbd94f8c..4971275792 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -377,7 +377,7 @@ static Object* CallJsBuiltin(const char* name, name); ASSERT(js_builtin->IsJSFunction()); Handle function(Handle::cast(js_builtin)); - Vector argv(Vector::New(args.length() - 1)); + ScopedVector argv(args.length() - 1); int n_args = args.length() - 1; for (int i = 0; i < n_args; i++) { argv[i] = args.at(i + 1).location(); @@ -388,7 +388,6 @@ static Object* CallJsBuiltin(const char* name, n_args, argv.start(), &pending_exception); - argv.Dispose(); if (pending_exception) return Failure::Exception(); return *result; } diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 66faae8625..1e2bb20c4f 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -31,8 +31,8 @@ #include "v8.h" #include "conversions-inl.h" +#include "dtoa.h" #include "factory.h" -#include "fast-dtoa.h" #include "scanner.h" namespace v8 { @@ -766,15 +766,16 @@ const char* DoubleToCString(double v, Vector buffer) { default: { int decimal_point; int sign; - char* decimal_rep; bool used_gay_dtoa = false; - const int kFastDtoaBufferCapacity = kFastDtoaMaximalLength + 1; - char fast_dtoa_buffer[kFastDtoaBufferCapacity]; + const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1; + char v8_dtoa_buffer[kV8DtoaBufferCapacity]; int length; - if (FastDtoa(v, Vector(fast_dtoa_buffer, kFastDtoaBufferCapacity), - &sign, &length, &decimal_point)) { - decimal_rep = fast_dtoa_buffer; + + if (DoubleToAscii(v, DTOA_SHORTEST, 0, + Vector(v8_dtoa_buffer, kV8DtoaBufferCapacity), + &sign, &length, &decimal_point)) { + decimal_rep = v8_dtoa_buffer; } else { decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL); used_gay_dtoa = true; @@ -842,7 +843,11 @@ const char* IntToCString(int n, Vector buffer) { char* DoubleToFixedCString(double value, int f) { + const int kMaxDigitsBeforePoint = 20; + const double kFirstNonFixed = 1e21; + const int kMaxDigitsAfterPoint = 20; ASSERT(f >= 0); + ASSERT(f <= kMaxDigitsAfterPoint); bool negative = false; double abs_value = value; @@ -851,7 +856,9 @@ char* DoubleToFixedCString(double value, int f) { negative = true; } - if (abs_value >= 1e21) { + // If abs_value has more than kMaxDigitsBeforePoint digits before the point + // use the non-fixed conversion routine. + if (abs_value >= kFirstNonFixed) { char arr[100]; Vector buffer(arr, ARRAY_SIZE(arr)); return StrDup(DoubleToCString(value, buffer)); @@ -860,8 +867,16 @@ char* DoubleToFixedCString(double value, int f) { // Find a sufficiently precise decimal representation of n. int decimal_point; int sign; - char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL); - int decimal_rep_length = StrLength(decimal_rep); + // Add space for the '.' and the '\0' byte. + const int kDecimalRepCapacity = + kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 2; + char decimal_rep[kDecimalRepCapacity]; + int decimal_rep_length; + bool status = DoubleToAscii(value, DTOA_FIXED, f, + Vector(decimal_rep, kDecimalRepCapacity), + &sign, &decimal_rep_length, &decimal_point); + USE(status); + ASSERT(status); // Create a representation that is padded with zeros if needed. 
int zero_prefix_length = 0; @@ -884,7 +899,6 @@ char* DoubleToFixedCString(double value, int f) { rep_builder.AddString(decimal_rep); rep_builder.AddPadding('0', zero_postfix_length); char* rep = rep_builder.Finalize(); - freedtoa(decimal_rep); // Create the result string by appending a minus and putting in a // decimal point if needed. diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc index e68532f686..6d8048876f 100644 --- a/deps/v8/src/dateparser.cc +++ b/deps/v8/src/dateparser.cc @@ -33,14 +33,10 @@ namespace v8 { namespace internal { bool DateParser::DayComposer::Write(FixedArray* output) { - // Set year to 0 by default. - if (index_ < 1) { - comp_[index_++] = 1; - } - + if (index_ < 1) return false; // Day and month defaults to 1. while (index_ < kSize) { - comp_[index_++] = 1; + comp_[index_++] = 1; } int year = 0; // Default year is 0 (=> 2000) for KJS compatibility. @@ -48,7 +44,6 @@ bool DateParser::DayComposer::Write(FixedArray* output) { int day = kNone; if (named_month_ == kNone) { - if (index_ < 2) return false; if (index_ == 3 && !IsDay(comp_[0])) { // YMD year = comp_[0]; @@ -62,7 +57,6 @@ bool DateParser::DayComposer::Write(FixedArray* output) { } } else { month = named_month_; - if (index_ < 1) return false; if (index_ == 1) { // MD or DM day = comp_[0]; diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index 41151d8076..e2d9304338 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -181,15 +181,15 @@ void DebuggerAgentSession::Run() { buf.GetNext(); len++; } - int16_t* temp = NewArray(len + 1); + ScopedVector temp(len + 1); buf.Reset(*message, StrLength(*message)); for (int i = 0; i < len; i++) { temp[i] = buf.GetNext(); } // Send the request received to the debugger. - v8::Debug::SendCommand(reinterpret_cast(temp), len); - DeleteArray(temp); + v8::Debug::SendCommand(reinterpret_cast(temp.start()), + len); } } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 729f0ab304..bf1f893b7d 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -52,14 +52,13 @@ namespace internal { #ifdef ENABLE_DEBUGGER_SUPPORT static void PrintLn(v8::Local value) { v8::Local s = value->ToString(); - char* data = NewArray(s->Length() + 1); - if (data == NULL) { + ScopedVector data(s->Length() + 1); + if (data.start() == NULL) { V8::FatalProcessOutOfMemory("PrintLn"); return; } - s->WriteAscii(data); - PrintF("%s\n", data); - DeleteArray(data); + s->WriteAscii(data.start()); + PrintF("%s\n", data.start()); } @@ -431,8 +430,13 @@ void BreakLocationIterator::SetDebugBreakAtIC() { // is set the patching performed by the runtime system will take place in // the code copy and will therefore have no effect on the running code // keeping it from using the inlined code. - if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc()); - if (code->is_keyed_store_stub()) KeyedStoreIC::ClearInlinedVersion(pc()); + if (code->is_keyed_load_stub()) { + KeyedLoadIC::ClearInlinedVersion(pc()); + } else if (code->is_keyed_store_stub()) { + KeyedStoreIC::ClearInlinedVersion(pc()); + } else if (code->is_load_stub()) { + LoadIC::ClearInlinedVersion(pc()); + } } } diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc new file mode 100644 index 0000000000..e3dcbf2d61 --- /dev/null +++ b/deps/v8/src/dtoa.cc @@ -0,0 +1,77 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "v8.h" +#include "dtoa.h" + +#include "double.h" +#include "fast-dtoa.h" +#include "fixed-dtoa.h" + +namespace v8 { +namespace internal { + +bool DoubleToAscii(double v, DtoaMode mode, int requested_digits, + Vector buffer, int* sign, int* length, int* point) { + ASSERT(!Double(v).IsSpecial()); + ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0); + + if (Double(v).Sign() < 0) { + *sign = 1; + v = -v; + } else { + *sign = 0; + } + + if (v == 0) { + buffer[0] = '0'; + buffer[1] = '\0'; + *length = 1; + *point = 1; + return true; + } + + if (mode == DTOA_PRECISION && requested_digits == 0) { + buffer[0] = '\0'; + *length = 0; + return true; + } + + switch (mode) { + case DTOA_SHORTEST: + return FastDtoa(v, buffer, length, point); + case DTOA_FIXED: + return FastFixedDtoa(v, requested_digits, buffer, length, point); + default: + break; + } + return false; +} + +} } // namespace v8::internal diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/dtoa.h new file mode 100644 index 0000000000..be0d5456b2 --- /dev/null +++ b/deps/v8/src/dtoa.h @@ -0,0 +1,81 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_DTOA_H_ +#define V8_DTOA_H_ + +namespace v8 { +namespace internal { + +enum DtoaMode { + // 0.9999999999999999 becomes 0.1 + DTOA_SHORTEST, + // Fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + DTOA_FIXED, + // Fixed number of digits (independent of the decimal point). + DTOA_PRECISION +}; + +// The maximal length of digits a double can have in base 10. +// Note that DoubleToAscii null-terminates the buffer it is given. So the +// buffer should be at least kBase10MaximalLength + 1 characters long. +static const int kBase10MaximalLength = 17; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// +// The output depends on the given mode: +// - SHORTEST: produce the smallest number of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. The buffer will choose the representation that is closest to +// 'v'. If there are two at the same distance, then the one farther away +// from 0 is chosen (halfway cases - ending with 5 - are rounded up). +// In this mode the 'requested_digits' parameter is ignored. +// - FIXED: produces digits necessary to print a given number with +// 'requested_digits' digits after the decimal point. The produced digits +// might be too short, in which case the caller has to fill the gaps with '0's. +// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. +// Halfway cases are rounded towards +/-Infinity (away from 0). The call +// toFixed(0.15, 2) thus returns buffer="2", point=0. +// The returned buffer may contain digits that would be truncated from the +// shortest representation of the input. +// - PRECISION: produces 'requested_digits' where the first digit is not '0'. +// Even though the length of produced digits usually equals +// 'requested_digits', the function is allowed to return fewer digits, in +// which case the caller has to fill the missing digits with '0's. +// Halfway cases are again rounded away from 0. +// 'DoubleToAscii' expects the given buffer to be big enough to hold all digits +// and a terminating null-character.
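A worked example of the digits/point contract documented above, ahead of the DoubleToAscii declaration that follows: the output digits are read as buffer * 10^(point - length). For v = 3.5 in DTOA_SHORTEST mode the digits would be "35" with point = 1. The C++ below is our own check of that arithmetic, not patch code, and its scaling loop assumes point <= length.

#include <cstdio>
#include <cstring>

int main() {
  const char digits[] = "35";  // shortest digits for 3.5
  int point = 1;               // one digit lies before the decimal point
  int length = static_cast<int>(std::strlen(digits));
  double value = 0.0;
  for (int i = 0; i < length; ++i) value = value * 10 + (digits[i] - '0');
  // Scale by 10^(point - length): here 10^-1, recovering 3.5.
  for (int i = 0; i < length - point; ++i) value /= 10;
  std::printf("%g\n", value);  // 3.5
  return 0;
}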
+bool DoubleToAscii(double v, DtoaMode mode, int requested_digits, + Vector buffer, int* sign, int* length, int* point); + +} } // namespace v8::internal + +#endif // V8_DTOA_H_ diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc index 4c0d15d68a..b4b7be053f 100644 --- a/deps/v8/src/fast-dtoa.cc +++ b/deps/v8/src/fast-dtoa.cc @@ -314,7 +314,7 @@ static void BiggestPowerTen(uint32_t number, // w's fractional part is therefore 0x567890abcdef. // Printing w's integral part is easy (simply print 0x1234 in decimal). // In order to print its fraction we repeatedly multiply the fraction by 10 and -// get each digit. Example the first digit after the comma would be computed by +// get each digit. Example the first digit after the point would be computed by // (0x567890abcdef * 10) >> 48. -> 3 // The whole thing becomes slightly more complicated because we want to stop // once we have enough digits. That is, once the digits inside the buffer @@ -490,18 +490,11 @@ bool grisu3(double v, Vector buffer, int* length, int* decimal_exponent) { bool FastDtoa(double v, Vector buffer, - int* sign, int* length, int* point) { - ASSERT(v != 0); + ASSERT(v > 0); ASSERT(!Double(v).IsSpecial()); - if (v < 0) { - v = -v; - *sign = 1; - } else { - *sign = 0; - } int decimal_exponent; bool result = grisu3(v, buffer, length, &decimal_exponent); *point = *length + decimal_exponent; diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h index 9f1f76aeb7..4403a75029 100644 --- a/deps/v8/src/fast-dtoa.h +++ b/deps/v8/src/fast-dtoa.h @@ -36,7 +36,7 @@ namespace internal { static const int kFastDtoaMaximalLength = 17; // Provides a decimal representation of v. -// v must not be (positive or negative) zero and it must not be Infinity or NaN. +// v must be a strictly positive finite double. // Returns true if it succeeds, otherwise the result can not be trusted. // There will be *length digits inside the buffer followed by a null terminator. // If the function returns true then @@ -50,7 +50,6 @@ static const int kFastDtoaMaximalLength = 17; // otherwise. bool FastDtoa(double d, Vector buffer, - int* sign, int* length, int* point); diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc new file mode 100644 index 0000000000..8ad88f6528 --- /dev/null +++ b/deps/v8/src/fixed-dtoa.cc @@ -0,0 +1,405 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "v8.h" + +#include "double.h" +#include "fixed-dtoa.h" + +namespace v8 { +namespace internal { + +// Represents a 128bit type. This class should be replaced by a native type on +// platforms that support 128bit integers. +class UInt128 { + public: + UInt128() : high_bits_(0), low_bits_(0) { } + UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { } + + void Multiply(uint32_t multiplicand) { + uint64_t accumulator; + + accumulator = (low_bits_ & kMask32) * multiplicand; + uint32_t part = static_cast(accumulator & kMask32); + accumulator >>= 32; + accumulator = accumulator + (low_bits_ >> 32) * multiplicand; + low_bits_ = (accumulator << 32) + part; + accumulator >>= 32; + accumulator = accumulator + (high_bits_ & kMask32) * multiplicand; + part = static_cast(accumulator & kMask32); + accumulator >>= 32; + accumulator = accumulator + (high_bits_ >> 32) * multiplicand; + high_bits_ = (accumulator << 32) + part; + ASSERT((accumulator >> 32) == 0); + } + + void Shift(int shift_amount) { + ASSERT(-64 <= shift_amount && shift_amount <= 64); + if (shift_amount == 0) { + return; + } else if (shift_amount == -64) { + high_bits_ = low_bits_; + low_bits_ = 0; + } else if (shift_amount == 64) { + low_bits_ = high_bits_; + high_bits_ = 0; + } else if (shift_amount <= 0) { + high_bits_ <<= -shift_amount; + high_bits_ += low_bits_ >> (64 + shift_amount); + low_bits_ <<= -shift_amount; + } else { + low_bits_ >>= shift_amount; + low_bits_ += high_bits_ << (64 - shift_amount); + high_bits_ >>= shift_amount; + } + } + + // Modifies *this to *this MOD (2^power). + // Returns *this DIV (2^power). + int DivModPowerOf2(int power) { + if (power >= 64) { + int result = static_cast(high_bits_ >> (power - 64)); + high_bits_ -= static_cast(result) << (power - 64); + return result; + } else { + uint64_t part_low = low_bits_ >> power; + uint64_t part_high = high_bits_ << (64 - power); + int result = static_cast(part_low + part_high); + high_bits_ = 0; + low_bits_ -= part_low << power; + return result; + } + } + + bool IsZero() const { + return high_bits_ == 0 && low_bits_ == 0; + } + + int BitAt(int position) { + if (position >= 64) { + return static_cast(high_bits_ >> (position - 64)) & 1; + } else { + return static_cast(low_bits_ >> position) & 1; + } + } + + private: + static const uint64_t kMask32 = 0xFFFFFFFF; + // Value == (high_bits_ << 64) + low_bits_ + uint64_t high_bits_; + uint64_t low_bits_; +}; + + +static const int kDoubleSignificandSize = 53; // Includes the hidden bit. + + +static void FillDigits32FixedLength(uint32_t number, int requested_length, + Vector buffer, int* length) { + for (int i = requested_length - 1; i >= 0; --i) { + buffer[(*length) + i] = '0' + number % 10; + number /= 10; + } + *length += requested_length; +} + + +static void FillDigits32(uint32_t number, Vector buffer, int* length) { + int number_length = 0; + // We fill the digits in reverse order and exchange them afterwards. 
+ while (number != 0) { + int digit = number % 10; + number /= 10; + buffer[(*length) + number_length] = '0' + digit; + number_length++; + } + // Exchange the digits. + int i = *length; + int j = *length + number_length - 1; + while (i < j) { + char tmp = buffer[i]; + buffer[i] = buffer[j]; + buffer[j] = tmp; + i++; + j--; + } + *length += number_length; +} + + +static void FillDigits64FixedLength(uint64_t number, int requested_length, + Vector buffer, int* length) { + const uint32_t kTen7 = 10000000; + // For efficiency cut the number into 3 uint32_t parts, and print those. + uint32_t part2 = static_cast(number % kTen7); + number /= kTen7; + uint32_t part1 = static_cast(number % kTen7); + uint32_t part0 = static_cast(number / kTen7); + + FillDigits32FixedLength(part0, 3, buffer, length); + FillDigits32FixedLength(part1, 7, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); +} + + +static void FillDigits64(uint64_t number, Vector buffer, int* length) { + const uint32_t kTen7 = 10000000; + // For efficiency cut the number into 3 uint32_t parts, and print those. + uint32_t part2 = static_cast(number % kTen7); + number /= kTen7; + uint32_t part1 = static_cast(number % kTen7); + uint32_t part0 = static_cast(number / kTen7); + + if (part0 != 0) { + FillDigits32(part0, buffer, length); + FillDigits32FixedLength(part1, 7, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); + } else if (part1 != 0) { + FillDigits32(part1, buffer, length); + FillDigits32FixedLength(part2, 7, buffer, length); + } else { + FillDigits32(part2, buffer, length); + } +} + + +static void RoundUp(Vector buffer, int* length, int* decimal_point) { + // An empty buffer represents 0. + if (*length == 0) { + buffer[0] = '1'; + *decimal_point = 1; + *length = 1; + return; + } + // Round the last digit until we either have a digit that was not '9' or until + // we reached the first digit. + buffer[(*length) - 1]++; + for (int i = (*length) - 1; i > 0; --i) { + if (buffer[i] != '0' + 10) { + return; + } + buffer[i] = '0'; + buffer[i - 1]++; + } + // If the first digit is now '0' + 10, we would need to set it to '0' and add + // a '1' in front. However we reach the first digit only if all following + // digits had been '9' before rounding up. Now all trailing digits are '0' and + // we simply switch the first digit to '1' and update the decimal-point + // (indicating that the point is now one digit to the right). + if (buffer[0] == '0' + 10) { + buffer[0] = '1'; + (*decimal_point)++; + } +} + + +// The given fractionals number represents a fixed-point number with binary +// point at bit (-exponent). +// Preconditions: +// -128 <= exponent <= 0. +// 0 <= fractionals * 2^exponent < 1 +// The buffer holds the result. +// The function will round its result. During the rounding-process digits not +// generated by this function might be updated, and the decimal-point variable +// might be updated. If this function generates the digits 99 and the buffer +// already contained "199" (thus yielding a buffer of "19999") then a +// rounding-up will change the contents of the buffer to "20000". +static void FillFractionals(uint64_t fractionals, int exponent, + int fractional_count, Vector buffer, + int* length, int* decimal_point) { + ASSERT(-128 <= exponent && exponent <= 0); + // 'fractionals' is a fixed-point number, with binary point at bit + // (-exponent). Inside the function the non-converted remainder of fractionals + // is a fixed-point number, with binary point at bit 'point'. 
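An aside on the FillDigits64 helpers above (FillFractionals' body continues below): splitting a uint64 into three base-10^7 chunks keeps the per-chunk arithmetic in 32 bits, with the two lower chunks printed zero-padded to a fixed seven digits. A standalone C++ check of the chunking, written by us rather than taken from the patch:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kTen7 = 10000000;
  uint64_t number = 12345678901234567ULL;
  uint32_t part2 = static_cast<uint32_t>(number % kTen7);
  number /= kTen7;
  uint32_t part1 = static_cast<uint32_t>(number % kTen7);
  uint32_t part0 = static_cast<uint32_t>(number / kTen7);
  // part0 prints without padding; part1 and part2 are fixed length.
  std::printf("%u%07u%07u\n", part0, part1, part2);  // 12345678901234567
  return 0;
}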
+ if (-exponent <= 64) { + // One 64 bit number is sufficient. + ASSERT(fractionals >> 56 == 0); + int point = -exponent; + for (int i = 0; i < fractional_count; ++i) { + if (fractionals == 0) break; + // Instead of multiplying by 10 we multiply by 5 and adjust the point + // location. This way the fractionals variable will not overflow. + // Invariant at the beginning of the loop: fractionals < 2^point. + // Initially we have: point <= 64 and fractionals < 2^56 + // After each iteration the point is decremented by one. + // Note that 5^3 = 125 < 128 = 2^7. + // Therefore three iterations of this loop will not overflow fractionals + // (even without the subtraction at the end of the loop body). At this + // time point will satisfy point <= 61 and therefore fractionals < 2^point + // and any further multiplication of fractionals by 5 will not overflow. + fractionals *= 5; + point--; + int digit = static_cast(fractionals >> point); + buffer[*length] = '0' + digit; + (*length)++; + fractionals -= static_cast(digit) << point; + } + // If the first bit after the point is set we have to round up. + if (((fractionals >> (point - 1)) & 1) == 1) { + RoundUp(buffer, length, decimal_point); + } + } else { // We need 128 bits. + ASSERT(64 < -exponent && -exponent <= 128); + UInt128 fractionals128 = UInt128(fractionals, 0); + fractionals128.Shift(-exponent - 64); + int point = 128; + for (int i = 0; i < fractional_count; ++i) { + if (fractionals128.IsZero()) break; + // As before: instead of multiplying by 10 we multiply by 5 and adjust the + // point location. + // This multiplication will not overflow for the same reasons as before. + fractionals128.Multiply(5); + point--; + int digit = fractionals128.DivModPowerOf2(point); + buffer[*length] = '0' + digit; + (*length)++; + } + if (fractionals128.BitAt(point - 1) == 1) { + RoundUp(buffer, length, decimal_point); + } + } +} + + +// Removes leading and trailing zeros. +// If leading zeros are removed then the decimal point position is adjusted. +static void TrimZeros(Vector buffer, int* length, int* decimal_point) { + while (*length > 0 && buffer[(*length) - 1] == '0') { + (*length)--; + } + int first_non_zero = 0; + while (first_non_zero < *length && buffer[first_non_zero] == '0') { + first_non_zero++; + } + if (first_non_zero != 0) { + for (int i = first_non_zero; i < *length; ++i) { + buffer[i - first_non_zero] = buffer[i]; + } + *length -= first_non_zero; + *decimal_point -= first_non_zero; + } +} + + +bool FastFixedDtoa(double v, + int fractional_count, + Vector buffer, + int* length, + int* decimal_point) { + const uint32_t kMaxUInt32 = 0xFFFFFFFF; + uint64_t significand = Double(v).Significand(); + int exponent = Double(v).Exponent(); + // v = significand * 2^exponent (with significand a 53bit integer). + // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we + // don't know how to compute the representation. 2^73 ~= 9.5*10^21. + // If necessary this limit could probably be increased, but we don't need + // more. + if (exponent > 20) return false; + if (fractional_count > 20) return false; + *length = 0; + // At most kDoubleSignificandSize bits of the significand are non-zero. + // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero + // bits: 0..11*..0xxx..53*..xx + if (exponent + kDoubleSignificandSize > 64) { + // The exponent must be > 11. + // + // We know that v = significand * 2^exponent. + // And the exponent > 11. + // We simplify the task by dividing v by 10^17. 
+ // The quotient delivers the first digits, and the remainder fits into a 64 + // bit number. + // Dividing by 10^17 is equivalent to dividing by 5^17*2^17. + const uint64_t kFive17 = V8_2PART_UINT64_C(0xB1, A2BC2EC5); // 5^17 + uint64_t divisor = kFive17; + int divisor_power = 17; + uint64_t dividend = significand; + uint32_t quotient; + uint64_t remainder; + // Let v = f * 2^e with f == significand and e == exponent. + // Then need q (quotient) and r (remainder) as follows: + // v = q * 10^17 + r + // f * 2^e = q * 10^17 + r + // f * 2^e = q * 5^17 * 2^17 + r + // If e > 17 then + // f * 2^(e-17) = q * 5^17 + r/2^17 + // else + // f = q * 5^17 * 2^(17-e) + r/2^e + if (exponent > divisor_power) { + // We only allow exponents of up to 20 and therefore (e - 17) <= 3 + dividend <<= exponent - divisor_power; + quotient = static_cast<uint32_t>(dividend / divisor); + remainder = (dividend % divisor) << divisor_power; + } else { + divisor <<= divisor_power - exponent; + quotient = static_cast<uint32_t>(dividend / divisor); + remainder = (dividend % divisor) << exponent; + } + FillDigits32(quotient, buffer, length); + FillDigits64FixedLength(remainder, divisor_power, buffer, length); + *decimal_point = *length; + } else if (exponent >= 0) { + // 0 <= exponent <= 11 + significand <<= exponent; + FillDigits64(significand, buffer, length); + *decimal_point = *length; + } else if (exponent > -kDoubleSignificandSize) { + // We have to cut the number. + uint64_t integrals = significand >> -exponent; + uint64_t fractionals = significand - (integrals << -exponent); + if (integrals > kMaxUInt32) { + FillDigits64(integrals, buffer, length); + } else { + FillDigits32(static_cast<uint32_t>(integrals), buffer, length); + } + *decimal_point = *length; + FillFractionals(fractionals, exponent, fractional_count, + buffer, length, decimal_point); + } else if (exponent < -128) { + // This configuration (with at most 20 digits) means that all digits must be + // 0. + ASSERT(fractional_count <= 20); + buffer[0] = '\0'; + *length = 0; + *decimal_point = -fractional_count; + } else { + *decimal_point = 0; + FillFractionals(significand, exponent, fractional_count, + buffer, length, decimal_point); + } + TrimZeros(buffer, length, decimal_point); + buffer[*length] = '\0'; + if ((*length) == 0) { + // The string is empty and the decimal_point thus has no importance. Mimic + // Gay's dtoa and set it to -fractional_count. + *decimal_point = -fractional_count; + } + return true; +} + +} } // namespace v8::internal diff --git a/deps/v8/src/fixed-dtoa.h b/deps/v8/src/fixed-dtoa.h new file mode 100644 index 0000000000..93f826fe84 --- /dev/null +++ b/deps/v8/src/fixed-dtoa.h @@ -0,0 +1,55 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_FIXED_DTOA_H_ +#define V8_FIXED_DTOA_H_ + +namespace v8 { +namespace internal { + +// Produces digits necessary to print a given number with +// 'fractional_count' digits after the decimal point. +// The buffer must be big enough to hold the result plus one terminating null +// character. +// +// The produced digits might be too short in which case the caller has to fill +// the gaps with '0's. +// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and +// decimal_point = -2. +// Halfway cases are rounded towards +/-Infinity (away from 0). The call +// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0. +// The returned buffer may contain digits that would be truncated from the +// shortest representation of the input. +// +// This method only works for some parameters. If it can't handle the input it +// returns false. The output is null-terminated when the function succeeds. +bool FastFixedDtoa(double v, int fractional_count, + Vector<char> buffer, int* length, int* decimal_point); + +} } // namespace v8::internal + +#endif // V8_FIXED_DTOA_H_ diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc index d444c976fb..bbe6bb720e 100644 --- a/deps/v8/src/flags.cc +++ b/deps/v8/src/flags.cc @@ -470,12 +470,12 @@ static char* SkipBlackSpace(char* p) { // static int FlagList::SetFlagsFromString(const char* str, int len) { // make a 0-terminated copy of str - char* copy0 = NewArray<char>(len + 1); - memcpy(copy0, str, len); + ScopedVector<char> copy0(len + 1); + memcpy(copy0.start(), str, len); copy0[len] = '\0'; // strip leading white space - char* copy = SkipWhiteSpace(copy0); + char* copy = SkipWhiteSpace(copy0.start()); // count the number of 'arguments' int argc = 1; // be compatible with SetFlagsFromCommandLine() @@ -485,7 +485,7 @@ int FlagList::SetFlagsFromString(const char* str, int len) { } // allocate argument array - char** argv = NewArray<char*>(argc); + ScopedVector<char*> argv(argc); // split the flags string into arguments argc = 1; // be compatible with SetFlagsFromCommandLine() @@ -497,11 +497,7 @@ int FlagList::SetFlagsFromString(const char* str, int len) { } // set the flags - int result = SetFlagsFromCommandLine(&argc, argv, false); - - // cleanup - DeleteArray(argv); - DeleteArray(copy0); + int result = SetFlagsFromCommandLine(&argc, argv.start(), false); return result; } diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 98aaead28b..102244c9ba 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -357,6 +357,7 @@ class StandardFrame: public StackFrame { private: friend class StackFrame; + friend class StackFrameIterator; }; diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index bef5e8ed03..981ea16d72 ---
a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -46,6 +46,12 @@ namespace internal { #elif defined(__ARMEL__) #define V8_HOST_ARCH_ARM 1 #define V8_HOST_ARCH_32_BIT 1 +// Some CPU-OS combinations allow unaligned access on ARM. We assume +// that unaligned accesses are not allowed unless the build system +// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero. +#if CAN_USE_UNALIGNED_ACCESSES +#define V8_HOST_CAN_READ_UNALIGNED 1 +#endif #elif defined(_MIPS_ARCH_MIPS32R2) #define V8_HOST_ARCH_MIPS 1 #define V8_HOST_ARCH_32_BIT 1 @@ -73,6 +79,12 @@ namespace internal { #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) #define V8_TARGET_CAN_READ_UNALIGNED 1 #elif V8_TARGET_ARCH_ARM +// Some CPU-OS combinations allow unaligned access on ARM. We assume +// that unaligned accesses are not allowed unless the build system +// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero. +#if CAN_USE_UNALIGNED_ACCESSES +#define V8_TARGET_CAN_READ_UNALIGNED 1 +#endif #elif V8_TARGET_ARCH_MIPS #else #error Target architecture is not supported by v8 diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 193f082f35..0a276ca995 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -674,6 +674,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) { Top::MarkCompactPrologue(is_compacting); ThreadManager::MarkCompactPrologue(is_compacting); + CompletelyClearInstanceofCache(); + if (is_compacting) FlushNumberStringCache(); } @@ -1685,6 +1687,10 @@ bool Heap::CreateInitialObjects() { if (obj->IsFailure()) return false; set_non_monomorphic_cache(NumberDictionary::cast(obj)); + set_instanceof_cache_function(Smi::FromInt(0)); + set_instanceof_cache_map(Smi::FromInt(0)); + set_instanceof_cache_answer(Smi::FromInt(0)); + CreateFixedStubs(); if (InitializeNumberStringCache()->IsFailure()) return false; diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 902fc77ee6..b4af6d9c22 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -93,6 +93,9 @@ class ZoneScopeInfo; V(Map, proxy_map, ProxyMap) \ V(Object, nan_value, NanValue) \ V(Object, minus_zero_value, MinusZeroValue) \ + V(Object, instanceof_cache_function, InstanceofCacheFunction) \ + V(Object, instanceof_cache_map, InstanceofCacheMap) \ + V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ V(String, empty_string, EmptyString) \ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Map, neander_map, NeanderMap) \ @@ -361,6 +364,11 @@ class Heap : public AllStatic { // Allocates an empty code cache. static Object* AllocateCodeCache(); + // Clear the Instanceof cache (used when a prototype changes). + static void ClearInstanceofCache() { + set_instanceof_cache_function(the_hole_value()); + } + // Allocates and fully initializes a String. There are two String // encodings: ASCII and two byte. One should choose between the three string // allocation functions based on the encoding of the string buffer used to @@ -971,6 +979,8 @@ class Heap : public AllStatic { static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; } + static void ClearJSFunctionResultCaches(); + private: static int reserved_semispace_size_; static int max_semispace_size_; @@ -1171,6 +1181,13 @@ class Heap : public AllStatic { static void MarkCompactPrologue(bool is_compacting); static void MarkCompactEpilogue(bool is_compacting); + // Completely clear the Instanceof cache (to stop it keeping objects alive + // around a GC). 
+ static void CompletelyClearInstanceofCache() { + set_instanceof_cache_map(the_hole_value()); + set_instanceof_cache_function(the_hole_value()); + } + // Helper function used by CopyObject to copy a source object to an // allocated target object and update the forwarding pointer in the source // object. Returns the target object. @@ -1178,8 +1195,6 @@ class Heap : public AllStatic { HeapObject* target, int size); - static void ClearJSFunctionResultCaches(); - #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Record the copy of an object in the NewSpace's statistics. static void RecordCopiedObject(HeapObject* obj); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 83060c1c28..63286a762a 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -829,14 +829,6 @@ void CodeGenerator::LoadReference(Reference* ref) { } -void CodeGenerator::UnloadReference(Reference* ref) { - // Pop a reference from the stack while preserving TOS. - Comment cmnt(masm_, "[ UnloadReference"); - frame_->Nip(ref->size()); - ref->set_unloaded(); -} - - // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and // convert it to a boolean in the condition code register or jump to // 'false_target'/'true_target' as appropriate. @@ -1426,6 +1418,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, Result* left, Result* right, OverwriteMode overwrite_mode) { + // Copy the type info because left and right may be overwritten. + TypeInfo left_type_info = left->type_info(); + TypeInfo right_type_info = right->type_info(); Token::Value op = expr->op(); Result answer; // Special handling of div and mod because they use fixed registers. @@ -1501,8 +1496,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, (op == Token::DIV) ? eax : edx, left->reg(), right->reg(), - left->type_info(), - right->type_info(), + left_type_info, + right_type_info, overwrite_mode); if (left->reg().is(right->reg())) { __ test(left->reg(), Immediate(kSmiTagMask)); @@ -1605,18 +1600,18 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, answer.reg(), left->reg(), ecx, - left->type_info(), - right->type_info(), + left_type_info, + right_type_info, overwrite_mode); Label do_op, left_nonsmi; // If right is a smi we make a fast case if left is either a smi // or a heapnumber. - if (CpuFeatures::IsSupported(SSE2) && right->type_info().IsSmi()) { + if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) { CpuFeatures::Scope use_sse2(SSE2); __ mov(answer.reg(), left->reg()); // Fast case - both are actually smis. - if (!left->type_info().IsSmi()) { + if (!left_type_info.IsSmi()) { __ test(answer.reg(), Immediate(kSmiTagMask)); __ j(not_zero, &left_nonsmi); } else { @@ -1640,7 +1635,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, deferred->Branch(negative); } else { CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(), - left->type_info(), right->type_info(), deferred); + left_type_info, right_type_info, deferred); // Untag both operands. 
__ mov(answer.reg(), left->reg()); @@ -1713,11 +1708,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, answer.reg(), left->reg(), right->reg(), - left->type_info(), - right->type_info(), + left_type_info, + right_type_info, overwrite_mode); CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(), - left->type_info(), right->type_info(), deferred); + left_type_info, right_type_info, deferred); __ mov(answer.reg(), left->reg()); switch (op) { @@ -1988,18 +1983,13 @@ void DeferredInlineSmiSub::Generate() { } -Result CodeGenerator::ConstantSmiBinaryOperation( - BinaryOperation* expr, - Result* operand, - Handle<Object> value, - bool reversed, - OverwriteMode overwrite_mode) { - // NOTE: This is an attempt to inline (a bit) more of the code for - // some possible smi operations (like + and -) when (at least) one - // of the operands is a constant smi. - // Consumes the argument "operand". - // TODO(199): Optimize some special cases of operations involving a - // smi literal (multiply by 2, shift by 0, etc.). +Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, + Result* operand, + Handle<Object> value, + bool reversed, + OverwriteMode overwrite_mode) { + // Generate inline code for a binary operation when one of the + // operands is a constant smi. Consumes the argument "operand". if (IsUnsafeSmi(value)) { Result unsafe_operand(value); if (reversed) { @@ -2499,7 +2489,9 @@ void CodeGenerator::Comparison(AstNode* node, // by reconstituting them on the non-fall-through path. if (left_side.is_smi()) { - if (FLAG_debug_code) __ AbortIfNotSmi(left_side.reg()); + if (FLAG_debug_code) { + __ AbortIfNotSmi(left_side.reg()); + } } else { JumpTarget is_smi; __ test(left_side.reg(), Immediate(kSmiTagMask)); @@ -2528,7 +2520,7 @@ void CodeGenerator::Comparison(AstNode* node, __ cvtsi2sd(xmm0, Operand(temp.reg())); temp.Unuse(); } - __ comisd(xmm1, xmm0); + __ ucomisd(xmm1, xmm0); // Jump to builtin for NaN. not_number.Branch(parity_even, &left_side); left_side.Unuse(); @@ -2819,11 +2811,7 @@ void CodeGenerator::Comparison(AstNode* node, // number comparison in the stub if it was inlined. CompareStub stub(cc, strict, nan_info, !inline_number_compare); Result answer = frame_->CallStub(&stub, &left_side, &right_side); - if (cc == equal) { - __ test(answer.reg(), Operand(answer.reg())); - } else { - __ cmp(answer.reg(), 0); - } + __ test(answer.reg(), Operand(answer.reg())); answer.Unuse(); dest->true_target()->Branch(cc); dest->false_target()->Jump(); @@ -4750,7 +4738,8 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads. + // This rules out argument loads because eval forces arguments + // access to be through the arguments object.
if (potential_slot != NULL) { // Allocate a fresh register to use as a temp in // ContextSlotOperandCheckExtensions and to hold the result @@ -5774,11 +5763,66 @@ void CodeGenerator::VisitCall(Call* node) { } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { // ---------------------------------- - // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj + // JavaScript examples: + // + // with (obj) foo(1, 2, 3) // foo is in obj + // + // function f() {}; + // function g() { + // eval(...); + // f(); // f could be in extension object + // } // ---------------------------------- - // Load the function from the context. Sync the frame so we can - // push the arguments directly into place. + JumpTarget slow; + JumpTarget done; + + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + Result function; + if (var->mode() == Variable::DYNAMIC_GLOBAL) { + function = LoadFromGlobalSlotCheckExtensions(var->slot(), + NOT_INSIDE_TYPEOF, + &slow); + frame_->Push(&function); + LoadGlobalReceiver(); + done.Jump(); + + } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = var->local_if_not_shadowed()->slot(); + // Only generate the fast case for locals that rewrite to slots. + // This rules out argument loads because eval forces arguments + // access to be through the arguments object. + if (potential_slot != NULL) { + // Allocate a fresh register to use as a temp in + // ContextSlotOperandCheckExtensions and to hold the result + // value. + function = allocator()->Allocate(); + ASSERT(function.is_valid()); + __ mov(function.reg(), + ContextSlotOperandCheckExtensions(potential_slot, + function, + &slow)); + JumpTarget push_function_and_receiver; + if (potential_slot->var()->mode() == Variable::CONST) { + __ cmp(function.reg(), Factory::the_hole_value()); + push_function_and_receiver.Branch(not_equal, &function); + __ mov(function.reg(), Factory::undefined_value()); + } + push_function_and_receiver.Bind(&function); + frame_->Push(&function); + LoadGlobalReceiver(); + done.Jump(); + } + } + + slow.Bind(); + // Enter the runtime system to load the function from the context. + // Sync the frame so we can push the arguments directly into + // place. frame_->SyncRange(0, frame_->element_count() - 1); frame_->EmitPush(esi); frame_->EmitPush(Immediate(var->name())); @@ -5795,6 +5839,7 @@ void CodeGenerator::VisitCall(Call* node) { ASSERT(!allocator()->is_used(edx)); frame_->EmitPush(edx); + done.Bind(); // Call the function. CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); @@ -6582,14 +6627,120 @@ class DeferredSearchCache: public DeferredCode { virtual void Generate(); private: - Register dst_, cache_, key_; + Register dst_; // On invocation holds the Smi index of the finger; on exit + // holds the value being looked up. + Register cache_; // instance of JSFunctionResultCache. + Register key_; // key being looked up. }; +// Returns the position of the element at |index_as_smi| + |additional_offset| +// in the FixedArray whose pointer is held in |array|. |index_as_smi| is a Smi.
+static Operand ArrayElement(Register array, + Register index_as_smi, + int additional_offset = 0) { + int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; + return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); +} + + void DeferredSearchCache::Generate() { - __ push(cache_); + Label first_loop, search_further, second_loop, cache_miss; + + // Smi-tagging is equivalent to multiplying by 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + + Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize); + Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex); + + // Check the cache from finger to start of the cache. + __ bind(&first_loop); + __ sub(Operand(dst_), Immediate(kEntrySizeSmi)); + __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi)); + __ j(less, &search_further); + + __ cmp(key_, ArrayElement(cache_, dst_)); + __ j(not_equal, &first_loop); + + __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); + __ mov(dst_, ArrayElement(cache_, dst_, 1)); + __ jmp(exit_label()); + + __ bind(&search_further); + + // Check the cache from end of cache up to finger. + __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset)); + + __ bind(&second_loop); + __ sub(Operand(dst_), Immediate(kEntrySizeSmi)); + // Consider prefetching into some reg. + __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset)); + __ j(less_equal, &cache_miss); + + __ cmp(key_, ArrayElement(cache_, dst_)); + __ j(not_equal, &second_loop); + + __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); + __ mov(dst_, ArrayElement(cache_, dst_, 1)); + __ jmp(exit_label()); + + __ bind(&cache_miss); + __ push(cache_); // store a reference to cache + __ push(key_); // store a key + Handle<Object> receiver(Top::global_context()->global()); + __ push(Immediate(receiver)); __ push(key_); - __ CallRuntime(Runtime::kGetFromCache, 2); + // On ia32 function must be in edi. + __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset)); + ParameterCount expected(1); + __ InvokeFunction(edi, expected, CALL_FUNCTION); + + // Find a place to put new cached value into. + Label add_new_entry, update_cache; + __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache + // Possible optimization: cache size is constant for the given cache + // so technically we could use a constant here. However, if we have + // a cache miss this optimization would hardly matter. + + // Check if we could add a new entry to the cache. + __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset)); + __ SmiTag(ebx); + __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset)); + __ j(greater, &add_new_entry); + + // Check if we could evict the entry after the finger. + __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset)); + __ add(Operand(edx), Immediate(kEntrySizeSmi)); + __ cmp(ebx, Operand(edx)); + __ j(greater, &update_cache); + + // Need to wrap over the cache. + __ mov(edx, Immediate(kEntriesIndexSmi)); + __ jmp(&update_cache); + + __ bind(&add_new_entry); + __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset)); + __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1)); + __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx); + + // Update the cache itself. + // edx holds the index. + __ bind(&update_cache); + __ pop(ebx); // restore the key + __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx); + // Store key.
+ __ mov(ArrayElement(ecx, edx), ebx); + __ RecordWrite(ecx, 0, ebx, edx); + + // Store value. + __ pop(ecx); // restore the cache. + __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset)); + __ add(Operand(edx), Immediate(Smi::FromInt(1))); + __ mov(ebx, eax); + __ mov(ArrayElement(ecx, edx), ebx); + __ RecordWrite(ecx, 0, ebx, edx); + if (!dst_.is(eax)) { __ mov(dst_, eax); } @@ -6631,21 +6782,14 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { cache.reg(), key.reg()); - const int kFingerOffset = - FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex); // tmp.reg() now holds finger offset as a smi. ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ mov(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset)); - __ cmp(key.reg(), FieldOperand(cache.reg(), - tmp.reg(), // as smi - times_half_pointer_size, - FixedArray::kHeaderSize)); + __ mov(tmp.reg(), FieldOperand(cache.reg(), + JSFunctionResultCache::kFingerOffset)); + __ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg())); deferred->Branch(not_equal); - __ mov(tmp.reg(), FieldOperand(cache.reg(), - tmp.reg(), // as smi - times_half_pointer_size, - kPointerSize + FixedArray::kHeaderSize)); + __ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1)); deferred->BindExit(); frame_->Push(&tmp); @@ -10958,7 +11102,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // string length. A negative value will be greater (unsigned comparison). __ mov(eax, Operand(esp, kPreviousIndexOffset)); __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); + __ j(not_zero, &runtime); __ cmp(eax, Operand(ebx)); __ j(above_equal, &runtime); @@ -12128,6 +12272,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // Get the prototype of the function. __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address + // edx is function, eax is map. + + // Look up the function and the map in the instanceof cache. + Label miss; + ExternalReference roots_address = ExternalReference::roots_address(); + __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); + __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ j(not_equal, &miss); + __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); + __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ j(not_equal, &miss); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ ret(2 * kPointerSize); + + __ bind(&miss); __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); // Check that the function prototype is a JS object. @@ -12140,7 +12300,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ cmp(ecx, LAST_JS_OBJECT_TYPE); __ j(greater, &slow, not_taken); - // Register mapping: eax is object map and ebx is function prototype. + // Register mapping: + // eax is object map. + // edx is function. + // ebx is function prototype. + __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); + __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx); + __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); // Loop through the prototype chain looking for the function prototype.
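The hunks above and below wire a one-entry cache into InstanceofStub: the stub first compares the incoming function and object map against two Heap roots and, on a hit, returns the cached answer without walking the prototype chain; on a miss it records the new (function, map) pair and, once the walk completes, the answer. A rough C++ sketch of the scheme the assembly implements (the struct and its names are illustrative, not V8 API):

struct InstanceofCacheSketch {
  const void* function;  // cached right-hand side of instanceof
  const void* map;       // map of the cached left-hand side object
  int answer;            // Smi-style answer: 0 = instance, 1 = not

  // On a hit the stub can return immediately with the cached answer.
  bool Lookup(const void* fn, const void* m, int* result) const {
    if (fn == function && m == map) {
      *result = answer;
      return true;
    }
    return false;
  }

  // After a full prototype walk, remember the inputs and the outcome.
  void Update(const void* fn, const void* m, int a) {
    function = fn;
    map = m;
    answer = a;
  }
};

Note the encoding matches what the stub's callers expect: eax is 0 for "is an instance" and Smi(1) otherwise, which is why the cached answer can be returned as-is.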
@@ -12156,10 +12324,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ bind(&is_instance); __ Set(eax, Immediate(0)); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); __ ret(2 * kPointerSize); __ bind(&is_not_instance); __ Set(eax, Immediate(Smi::FromInt(1))); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); __ ret(2 * kPointerSize); // Slow-case: Go through the JavaScript implementation. diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 0d3fee5925..5967338da2 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -48,7 +48,7 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; // A reference is a C++ stack-allocated object that puts a // reference on the virtual frame. The reference may be consumed -// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference. +// by GetValue, TakeValue and SetValue. // When the lifetime (scope) of a valid reference ends, it must have // been consumed, and be in state UNLOADED. class Reference BASE_EMBEDDED { @@ -414,7 +414,6 @@ class CodeGenerator: public AstVisitor { // The following are used by class Reference. void LoadReference(Reference* ref); - void UnloadReference(Reference* ref); static Operand ContextOperand(Register context, int index) { return Operand(context, Context::SlotOffset(index)); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index c3a019ba4c..9c8dfb2803 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -33,6 +33,17 @@ namespace v8 { namespace internal { +// Flags used for the AllocateInNewSpace functions. +enum AllocationFlags { + // No special flags. + NO_ALLOCATION_FLAGS = 0, + // Return the pointer to the allocated already tagged as a heap object. + TAG_OBJECT = 1 << 0, + // The content of the result register already contains the allocation top in + // new space. + RESULT_CONTAINS_TOP = 1 << 1 +}; + // Convenience for platform-independent signatures. We do not normally // distinguish memory operands from other operands on ia32. typedef Operand MemOperand; diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 809228c7ec..189c0e4d16 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -1264,16 +1264,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, } __ bind(&miss); - Handle<Code> ic = ComputeCallMiss(arguments().immediate()); __ jmp(ic, RelocInfo::CODE_TARGET); // Return the generated code. - String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } @@ -1351,16 +1346,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, 1); __ bind(&miss); - Handle<Code> ic = ComputeCallMiss(arguments().immediate()); __ jmp(ic, RelocInfo::CODE_TARGET); // Return the generated code.
- String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } @@ -1379,9 +1369,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, SharedFunctionInfo* function_info = function->shared(); if (function_info->HasCustomCallGenerator()) { - CustomCallGenerator generator = - ToCData<CustomCallGenerator>(function_info->function_data()); - Object* result = generator(this, object, holder, function, name, check); + const int id = function_info->custom_call_generator_id(); + Object* result = + CompileCustomCall(id, object, holder, function, name, check); // undefined means bail out to regular compiler. if (!result->IsUndefined()) { return result; } @@ -1518,11 +1508,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, __ jmp(ic, RelocInfo::CODE_TARGET); // Return the generated code. - String* function_name = NULL; - if (function->shared()->name()->IsString()) { - function_name = String::cast(function->shared()->name()); - } - return GetCode(CONSTANT_FUNCTION, function_name); + return GetCode(function); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 64c3ec1813..678876df72 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -596,10 +596,16 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) { #ifdef DEBUG if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n"); #endif + Map* map = HeapObject::cast(*object)->map(); + if (object->IsString()) { + const int offset = String::kLengthOffset; + PatchInlinedLoad(address(), map, offset); + } + Code* target = NULL; target = Builtins::builtin(Builtins::LoadIC_StringLength); set_target(target); - StubCache::Set(*name, HeapObject::cast(*object)->map(), target); + StubCache::Set(*name, map, target); return Smi::FromInt(String::cast(*object)->length()); } @@ -608,9 +614,13 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) { #ifdef DEBUG if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n"); #endif + Map* map = HeapObject::cast(*object)->map(); + const int offset = JSArray::kLengthOffset; + PatchInlinedLoad(address(), map, offset); + Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength); set_target(target); - StubCache::Set(*name, HeapObject::cast(*object)->map(), target); + StubCache::Set(*name, map, target); return JSArray::cast(*object)->length(); } diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 6aae096fc2..a7ff6e671e 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -239,6 +239,9 @@ class LoadIC: public IC { static void GenerateStringLength(MacroAssembler* masm); static void GenerateFunctionPrototype(MacroAssembler* masm); + // Clear the use of the inlined version. + static void ClearInlinedVersion(Address address); + // The offset from the inlined patch site to the start of the // inlined load instruction. It is architecture-dependent, and not // used on ARM. @@ -265,9 +268,6 @@ class LoadIC: public IC { static void Clear(Address address, Code* target); - // Clear the use of the inlined version.
- static void ClearInlinedVersion(Address address); - static bool PatchInlinedLoad(Address address, Object* map, int index); friend class IC; diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index e1ebc87c65..891b0e2b82 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -1313,9 +1313,8 @@ void Logger::LogCodeObjects() { void Logger::LogCompiledFunctions() { HandleScope scope; const int compiled_funcs_count = EnumerateCompiledFunctions(NULL); - Handle<SharedFunctionInfo>* sfis = - NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count); - EnumerateCompiledFunctions(sfis); + ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count); + EnumerateCompiledFunctions(sfis.start()); // During iteration, there can be heap allocation due to // GetScriptLineNumber call. @@ -1360,8 +1359,6 @@ void Logger::LogCompiledFunctions() { Logger::LAZY_COMPILE_TAG, shared->code(), *func_name)); } } - - DeleteArray(sfis); } diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 81e5bf7a40..a21e9604c2 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -50,17 +50,6 @@ enum HandlerType { }; -// Flags used for the AllocateInNewSpace functions. -enum AllocationFlags { - // No special flags. - NO_ALLOCATION_FLAGS = 0, - // Return the pointer to the allocated already tagged as a heap object. - TAG_OBJECT = 1 << 0, - // The content of the result register already contains the allocation top in - // new space. - RESULT_CONTAINS_TOP = 1 << 1 -}; - // Invalid depth in prototype chain. const int kInvalidProtoDepth = -1; diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index de6a362c33..a46af4ac2d 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -42,6 +42,9 @@ var COMPILATION_TYPE_JSON = 2; var kVowelSounds = 0; var kCapitalVowelSounds = 0; +// Matches Messages::kNoLineNumberInfo from v8.h +var kNoLineNumberInfo = 0; + // If this object gets passed to an error constructor the error will // get an accessor for .message that constructs a descriptive error // message on access. @@ -203,9 +206,9 @@ function FormatMessage(message) { function GetLineNumber(message) { - if (message.startPos == -1) return -1; + if (message.startPos == -1) return kNoLineNumberInfo; var location = message.script.locationFromPosition(message.startPos, true); - if (location == null) return -1; + if (location == null) return kNoLineNumberInfo; return location.line + 1; } diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index ae7d2c2a97..ad15104152 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -2530,7 +2530,13 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() { bool SharedFunctionInfo::HasCustomCallGenerator() { - return function_data()->IsProxy(); + return function_data()->IsSmi(); +} + + +int SharedFunctionInfo::custom_call_generator_id() { + ASSERT(HasCustomCallGenerator()); + return Smi::cast(function_data())->value(); } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 459c8aacab..c8acb47071 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -682,11 +682,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length()); - SmartPointer<uc16> smart_chars(NewArray<uc16>(this->length())); - String::WriteToFlat(this, *smart_chars, 0, this->length()); - ASSERT(memcmp(*smart_chars, + ScopedVector<uc16> smart_chars(this->length()); + String::WriteToFlat(this, smart_chars.start(), 0, this->length()); + ASSERT(memcmp(smart_chars.start(), resource->data(), - resource->length() * sizeof(**smart_chars)) == 0); + resource->length() * sizeof(smart_chars[0])) == 0); } #endif // DEBUG @@ -728,11 +728,11 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) { if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. ASSERT(static_cast<size_t>(this->length()) == resource->length()); - SmartPointer<char> smart_chars(NewArray<char>(this->length())); - String::WriteToFlat(this, *smart_chars, 0, this->length()); - ASSERT(memcmp(*smart_chars, + ScopedVector<char> smart_chars(this->length()); + String::WriteToFlat(this, smart_chars.start(), 0, this->length()); + ASSERT(memcmp(smart_chars.start(), resource->data(), - resource->length()*sizeof(**smart_chars)) == 0); + resource->length() * sizeof(smart_chars[0])) == 0); } #endif // DEBUG @@ -4900,6 +4900,7 @@ Object* JSFunction::SetInstancePrototype(Object* value) { // prototype is put into the initial map where it belongs. set_prototype_or_initial_map(value); } + Heap::ClearInstanceofCache(); return value; } @@ -5601,6 +5602,8 @@ Object* JSObject::SetPrototype(Object* value, Map::cast(new_map)->set_prototype(value); real_receiver->set_map(Map::cast(new_map)); + Heap::ClearInstanceofCache(); + return value; } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index dcfb2eedad..8b114a64ff 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -2328,6 +2328,10 @@ class JSFunctionResultCache: public FixedArray { static const int kEntrySize = 2; // key + value + static const int kFactoryOffset = kHeaderSize; + static const int kFingerOffset = kFactoryOffset + kPointerSize; + static const int kCacheSizeOffset = kFingerOffset + kPointerSize; + inline void MakeZeroSize(); inline void Clear(); @@ -3200,7 +3204,7 @@ class SharedFunctionInfo: public HeapObject { // [function data]: This field holds some additional data for function. // Currently it either has FunctionTemplateInfo to make benefit the API - // or Proxy wrapping CustomCallGenerator. + // or Smi identifying a custom call generator. // In the long run we don't want all functions to have this field but // we can fix that when we have a better model for storing hidden data // on objects. @@ -3209,6 +3213,7 @@ class SharedFunctionInfo: public HeapObject { inline bool IsApiFunction(); inline FunctionTemplateInfo* get_api_func_data(); inline bool HasCustomCallGenerator(); + inline int custom_call_generator_id(); // [script info]: Script from which the function originates.
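The platform-specific hunks that follow all make the same mechanical change: a manually managed NewArray/DeleteArray pair in OS::StackWalk is replaced by a ScopedVector, so the buffer is released on every return path, including the early error returns. A minimal sketch of such an RAII wrapper (simplified, with an illustrative name; the real class is v8::internal::ScopedVector in utils.h):

template <typename T>
class ScopedBufferSketch {
 public:
  explicit ScopedBufferSketch(int length) : start_(new T[length]) {}
  ~ScopedBufferSketch() { delete[] start_; }  // runs on every exit path
  T* start() { return start_; }
  T& operator[](int index) { return start_[index]; }
 private:
  T* start_;
  // Copying would double-free the buffer, so forbid it.
  ScopedBufferSketch(const ScopedBufferSketch&);
  void operator=(const ScopedBufferSketch&);
};

With this shape, the early "return kStackWalkError;" branches below no longer need a matching DeleteArray call.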
DECL_ACCESSORS(script, Object) diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 67e52ce947..b1075cf321 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -286,14 +286,12 @@ void OS::LogSharedLibraryAddresses() { int OS::StackWalk(Vector<OS::StackFrame> frames) { int frames_size = frames.length(); - void** addresses = NewArray<void*>(frames_size); + ScopedVector<void*> addresses(frames_size); - int frames_count = backtrace(addresses, frames_size); + int frames_count = backtrace(addresses.start(), frames_size); - char** symbols; - symbols = backtrace_symbols(addresses, frames_count); + char** symbols = backtrace_symbols(addresses.start(), frames_count); if (symbols == NULL) { - DeleteArray(addresses); return kStackWalkError; } @@ -308,7 +306,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; } - DeleteArray(addresses); free(symbols); return frames_count; diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index b28597d034..fca218fe9e 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -376,14 +376,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { // backtrace is a glibc extension. #ifdef __GLIBC__ int frames_size = frames.length(); - void** addresses = NewArray<void*>(frames_size); + ScopedVector<void*> addresses(frames_size); - int frames_count = backtrace(addresses, frames_size); + int frames_count = backtrace(addresses.start(), frames_size); - char** symbols; - symbols = backtrace_symbols(addresses, frames_count); + char** symbols = backtrace_symbols(addresses.start(), frames_count); if (symbols == NULL) { - DeleteArray(addresses); return kStackWalkError; } @@ -398,7 +396,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; } - DeleteArray(addresses); free(symbols); return frames_count; diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 551690989f..23747c35f5 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -283,13 +283,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; int frames_size = frames.length(); - void** addresses = NewArray<void*>(frames_size); - int frames_count = backtrace(addresses, frames_size); + ScopedVector<void*> addresses(frames_size); - char** symbols; - symbols = backtrace_symbols(addresses, frames_count); + int frames_count = backtrace(addresses.start(), frames_size); + + char** symbols = backtrace_symbols(addresses.start(), frames_count); if (symbols == NULL) { - DeleteArray(addresses); return kStackWalkError; } @@ -305,7 +304,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; } - DeleteArray(addresses); free(symbols); return frames_count; diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index 1fa652d9ee..0d9547b8bd 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -233,14 +233,12 @@ void OS::LogSharedLibraryAddresses() { int OS::StackWalk(Vector<OS::StackFrame> frames) { int frames_size = frames.length(); - void** addresses = NewArray<void*>(frames_size); + ScopedVector<void*> addresses(frames_size); - int frames_count = backtrace(addresses, frames_size); + int frames_count = backtrace(addresses.start(), frames_size); - char** symbols; - symbols = backtrace_symbols(addresses, frames_count); + char** symbols = backtrace_symbols(addresses.start(), frames_count); if (symbols == NULL) { - DeleteArray(addresses); return kStackWalkError; } @@ -255,7 +253,6 @@
frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; } - DeleteArray(addresses); free(symbols); return frames_count; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index d03a0a964e..bee517364a 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -1249,16 +1249,16 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { // Try to locate a symbol for this frame. DWORD64 symbol_displacement; - IMAGEHLP_SYMBOL64* symbol = NULL; - symbol = NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen); - if (!symbol) return kStackWalkError; // Out of memory. - memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen); - symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64); - symbol->MaxNameLength = kStackWalkMaxNameLen; + SmartPointer<IMAGEHLP_SYMBOL64> symbol( + NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen)); + if (symbol.is_empty()) return kStackWalkError; // Out of memory. + memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen); + (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64); + (*symbol)->MaxNameLength = kStackWalkMaxNameLen; ok = _SymGetSymFromAddr64(process_handle, // hProcess stack_frame.AddrPC.Offset, // Address &symbol_displacement, // Displacement - symbol); // Symbol + *symbol); // Symbol if (ok) { // Try to locate more source information for the symbol. IMAGEHLP_LINE64 Line; @@ -1276,13 +1276,13 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { SNPrintF(MutableCStrVector(frames[frames_count].text, kStackWalkMaxTextLen), "%s %s:%d:%d", - symbol->Name, Line.FileName, Line.LineNumber, + (*symbol)->Name, Line.FileName, Line.LineNumber, line_displacement); } else { SNPrintF(MutableCStrVector(frames[frames_count].text, kStackWalkMaxTextLen), "%s", - symbol->Name); + (*symbol)->Name); } // Make sure line termination is in place. frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0'; @@ -1294,11 +1294,9 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { // module will never be found).
int err = GetLastError(); if (err != ERROR_MOD_NOT_FOUND) { - DeleteArray(symbol); break; } } - DeleteArray(symbol); frames_count++; } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 823889aced..b421ac7147 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -1325,18 +1325,9 @@ static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) { } -static void SetCustomCallGenerator(Handle<JSFunction> function, - ExternalReference* generator) { - if (function->shared()->function_data()->IsUndefined()) { - function->shared()->set_function_data(*FromCData(generator->address())); - } -} - - static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder, const char* name, - Builtins::Name builtin_name, - ExternalReference* generator = NULL) { + Builtins::Name builtin_name) { Handle<String> key = Factory::LookupAsciiSymbol(name); Handle<Code> code(Builtins::builtin(builtin_name)); Handle<JSFunction> optimized = Factory::NewFunction(key, @@ -1345,44 +1336,18 @@ static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder, code, false); optimized->shared()->DontAdaptArguments(); - if (generator != NULL) { - SetCustomCallGenerator(optimized, generator); - } SetProperty(holder, key, optimized, NONE); return optimized; } -Object* CompileArrayPushCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check) { - return compiler->CompileArrayPushCall(object, holder, function, name, check); -} - - -Object* CompileArrayPopCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check) { - return compiler->CompileArrayPopCall(object, holder, function, name, check); -} - - static Object* Runtime_SpecialArrayFunctions(Arguments args) { HandleScope scope; ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, holder, 0); - ExternalReference pop = ExternalReference::compile_array_pop_call(); - ExternalReference push = ExternalReference::compile_array_push_call(); - - InstallBuiltin(holder, "pop", Builtins::ArrayPop, &pop); - InstallBuiltin(holder, "push", Builtins::ArrayPush, &push); + InstallBuiltin(holder, "pop", Builtins::ArrayPop); + InstallBuiltin(holder, "push", Builtins::ArrayPush); InstallBuiltin(holder, "shift", Builtins::ArrayShift); InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift); InstallBuiltin(holder, "slice", Builtins::ArraySlice); diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 68412677f4..dcaa101155 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -414,44 +414,36 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 19, "compare_doubles"); - Add(ExternalReference::compile_array_pop_call().address(), - UNCLASSIFIED, - 20, - "compile_array_pop"); - Add(ExternalReference::compile_array_push_call().address(), - UNCLASSIFIED, - 21, - "compile_array_push"); #ifndef V8_INTERPRETED_REGEXP Add(ExternalReference::re_case_insensitive_compare_uc16().address(), UNCLASSIFIED, - 22, + 20, "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); Add(ExternalReference::re_check_stack_guard_state().address(), UNCLASSIFIED, - 23, + 21, "RegExpMacroAssembler*::CheckStackGuardState()"); Add(ExternalReference::re_grow_stack().address(), UNCLASSIFIED, - 24, + 22, "NativeRegExpMacroAssembler::GrowStack()"); Add(ExternalReference::re_word_character_map().address(), UNCLASSIFIED, - 25, + 23, "NativeRegExpMacroAssembler::word_character_map"); #endif // V8_INTERPRETED_REGEXP // Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(), UNCLASSIFIED, - 26, + 24, "KeyedLookupCache::keys()"); Add(ExternalReference::keyed_lookup_cache_field_offsets().address(), UNCLASSIFIED, - 27, + 25, "KeyedLookupCache::field_offsets()"); Add(ExternalReference::transcendental_cache_array_address().address(), UNCLASSIFIED, - 28, + 26, "TranscendentalCache::caches()"); } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index f3532533e7..6ebe495f16 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -1142,6 +1142,29 @@ Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) { } +Object* CallStubCompiler::CompileCustomCall(int generator_id, + Object* object, + JSObject* holder, + JSFunction* function, + String* fname, + CheckType check) { + ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators); + switch (generator_id) { +#define CALL_GENERATOR_CASE(ignored1, ignored2, name) \ + case k##name##CallGenerator: \ + return CallStubCompiler::Compile##name##Call(object, \ + holder, \ + function, \ + fname, \ + check); + CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE) +#undef CALL_GENERATOR_CASE + } + UNREACHABLE(); + return Heap::undefined_value(); +} + + Object* CallStubCompiler::GetCode(PropertyType type, String* name) { int argc = arguments_.immediate(); Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, @@ -1152,6 +1175,15 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) { } +Object* CallStubCompiler::GetCode(JSFunction* function) { + String* function_name = NULL; + if (function->shared()->name()->IsString()) { + function_name = String::cast(function->shared()->name()); + } + return GetCode(CONSTANT_FUNCTION, function_name); +} + + Object* ConstructStubCompiler::GetCode() { Code::Flags flags = Code::ComputeFlags(Code::STUB); Object* result = GetCodeWithFlags(flags, "ConstructStub"); diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 2e0faf6a89..45aaf75c91 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -559,8 +559,30 @@ class KeyedStoreStubCompiler: public StubCompiler { }; +// List of functions with custom constant call IC stubs. +// +// Installation of custom call generators for the selected builtins is +// handled by the bootstrapper. +// +// Each entry has a name of a global function (lowercased), a name of +// a builtin function on its instance prototype (the one the generator +// is set for), and a name of a generator itself (used to build ids +// and generator function names). +#define CUSTOM_CALL_IC_GENERATORS(V) \ + V(array, push, ArrayPush) \ + V(array, pop, ArrayPop) + + class CallStubCompiler: public StubCompiler { public: + enum { +#define DECLARE_CALL_GENERATOR_ID(ignored1, ignored2, name) \ + k##name##CallGenerator, + CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID) +#undef DECLARE_CALL_GENERATOR_ID + kNumCallGenerators + }; + CallStubCompiler(int argc, InLoopFlag in_loop) : arguments_(argc), in_loop_(in_loop) { } @@ -582,17 +604,22 @@ class CallStubCompiler: public StubCompiler { JSFunction* function, String* name); - Object* CompileArrayPushCall(Object* object, - JSObject* holder, - JSFunction* function, - String* name, - CheckType check); + // Compiles a custom call constant IC using the generator with given id. 
+ Object* CompileCustomCall(int generator_id, + Object* object, + JSObject* holder, + JSFunction* function, + String* name, + CheckType check); - Object* CompileArrayPopCall(Object* object, - JSObject* holder, - JSFunction* function, - String* name, +#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \ + Object* Compile##name##Call(Object* object, \ + JSObject* holder, \ + JSFunction* function, \ + String* fname, \ CheckType check); + CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR) +#undef DECLARE_CALL_GENERATOR private: const ParameterCount arguments_; @@ -601,6 +628,10 @@ class CallStubCompiler: public StubCompiler { const ParameterCount& arguments() { return arguments_; } Object* GetCode(PropertyType type, String* name); + + // Convenience function. Calls GetCode above passing + // CONSTANT_FUNCTION type and the name of the given function. + Object* GetCode(JSFunction* function); }; @@ -663,31 +694,6 @@ class CallOptimization BASE_EMBEDDED { CallHandlerInfo* api_call_info_; }; - -typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check); - - -Object* CompileArrayPushCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check); - - -Object* CompileArrayPopCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check); - - } } // namespace v8::internal #endif // V8_STUB_CACHE_H_ diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc index 2f75c8fc1c..87dc1f61ff 100644 --- a/deps/v8/src/top.cc +++ b/deps/v8/src/top.cc @@ -337,7 +337,7 @@ static int stack_trace_nesting_level = 0; static StringStream* incomplete_message = NULL; -Handle<String> Top::StackTrace() { +Handle<String> Top::StackTraceString() { if (stack_trace_nesting_level == 0) { stack_trace_nesting_level++; HeapStringAllocator allocator; @@ -365,6 +365,89 @@ } +Local<StackTrace> Top::CaptureCurrentStackTrace( + int frame_limit, StackTrace::StackTraceOptions options) { + v8::HandleScope scope; + // Ensure no negative values. + int limit = Max(frame_limit, 0); + Handle<JSArray> stackTrace = Factory::NewJSArray(frame_limit); + FixedArray* frames = FixedArray::cast(stackTrace->elements()); + + Handle<String> column_key = Factory::LookupAsciiSymbol("column"); + Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber"); + Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName"); + Handle<String> function_key = Factory::LookupAsciiSymbol("functionName"); + Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval"); + Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor"); + + StackTraceFrameIterator it; + int frames_seen = 0; + while (!it.done() && (frames_seen < limit)) { + // Create a JSObject to hold the information for the StackFrame. + Handle<JSObject> stackFrame = Factory::NewJSObject(object_function()); + + JavaScriptFrame* frame = it.frame(); + JSFunction* fun(JSFunction::cast(frame->function())); + Script* script = Script::cast(fun->shared()->script()); + + if (options & StackTrace::kLineNumber) { + int script_line_offset = script->line_offset()->value(); + int position = frame->code()->SourcePosition(frame->pc()); + int line_number = GetScriptLineNumber(Handle