
Upgrade V8 to 2.2.9

v0.7.4-release
Ryan Dahl, 15 years ago
commit 615d890622
92 changed files (lines changed in parentheses):

   1. deps/v8/AUTHORS (1)
   2. deps/v8/ChangeLog (13)
   3. deps/v8/SConstruct (3)
   4. deps/v8/include/v8-profiler.h (2)
   5. deps/v8/include/v8.h (104)
   6. deps/v8/samples/process.cc (2)
   7. deps/v8/src/SConscript (2)
   8. deps/v8/src/api.cc (125)
   9. deps/v8/src/api.h (12)
  10. deps/v8/src/arm/assembler-arm.cc (29)
  11. deps/v8/src/arm/assembler-arm.h (14)
  12. deps/v8/src/arm/builtins-arm.cc (16)
  13. deps/v8/src/arm/codegen-arm.cc (174)
  14. deps/v8/src/arm/constants-arm.h (4)
  15. deps/v8/src/arm/disasm-arm.cc (47)
  16. deps/v8/src/arm/full-codegen-arm.cc (55)
  17. deps/v8/src/arm/ic-arm.cc (263)
  18. deps/v8/src/arm/macro-assembler-arm.cc (50)
  19. deps/v8/src/arm/macro-assembler-arm.h (28)
  20. deps/v8/src/arm/regexp-macro-assembler-arm.cc (21)
  21. deps/v8/src/arm/simulator-arm.cc (106)
  22. deps/v8/src/arm/simulator-arm.h (4)
  23. deps/v8/src/arm/stub-cache-arm.cc (53)
  24. deps/v8/src/arm/virtual-frame-arm.cc (50)
  25. deps/v8/src/arm/virtual-frame-arm.h (7)
  26. deps/v8/src/assembler.cc (10)
  27. deps/v8/src/assembler.h (3)
  28. deps/v8/src/bootstrapper.cc (27)
  29. deps/v8/src/bootstrapper.h (4)
  30. deps/v8/src/builtins.cc (3)
  31. deps/v8/src/conversions.cc (34)
  32. deps/v8/src/dateparser.cc (8)
  33. deps/v8/src/debug-agent.cc (6)
  34. deps/v8/src/debug.cc (18)
  35. deps/v8/src/dtoa.cc (77)
  36. deps/v8/src/dtoa.h (81)
  37. deps/v8/src/fast-dtoa.cc (11)
  38. deps/v8/src/fast-dtoa.h (3)
  39. deps/v8/src/fixed-dtoa.cc (405)
  40. deps/v8/src/fixed-dtoa.h (55)
  41. deps/v8/src/flags.cc (14)
  42. deps/v8/src/frames.h (1)
  43. deps/v8/src/globals.h (12)
  44. deps/v8/src/heap.cc (6)
  45. deps/v8/src/heap.h (19)
  46. deps/v8/src/ia32/codegen-ia32.cc (276)
  47. deps/v8/src/ia32/codegen-ia32.h (3)
  48. deps/v8/src/ia32/macro-assembler-ia32.h (11)
  49. deps/v8/src/ia32/stub-cache-ia32.cc (26)
  50. deps/v8/src/ic.cc (14)
  51. deps/v8/src/ic.h (6)
  52. deps/v8/src/log.cc (7)
  53. deps/v8/src/macro-assembler.h (11)
  54. deps/v8/src/messages.js (7)
  55. deps/v8/src/objects-inl.h (8)
  56. deps/v8/src/objects.cc (19)
  57. deps/v8/src/objects.h (7)
  58. deps/v8/src/platform-freebsd.cc (9)
  59. deps/v8/src/platform-linux.cc (9)
  60. deps/v8/src/platform-macos.cc (10)
  61. deps/v8/src/platform-solaris.cc (9)
  62. deps/v8/src/platform-win32.cc (20)
  63. deps/v8/src/runtime.cc (41)
  64. deps/v8/src/serialize.cc (22)
  65. deps/v8/src/stub-cache.cc (32)
  66. deps/v8/src/stub-cache.h (66)
  67. deps/v8/src/top.cc (87)
  68. deps/v8/src/top.h (5)
  69. deps/v8/src/utils.h (3)
  70. deps/v8/src/v8natives.js (3)
  71. deps/v8/src/version.cc (2)
  72. deps/v8/src/x64/assembler-x64-inl.h (2)
  73. deps/v8/src/x64/codegen-x64.cc (338)
  74. deps/v8/src/x64/macro-assembler-x64.cc (11)
  75. deps/v8/src/x64/macro-assembler-x64.h (18)
  76. deps/v8/src/x64/stub-cache-x64.cc (26)
  77. deps/v8/test/cctest/SConscript (2)
  78. deps/v8/test/cctest/gay-fixed.cc (100049)
  79. deps/v8/test/cctest/gay-fixed.h (47)
  80. deps/v8/test/cctest/gay-shortest.cc (10)
  81. deps/v8/test/cctest/gay-shortest.h (4)
  82. deps/v8/test/cctest/test-api.cc (231)
  83. deps/v8/test/cctest/test-debug.cc (44)
  84. deps/v8/test/cctest/test-fast-dtoa.cc (37)
  85. deps/v8/test/cctest/test-fixed-dtoa.cc (512)
  86. deps/v8/test/cctest/test-macro-assembler-x64.cc (60)
  87. deps/v8/test/mjsunit/instanceof-2.js (329)
  88. deps/v8/test/mjsunit/property-load-across-eval.js (50)
  89. deps/v8/test/mjsunit/regress/regress-696.js (36)
  90. deps/v8/test/mjsunit/regress/regress-697.js (34)
  91. deps/v8/test/mjsunit/smi-ops.js (7)
  92. deps/v8/tools/gyp/v8.gyp (4)

deps/v8/AUTHORS (1)

@@ -18,6 +18,7 @@ Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Paolo Giarrusso <p.giarrusso@gmail.com>

deps/v8/ChangeLog (13)

@@ -1,3 +1,16 @@
2010-05-10: Version 2.2.9
Allow Object.create to be called with a function (issue 697).
Fixed bug with Date.parse returning a non-NaN value when called on a
non-date string (issue 696).
Allow unaligned memory accesses on ARM targets that support it (by
Subrato K De of CodeAurora <subratokde@codeaurora.org>).
C++ API for retrieving JavaScript stack trace information.
2010-05-05: Version 2.2.8
Performance improvements in the x64 and ARM backends.
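
For illustration, a minimal embedder-side check of the two behavioral fixes above, written against the pre-isolate V8 embedding API of this era. The JS snippets are assumptions based on the issue descriptions, not the actual regression tests (those live in test/mjsunit/regress/regress-696.js and regress-697.js):

#include <v8.h>

int main() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);
  // Issue 697: Object.create may now take a function as its first argument.
  v8::Handle<v8::Value> created = v8::Script::Compile(v8::String::New(
      "Object.create(function() {}) !== null"))->Run();
  // Issue 696: Date.parse on a non-date string now yields NaN, not a number.
  v8::Handle<v8::Value> parsed = v8::Script::Compile(v8::String::New(
      "isNaN(Date.parse('certainly not a date'))"))->Run();
  bool ok = created->BooleanValue() && parsed->BooleanValue();
  context.Dispose();
  return ok ? 0 : 1;  // exits 0 if both fixes behave as described
}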

deps/v8/SConstruct (3)

@@ -84,6 +84,7 @@ ANDROID_FLAGS = ['-march=armv7-a',
'-finline-limit=64',
'-DCAN_USE_VFP_INSTRUCTIONS=1',
'-DCAN_USE_ARMV7_INSTRUCTIONS=1',
'-DCAN_USE_UNALIGNED_ACCESSES=1',
'-MD']
ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
@@ -203,7 +204,7 @@ LIBRARY_FLAGS = {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
'LINKFLAGS': ['-m32']
},
'armvariant:thumb2': {

deps/v8/include/v8-profiler.h (2)

@@ -109,7 +109,7 @@ class V8EXPORT CpuProfileNode {
/** Retrieves a child node by index. */
const CpuProfileNode* GetChild(int index) const;
static const int kNoLineNumberInfo = 0;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
};

deps/v8/include/v8.h (104)

@@ -126,6 +126,8 @@ template <class T> class Persistent;
class FunctionTemplate;
class ObjectTemplate;
class Data;
class StackTrace;
class StackFrame;
namespace internal {
@@ -691,6 +693,106 @@ class V8EXPORT Message {
// TODO(1245381): Print to a string instead of on a FILE.
static void PrintCurrentStackTrace(FILE* out);
static const int kNoLineNumberInfo = 0;
static const int kNoColumnInfo = 0;
};
/**
* Representation of a JavaScript stack trace. The information collected is a
* snapshot of the execution stack and the information remains valid after
* execution continues.
*/
class V8EXPORT StackTrace {
public:
/**
* Flags that determine what information is captured for each
* StackFrame when grabbing the current stack trace.
*/
enum StackTraceOptions {
kLineNumber = 1,
kColumnOffset = 1 << 1 | kLineNumber,
kScriptName = 1 << 2,
kFunctionName = 1 << 3,
kIsEval = 1 << 4,
kIsConstructor = 1 << 5,
kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
kDetailed = kOverview | kIsEval | kIsConstructor
};
/**
* Returns a StackFrame at a particular index.
*/
Local<StackFrame> GetFrame(uint32_t index) const;
/**
* Returns the number of StackFrames.
*/
int GetFrameCount() const;
/**
* Returns StackTrace as a v8::Array that contains StackFrame objects.
*/
Local<Array> AsArray();
/**
* Grab a snapshot of the current JavaScript execution stack.
*
* \param frame_limit The maximum number of stack frames we want to capture.
* \param options Enumerates the set of things we will capture for each
* StackFrame.
*/
static Local<StackTrace> CurrentStackTrace(
int frame_limit,
StackTraceOptions options = kOverview);
};
/**
* A single JavaScript stack frame.
*/
class V8EXPORT StackFrame {
public:
/**
* Returns the number, 1-based, of the line for the associated function call.
* This method will return Message::kNoLineNumberInfo if it is unable to
* retrieve the line number, or if kLineNumber was not passed as an option
* when capturing the StackTrace.
*/
int GetLineNumber() const;
/**
* Returns the 1-based column offset on the line for the associated function
* call.
* This method will return Message::kNoColumnInfo if it is unable to retrieve
* the column number, or if kColumnOffset was not passed as an option when
* capturing the StackTrace.
*/
int GetColumn() const;
/**
* Returns the name of the resource that contains the script for the
* function for this StackFrame.
*/
Local<String> GetScriptName() const;
/**
* Returns the name of the function associated with this stack frame.
*/
Local<String> GetFunctionName() const;
/**
* Returns whether or not the associated function is compiled via a call to
* eval().
*/
bool IsEval() const;
/**
* Returns whether or not the associated function is called as a
* constructor via "new".
*/
bool IsConstructor() const;
};
@@ -2122,7 +2224,7 @@ class V8EXPORT ResourceConstraints {
};
bool SetResourceConstraints(ResourceConstraints* constraints);
bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints);
// --- E x c e p t i o n s ---
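
A hedged usage sketch of the StackTrace/StackFrame API declared earlier in this v8.h hunk, as an embedder might call it while JavaScript frames are on the stack (e.g. from inside a callback). The surrounding embedding setup is assumed; only the StackTrace and StackFrame calls come from this header:

// Capture at most 10 frames with full detail, then print an overview.
v8::Local<v8::StackTrace> trace =
    v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
for (int i = 0; i < trace->GetFrameCount(); i++) {
  v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
  v8::String::Utf8Value name(frame->GetFunctionName());
  printf("#%d %s line %d column %d%s%s\n", i,
         *name ? *name : "(anonymous)",
         frame->GetLineNumber(),   // Message::kNoLineNumberInfo if unknown
         frame->GetColumn(),       // Message::kNoColumnInfo if unknown
         frame->IsEval() ? " [eval]" : "",
         frame->IsConstructor() ? " [construct]" : "");
}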

deps/v8/samples/process.cc (2)

@@ -294,7 +294,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Fetch the template for creating JavaScript map wrappers.
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate();
map_template_ = Persistent<ObjectTemplate>::New(raw_template);
}

deps/v8/src/SConscript (2)

@@ -58,6 +58,7 @@ SOURCES = {
debug.cc
disassembler.cc
diy-fp.cc
dtoa.cc
execution.cc
factory.cc
flags.cc
@@ -68,6 +69,7 @@ SOURCES = {
func-name-inferrer.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
handles.cc
hashmap.cc
heap-profiler.cc

deps/v8/src/api.cc (125)

@@ -1438,7 +1438,7 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
int Message::GetLineNumber() const {
ON_BAILOUT("v8::Message::GetLineNumber()", return -1);
ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo);
ENTER_V8;
HandleScope scope;
EXCEPTION_PREAMBLE();
@@ -1470,7 +1470,7 @@ int Message::GetEndPosition() const {
int Message::GetStartColumn() const {
if (IsDeadCheck("v8::Message::GetStartColumn()")) return 0;
if (IsDeadCheck("v8::Message::GetStartColumn()")) return kNoColumnInfo;
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -1485,7 +1485,7 @@ int Message::GetStartColumn() const {
int Message::GetEndColumn() const {
if (IsDeadCheck("v8::Message::GetEndColumn()")) return 0;
if (IsDeadCheck("v8::Message::GetEndColumn()")) return kNoColumnInfo;
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -1525,6 +1525,118 @@ void Message::PrintCurrentStackTrace(FILE* out) {
}
// --- S t a c k T r a c e ---
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
if (IsDeadCheck("v8::StackTrace::GetFrame()")) return Local<StackFrame>();
ENTER_V8;
HandleScope scope;
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
i::Handle<i::JSObject> obj(i::JSObject::cast(self->GetElement(index)));
return scope.Close(Utils::StackFrameToLocal(obj));
}
int StackTrace::GetFrameCount() const {
if (IsDeadCheck("v8::StackTrace::GetFrameCount()")) return -1;
ENTER_V8;
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
Local<Array> StackTrace::AsArray() {
if (IsDeadCheck("v8::StackTrace::AsArray()")) Local<Array>();
ENTER_V8;
return Utils::ToLocal(Utils::OpenHandle(this));
}
Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>();
ENTER_V8;
return i::Top::CaptureCurrentStackTrace(frame_limit, options);
}
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
if (IsDeadCheck("v8::StackFrame::GetLineNumber()")) {
return Message::kNoLineNumberInfo;
}
ENTER_V8;
i::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> line = GetProperty(self, "lineNumber");
if (!line->IsSmi()) {
return Message::kNoLineNumberInfo;
}
return i::Smi::cast(*line)->value();
}
int StackFrame::GetColumn() const {
if (IsDeadCheck("v8::StackFrame::GetColumn()")) {
return Message::kNoColumnInfo;
}
ENTER_V8;
i::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> column = GetProperty(self, "column");
if (!column->IsSmi()) {
return Message::kNoColumnInfo;
}
return i::Smi::cast(*column)->value();
}
Local<String> StackFrame::GetScriptName() const {
if (IsDeadCheck("v8::StackFrame::GetScriptName()")) return Local<String>();
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptName");
if (!name->IsString()) {
return Local<String>();
}
return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
}
Local<String> StackFrame::GetFunctionName() const {
if (IsDeadCheck("v8::StackFrame::GetFunctionName()")) return Local<String>();
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "functionName");
if (!name->IsString()) {
return Local<String>();
}
return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
}
bool StackFrame::IsEval() const {
if (IsDeadCheck("v8::StackFrame::IsEval()")) return false;
ENTER_V8;
i::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
return is_eval->IsTrue();
}
bool StackFrame::IsConstructor() const {
if (IsDeadCheck("v8::StackFrame::IsConstructor()")) return false;
ENTER_V8;
i::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
return is_constructor->IsTrue();
}
// --- D a t a ---
bool Value::IsUndefined() const {
@@ -2185,10 +2297,10 @@ Local<String> v8::Object::ObjectProtoToString() {
int postfix_len = i::StrLength(postfix);
int buf_len = prefix_len + str_len + postfix_len;
char* buf = i::NewArray<char>(buf_len);
i::ScopedVector<char> buf(buf_len);
// Write prefix.
char* ptr = buf;
char* ptr = buf.start();
memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
@@ -2200,8 +2312,7 @@ Local<String> v8::Object::ObjectProtoToString() {
memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::New(buf, buf_len);
i::DeleteArray(buf);
Local<String> result = v8::String::New(buf.start(), buf_len);
return result;
}
}
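
The ObjectProtoToString hunk swaps a manually deleted i::NewArray for i::ScopedVector, so the scratch buffer is released on every exit path instead of requiring a matching DeleteArray. A self-contained sketch of the RAII idea behind it (a hypothetical minimal stand-in, not V8's actual utils.h definition):

#include <cstdio>
#include <cstring>

// Owns a heap-allocated buffer and frees it when the scope exits,
// including on early returns.
template <typename T>
class ScopedVector {
 public:
  explicit ScopedVector(int length) : start_(new T[length]) {}
  ~ScopedVector() { delete[] start_; }
  T* start() const { return start_; }
 private:
  T* start_;
  // Copying would double-free the buffer, so forbid it.
  ScopedVector(const ScopedVector&);
  void operator=(const ScopedVector&);
};

int main() {
  ScopedVector<char> buf(32);
  std::strcpy(buf.start(), "[object Demo]");
  std::printf("%s\n", buf.start());
  return 0;  // buf's destructor releases the allocation here
}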

deps/v8/src/api.h (12)

@@ -192,6 +192,10 @@ class Utils {
v8::internal::Handle<v8::internal::Proxy> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Number> NumberToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Integer> IntegerToLocal(
@@ -227,6 +231,10 @@ class Utils {
OpenHandle(const Function* data);
static inline v8::internal::Handle<v8::internal::JSObject>
OpenHandle(const Message* message);
static inline v8::internal::Handle<v8::internal::JSArray>
OpenHandle(const StackTrace* stack_trace);
static inline v8::internal::Handle<v8::internal::JSObject>
OpenHandle(const StackFrame* stack_frame);
static inline v8::internal::Handle<v8::internal::Context>
OpenHandle(const v8::Context* context);
static inline v8::internal::Handle<v8::internal::SignatureInfo>
@@ -275,6 +283,8 @@ MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
@@ -305,6 +315,8 @@ MAKE_OPEN_HANDLE(Function, JSFunction)
MAKE_OPEN_HANDLE(Message, JSObject)
MAKE_OPEN_HANDLE(Context, Context)
MAKE_OPEN_HANDLE(External, Proxy)
MAKE_OPEN_HANDLE(StackTrace, JSArray)
MAKE_OPEN_HANDLE(StackFrame, JSObject)
#undef MAKE_OPEN_HANDLE

deps/v8/src/arm/assembler-arm.cc (29)

@@ -1157,6 +1157,35 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B4, dst, src);
#else
ldr(dst, src, cond);
MemOperand src1(src);
src1.set_offset(src1.offset() + 4);
Register dst1(dst);
dst1.code_ = dst1.code_ + 1;
ldr(dst1, src1, cond);
#endif
}
void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
#else
str(src, dst, cond);
MemOperand dst1(dst);
dst1.set_offset(dst1.offset() + 4);
Register src1(src);
src1.code_ = src1.code_ + 1;
str(src1, dst1, cond);
#endif
}
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,

deps/v8/src/arm/assembler-arm.h (14)

@@ -448,6 +448,18 @@ class MemOperand BASE_EMBEDDED {
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
void set_offset(int32_t offset) {
ASSERT(rm_.is(no_reg));
offset_ = offset;
}
uint32_t offset() {
ASSERT(rm_.is(no_reg));
return offset_;
}
Register rm() const {return rm_;}
private:
Register rn_; // base
Register rm_; // register offset
@@ -755,6 +767,8 @@ class Assembler : public Malloced {
void strh(Register src, const MemOperand& dst, Condition cond = al);
void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
void ldrd(Register dst, const MemOperand& src, Condition cond = al);
void strd(Register src, const MemOperand& dst, Condition cond = al);
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);

deps/v8/src/arm/builtins-arm.cc (16)

@@ -107,7 +107,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
__ AllocateInNewSpace(size / kPointerSize,
__ AllocateInNewSpace(size,
result,
scratch2,
scratch3,
@@ -191,7 +191,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// keeps the code below free of special casing for the empty array.
int size = JSArray::kSize +
FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size / kPointerSize,
__ AllocateInNewSpace(size,
result,
elements_array_end,
scratch1,
@@ -208,12 +208,13 @@ static void AllocateJSArray(MacroAssembler* masm,
__ add(elements_array_end,
elements_array_end,
Operand(array_size, ASR, kSmiTagSize));
__ AllocateInNewSpace(elements_array_end,
__ AllocateInNewSpace(
elements_array_end,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
@@ -561,7 +562,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -632,12 +633,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: start of next object
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(r0,
__ AllocateInNewSpace(
r0,
r5,
r6,
r2,
&undo_allocation,
RESULT_CONTAINS_TOP);
static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
// Initialize the FixedArray.
// r1: constructor

deps/v8/src/arm/codegen-arm.cc (174)

@@ -191,7 +191,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
VirtualFrame::SpilledScope spilled_scope(frame_);
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -1486,8 +1486,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Then process it as a normal function call.
__ ldr(r0, MemOperand(sp, 3 * kPointerSize));
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
__ str(r1, MemOperand(sp, 3 * kPointerSize));
__ strd(r0, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
frame_->CallStub(&call_function, 3);
@@ -2279,8 +2278,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ ldr(r0, frame_->ElementAt(0)); // load the current count
__ ldr(r1, frame_->ElementAt(1)); // load the length
// Load the current count to r0, load the length to r1.
__ ldrd(r0, frame_->ElementAt(0));
__ cmp(r0, r1); // compare to the array length
node->break_target()->Branch(hs);
@@ -2787,7 +2786,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
__ ldr(r0,
ContextSlotOperandCheckExtensions(potential_slot,
@@ -3473,7 +3473,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
// between the current property value and the actual right-hand side.
// Load of the current value leaves receiver and key on the stack.
// Duplicate receiver and key for loading the current property value.
frame_->Dup2();
EmitKeyedLoad();
frame_->EmitPush(r0);
@@ -3702,9 +3703,56 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// JavaScript examples:
//
// with (obj) foo(1, 2, 3) // foo is in obj
//
// function f() {};
// function g() {
// eval(...);
// f(); // f could be in extension object
// }
// ----------------------------------
// JumpTargets do not yet support merging frames so the frame must be
// spilled when jumping to these targets.
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow);
frame_->EmitPush(r0);
LoadGlobalReceiver(r1);
done.Jump();
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = var->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
__ ldr(r0,
ContextSlotOperandCheckExtensions(potential_slot,
r1,
r2,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
frame_->EmitPush(r0);
LoadGlobalReceiver(r1);
done.Jump();
}
}
slow.Bind();
// Load the function
frame_->EmitPush(cp);
__ mov(r0, Operand(var->name()));
@@ -3716,7 +3764,9 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r0); // function
frame_->EmitPush(r1); // receiver
// Call the function.
done.Bind();
// Call the function. At this point, everything is spilled but the
// function and receiver are in r0 and r1.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
@@ -3767,19 +3817,23 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
LoadAndSpill(property->obj());
if (!property->is_synthetic()) {
// Duplicate receiver for later use.
__ ldr(r0, MemOperand(sp, 0));
frame_->EmitPush(r0);
}
LoadAndSpill(property->key());
EmitKeyedLoad();
frame_->Drop(); // key
// Put the function below the receiver.
if (property->is_synthetic()) {
// Use the global receiver.
frame_->Drop();
frame_->EmitPush(r0);
frame_->EmitPush(r0); // Function.
LoadGlobalReceiver(r0);
} else {
frame_->EmitPop(r1); // receiver
frame_->EmitPush(r0); // function
frame_->EmitPush(r1); // receiver
// Switch receiver and function.
frame_->EmitPop(r1); // Receiver.
frame_->EmitPush(r0); // Function.
frame_->EmitPush(r1); // Receiver.
}
// Call the function.
@@ -4359,12 +4413,13 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
__ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
__ add(r2, r5, Operand(objects_size));
__ AllocateInNewSpace(r2, // In: Size, in words.
__ AllocateInNewSpace(
r2, // In: Size, in words.
r0, // Out: Start of allocation (tagged).
r3, // Scratch register.
r4, // Scratch register.
&slowcase,
TAG_OBJECT);
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// r0: Start of allocated area, object-tagged.
// r1: Number of elements in array, as smi.
// r5: Number of elements, untagged.
@@ -5388,8 +5443,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has all arguments on the stack and the key in r0.
__ ldr(r0, MemOperand(sp, 0));
// Call keyed load IC. It has the arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
@@ -5522,12 +5576,13 @@ void CodeGenerator::EmitKeyedLoad() {
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Load the receiver and key from the stack.
frame_->SpillAllButCopyTOSToR1R0();
// Load the key and receiver from the stack to r0 and r1.
frame_->PopToR1R0();
Register receiver = r0;
Register key = r1;
VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects key and receiver in r0 and r1.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue();
@@ -5721,6 +5776,9 @@ void Reference::GetValue() {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
@@ -5730,23 +5788,26 @@ void Reference::GetValue() {
ASSERT(!is_global || var->is_global());
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
case KEYED: {
if (persist_after_get_) {
cgen_->frame()->Dup2();
}
ASSERT(property != NULL);
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) set_unloaded();
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
}
@@ -5806,7 +5867,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ pop(r3);
// Attempt to allocate new JSFunction in new space.
__ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
__ AllocateInNewSpace(JSFunction::kSize,
r0,
r1,
r2,
@@ -5847,7 +5908,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
__ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
__ AllocateInNewSpace(FixedArray::SizeFor(length),
r0,
r1,
r2,
@@ -5915,7 +5976,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
__ AllocateInNewSpace(size / kPointerSize,
__ AllocateInNewSpace(size,
r0,
r1,
r2,
@@ -6248,8 +6309,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Load rhs to a double in r0, r1.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ pop(lr);
}
@@ -6284,8 +6344,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -6449,10 +6508,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
__ sub(r7, r1, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(both_loaded_as_doubles);
}
@@ -6829,8 +6886,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that second double is in r2 and r3.
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
__ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
@@ -6882,8 +6938,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that first double is in r0 and r1.
__ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
__ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
@@ -6954,8 +7009,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
#else
// Double returned in registers 0 and 1.
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
__ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
#endif
__ mov(r0, Operand(r5));
// And we are done.
@@ -8206,6 +8260,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the prototype of the function (r4 is result, r2 is scratch).
__ ldr(r1, MemOperand(sp, 0));
// r1 is function, r3 is map.
// Look up the function and the map in the instanceof cache.
Label miss;
__ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
__ cmp(r1, ip);
__ b(ne, &miss);
__ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &miss);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ pop();
__ pop();
__ mov(pc, Operand(lr));
__ bind(&miss);
__ TryGetFunctionPrototype(r1, r4, r2, &slow);
// Check that the function prototype is a JS object.
@@ -8215,6 +8285,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
__ b(gt, &slow);
__ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
// Register mapping: r3 is object map and r4 is function prototype.
// Get prototype of object into r2.
__ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
@@ -8232,12 +8305,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
__ mov(r0, Operand(Smi::FromInt(0)));
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ pop();
__ pop();
__ mov(pc, Operand(lr)); // Return.
__ bind(&is_not_instance);
__ mov(r0, Operand(Smi::FromInt(1)));
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ pop();
__ pop();
__ mov(pc, Operand(lr)); // Return.
@@ -8324,8 +8399,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ str(r3, MemOperand(sp, 1 * kPointerSize));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array (in words, not
// bytes because AllocateInNewSpace expects words).
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
__ cmp(r1, Operand(0));
@@ -8336,7 +8410,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(r1, r0, r2, r3, &runtime, TAG_OBJECT);
__ AllocateInNewSpace(
r1,
r0,
r2,
r3,
&runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
@@ -8501,9 +8581,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &runtime);
__ b(ne, &runtime);
__ cmp(r3, Operand(r0));
__ b(le, &runtime);
__ b(ls, &runtime);
// r2: Number of capture registers
// subject: Subject string
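
The InstanceofStub hunk above adds a one-entry cache keyed on the (function, map) pair, held in dedicated root-list slots, so a repeated "x instanceof F" on objects of the same map skips the prototype-chain walk. A self-contained sketch of the same memoization idea in plain C++ (not the generated ARM code; the names are illustrative):

#include <cstdio>

// One-entry instanceof cache, mirroring the roles of
// Heap::kInstanceofCache{Function,Map,Answer}RootIndex in the stub.
struct InstanceofCache {
  const void* function;  // last function checked
  const void* map;       // map of the last object checked
  bool answer;           // cached result for that pair
  bool valid;

  bool Lookup(const void* fn, const void* m, bool* out) const {
    if (valid && fn == function && m == map) {
      *out = answer;
      return true;       // hit: no prototype walk needed
    }
    return false;        // miss: caller performs the full walk
  }

  void Update(const void* fn, const void* m, bool result) {
    function = fn; map = m; answer = result; valid = true;
  }
};

int main() {
  InstanceofCache cache = { 0, 0, false, false };
  int function_F = 0, map_of_x = 0;  // stand-ins for a JSFunction and a Map
  bool answer;
  if (!cache.Lookup(&function_F, &map_of_x, &answer)) {
    answer = true;                   // pretend the slow prototype walk ran
    cache.Update(&function_F, &map_of_x, answer);
  }
  std::printf("second check hits cache: %d\n",
              cache.Lookup(&function_F, &map_of_x, &answer));
  return 0;
}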

deps/v8/src/arm/constants-arm.h (4)

@@ -72,6 +72,10 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_TARGET_CAN_READ_UNALIGNED 1
#endif
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1

deps/v8/src/arm/disasm-arm.cc (47)

@@ -418,6 +418,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
} else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
if (instr->Bits(7, 4) == 0xf) {
Print("strd");
} else {
Print("ldrd");
}
} else {
Print("str");
}
@@ -614,6 +620,47 @@ void Decoder::DecodeType01(Instr* instr) {
} else {
Unknown(instr); // not used by V8
}
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
switch (instr->PUField()) {
case 0: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
} else {
Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
}
break;
}
case 1: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
} else {
Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
}
break;
}
case 2: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
} else {
Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
}
break;
}
case 3: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
} else {
Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
}
break;
}
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
break;
}
}
} else {
// extra load/store instructions
switch (instr->PUField()) {
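
A self-contained sketch of the bit tests the decoder adds: doubleword transfers have bits 27..25 == 0 and bit 20 == 0, with bits 7..4 distinguishing ldrd (0b1101) from strd (0b1111). The example encoding is an assumption worked out from the ARM manual, not taken from the diff:

#include <cstdio>
#include <stdint.h>

// Extract instruction bits hi..lo, like Instr::Bits(hi, lo) above.
static int Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main() {
  uint32_t instr = 0xE1C100F4;  // assumed encoding of: strd r0, [r1, #4]
  if (Bits(instr, 27, 25) == 0 && Bits(instr, 20, 20) == 0 &&
      (Bits(instr, 7, 4) & 0xd) == 0xd) {
    std::printf("%s r%d\n",
                Bits(instr, 7, 4) == 0xf ? "strd" : "ldrd",
                Bits(instr, 15, 12));  // prints: strd r0
  }
  return 0;
}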

deps/v8/src/arm/full-codegen-arm.cc (55)

@@ -738,15 +738,10 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Load the key.
__ mov(r0, Operand(key_literal->handle()));
// Push both as arguments to ic.
__ Push(r1, r0);
// Call keyed load IC. It has all arguments on the stack and the key in r0.
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
DropAndApply(2, context, r0);
Apply(context, r0);
}
}
@@ -935,8 +930,16 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in r0 and r1.
if (expr->is_compound()) {
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
}
break;
}
@@ -1005,8 +1008,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has all arguments on the stack and the key in r0.
__ ldr(r0, MemOperand(sp, 0));
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
@@ -1171,10 +1173,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
// Drop receiver left on the stack by IC.
DropAndApply(1, context_, r0);
} else {
VisitForValue(expr->key(), kStack);
VisitForValue(expr->key(), kAccumulator);
__ pop(r1);
EmitKeyedPropertyLoad(expr);
// Drop key and receiver left on the stack by IC.
DropAndApply(2, context_, r0);
Apply(context_, r0);
}
}
@@ -1246,24 +1248,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a keyed property, use keyed load IC followed by function
// call.
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
VisitForValue(prop->key(), kAccumulator);
// Record source code position for IC call.
SetSourcePosition(prop->position());
// Call keyed load IC. It has all arguments on the stack and the key in
// r0.
__ ldr(r0, MemOperand(sp, 0));
if (prop->is_synthetic()) {
__ pop(r1); // We do not need to keep the receiver.
} else {
__ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on.
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Load receiver object into r1.
if (prop->is_synthetic()) {
// Push result (function).
__ push(r0);
// Push Global receiver.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ push(r1);
} else {
__ ldr(r1, MemOperand(sp, kPointerSize));
// Pop receiver.
__ pop(r1);
// Push result (function).
__ push(r0);
__ push(r1);
}
// Overwrite (object, key) with (function, receiver).
__ str(r0, MemOperand(sp, kPointerSize));
__ str(r1, MemOperand(sp));
EmitCallWithStub(expr);
}
} else {
@@ -1552,7 +1561,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) {
EmitNamedPropertyLoad(prop);
} else {
VisitForValue(prop->key(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
}
}

deps/v8/src/arm/ic-arm.cc (263)

@@ -683,11 +683,9 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
__ ldr(r1, MemOperand(sp, kPointerSize));
__ Push(r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
@@ -699,11 +697,9 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
__ ldr(r1, MemOperand(sp, kPointerSize));
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -714,18 +710,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label slow, fast, check_pixel_array, check_number_dictionary;
// Get the object from the stack.
__ ldr(r1, MemOperand(sp, kPointerSize));
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi.
__ BranchOnSmi(r1, &slow);
__ BranchOnSmi(receiver, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(kSlowCaseBitFieldMask));
@@ -740,60 +735,65 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(lt, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r0, &slow);
// Save key in r2 in case we want it for the number dictionary case.
__ mov(r2, r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
__ BranchOnNotSmi(key, &slow);
// Untag key into r2.
__ mov(r2, Operand(key, ASR, kSmiTagSize));
// Get the elements array of the object.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, r3);
__ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
__ cmp(r2, r3);
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ mov(r0, r2);
__ Ret();
// Check whether the elements is a pixel array.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
__ cmp(r0, ip);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset));
__ ldrb(r0, MemOperand(ip, r0));
__ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi.
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
__ ldrb(r2, MemOperand(ip, r2));
__ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi.
__ Ret();
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: untagged index
// r1: elements
// r2: key
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
__ mov(r0, r2);
__ Ret();
// Slow case: Push extra copies of the arguments (2).
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
__ ldr(r0, MemOperand(sp, 0));
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
}
@@ -802,8 +802,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;
Label index_not_smi;
@@ -811,9 +810,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Label slow_char_code;
Label got_char_code;
// Get the object from the stack.
__ ldr(r1, MemOperand(sp, kPointerSize));
Register object = r1;
Register index = r0;
Register code = r2;
@@ -913,25 +909,21 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label slow, failed_allocation;
// Get the object from the stack.
__ ldr(r1, MemOperand(sp, kPointerSize));
// r0: key
// r1: receiver object
Register key = r0;
Register receiver = r1;
// Check that the object isn't a smi
__ BranchOnSmi(r1, &slow);
__ BranchOnSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r0, &slow);
__ BranchOnNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
@@ -943,53 +935,51 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: index (as a smi)
// r1: JSObject
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset));
__ cmp(r1, Operand(r0, ASR, kSmiTagSize));
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r0: index (smi)
// r1: elements array
__ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset));
// r1: base pointer of external storage
// r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We are not untagging the smi key; instead we work with it
// as if it were premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (array_type) {
case kExternalByteArray:
__ ldrsb(r0, MemOperand(r1, r0, LSR, 1));
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(r0, MemOperand(r1, r0, LSR, 1));
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(r0, MemOperand(r1, r0, LSL, 0));
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(r0, MemOperand(r1, r0, LSL, 0));
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r0, r1, Operand(r0, LSL, 1));
__ vldr(s0, r0, 0);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
default:
@@ -998,37 +988,36 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
}
// For integer array types:
// r0: value
// r2: value
// For floating-point array type
// s0: value (if VFP3 is supported)
// r0: value (if VFP3 is not supported)
// r2: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(r0, Operand(0xC0000000));
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ mov(r1, r0);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
__ AllocateHeapNumber(r0, r3, r4, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(r1, r0, r3);
WriteInt32ToHeapNumberStub stub(value, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
@@ -1038,51 +1027,60 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(r0, Operand(0xC0000000));
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, r0);
__ AllocateHeapNumber(r0, r1, r2, &slow);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(r0, Operand(0x80000000));
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(r0, Operand(0x40000000));
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, r0, r1, r2, 0);
GenerateUInt2Double(masm, hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, r0, r1, r2, 1);
GenerateUInt2Double(masm, hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers r0:r1.
// Wrap it into a HeapNumber.
__ AllocateHeapNumber(r2, r3, r5, &slow);
__ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ AllocateHeapNumber(r4, r5, r6, &slow);
__ mov(r0, r2);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
@@ -1090,40 +1088,52 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ AllocateHeapNumber(r0, r1, r2, &slow);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ mov(r0, r2);
__ Ret();
} else {
__ AllocateHeapNumber(r3, r1, r2, &slow);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ AllocateHeapNumber(r3, r4, r5, &slow);
// VFP is not available, do manual single to double conversion.
// r0: floating point value (binary32)
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r1.
__ and_(r1, r0, Operand(kBinary32MantissaMask));
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r2.
__ mov(r2, Operand(r0, LSR, kBinary32MantissaBits));
__ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r2, Operand(0x00));
__ teq(r1, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r2, Operand(0xff));
__ mov(r2, Operand(0x7ff), LeaveCC, eq);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r2,
r2,
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r0, r0, Operand(kBinary32SignMask));
__ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord));
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
@@ -1132,24 +1142,25 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord));
__ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord));
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case: Load name and receiver from stack and jump to runtime.
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
__ ldr(r0, MemOperand(sp, 0));
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
}
@@ -1158,14 +1169,10 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label slow;
// Get the object from the stack.
__ ldr(r1, MemOperand(sp, kPointerSize));
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &slow);
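
The non-VFP float path earlier in this file's diff performs the binary32-to-binary64 widening by hand: copy the sign, re-bias the 8-bit exponent to 11 bits (with the zero and 0xff exponents passed through specially), and left-align the 23 mantissa bits across the two words of the HeapNumber. A self-contained host-side sketch of the same conversion (assumes ordinary IEEE-754 layouts; names are illustrative):

#include <cstdio>
#include <cstring>
#include <stdint.h>

// Widen a binary32 bit pattern to binary64 the way the stub does.
static uint64_t Binary32ToBinary64(uint32_t f) {
  uint64_t sign = (uint64_t)(f >> 31) << 63;
  uint32_t exp = (f >> 23) & 0xff;
  uint64_t mantissa = (uint64_t)(f & 0x007fffff) << (52 - 23);
  uint64_t exp64;
  if (exp == 0) {
    exp64 = 0;                 // zero exponent stays zero, as in the stub
  } else if (exp == 0xff) {
    exp64 = 0x7ff;             // infinity / NaN map to the all-ones exponent
  } else {
    exp64 = exp - 127 + 1023;  // re-bias, as in the exponent_rebiased block
  }
  return sign | (exp64 << 52) | mantissa;
}

int main() {
  float x = -1.5f;
  uint32_t bits32;
  std::memcpy(&bits32, &x, sizeof(bits32));
  uint64_t bits64 = Binary32ToBinary64(bits32);
  double d;
  std::memcpy(&d, &bits64, sizeof(d));
  std::printf("%f\n", d);  // prints -1.500000
  return 0;
}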

deps/v8/src/arm/macro-assembler-arm.cc (50)

@@ -232,6 +232,13 @@ void MacroAssembler::LoadRoot(Register destination,
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
}
void MacroAssembler::RecordWriteHelper(Register object,
Register offset,
Register scratch) {
@@ -926,6 +933,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
ASSERT_EQ(0, object_size & kObjectAlignmentMask);
// Load address of new object into result and allocation top address into
// scratch1.
ExternalReference new_space_allocation_top =
@@ -948,23 +961,16 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ExternalReference::new_space_allocation_limit_address();
mov(scratch2, Operand(new_space_allocation_limit));
ldr(scratch2, MemOperand(scratch2));
add(result, result, Operand(object_size * kPointerSize));
add(result, result, Operand(object_size));
cmp(result, Operand(scratch2));
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
if (FLAG_debug_code) {
tst(result, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space");
}
str(result, MemOperand(scratch1));
// Tag and adjust back to start of new object.
if ((flags & TAG_OBJECT) != 0) {
sub(result, result, Operand(object_size - kHeapObjectTag));
} else {
sub(result, result, Operand(object_size));
}
}
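AllocateInNewSpace is a bump-pointer allocator: it loads the current allocation top, advances it by the object size, compares against the allocation limit, and stores the new top back. A hedged sketch of that protocol (illustrative names, not V8's actual types):
#include <cstdint>
#include <cstddef>
struct NewSpace {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the allocatable region
};
// Returns the object's address, or 0 when a GC is required.
uintptr_t Allocate(NewSpace* space, size_t size_in_bytes) {
  uintptr_t result = space->top;
  if (result + size_in_bytes > space->limit) return 0;  // gc_required
  space->top = result + size_in_bytes;                  // publish the new top
  return result;
}
The generated code does the same with result, scratch1 and scratch2 standing in for the result, the top address and the limit.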
@ -1001,7 +1007,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ExternalReference::new_space_allocation_limit_address();
mov(scratch2, Operand(new_space_allocation_limit));
ldr(scratch2, MemOperand(scratch2));
if ((flags & SIZE_IN_WORDS) != 0) {
add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
add(result, result, Operand(object_size));
}
cmp(result, Operand(scratch2));
b(hi, gc_required);
@ -1013,7 +1023,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
str(result, MemOperand(scratch1));
// Adjust back to start of new object.
if ((flags & SIZE_IN_WORDS) != 0) {
sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
sub(result, result, Operand(object_size));
}
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@ -1054,10 +1068,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
AllocateInNewSpace(scratch1,
@ -1088,10 +1099,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
AllocateInNewSpace(scratch1,
@ -1115,7 +1123,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
@ -1135,7 +1143,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
@ -1549,7 +1557,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
result,
scratch1,
scratch2,

28
deps/v8/src/arm/macro-assembler-arm.h

@ -52,6 +52,21 @@ enum InvokeJSFlags {
};
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return a pointer to the allocated object, already tagged as a heap
// object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1,
// Indicates that the requested size of the space to allocate is given in
// words instead of bytes.
SIZE_IN_WORDS = 1 << 2
};
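Since AllocationFlags is a bit mask, the flags combine with bitwise-or; an illustrative fragment assuming the enum above:
// Allocate a tagged object whose size argument is given in words.
int flags = TAG_OBJECT | SIZE_IN_WORDS;
bool tag_object = (flags & TAG_OBJECT) != 0;        // true
bool size_in_words = (flags & SIZE_IN_WORDS) != 0;  // true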
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@ -85,6 +100,10 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond = al);
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond = al);
// Check if object is in new space.
@ -280,7 +299,9 @@ class MacroAssembler: public Assembler {
// Allocate an object in new space. The object_size is specified in bytes, or
// in words if the SIZE_IN_WORDS flag is given. If the new space is exhausted
// control continues at the gc_required label. The allocated object is
// returned in result. If the flag tag_allocated_object is true the result is
// tagged as a heap object. All registers are also clobbered when control
// continues at the gc_required label.
void AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
@ -324,8 +345,9 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are also clobbered
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,

21
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -1210,15 +1210,32 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
offset = r0;
}
// The ldr, str, ldrh and strh instructions can perform unaligned accesses if
// the CPU and the operating system running on the target allow it.
// If unaligned loads and stores are not supported then this function must
// only be used to load a single character at a time.
#if !V8_TARGET_CAN_READ_UNALIGNED
ASSERT(characters == 1);
#endif
if (mode_ == ASCII) {
if (characters == 4) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
ASSERT(mode_ == UC16);
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
}
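When unaligned accesses are allowed, the fast path above fetches several one-byte characters with a single load. A little-endian sketch of what the four-character ldr yields (standalone C++, not V8 code):
#include <cstdint>
#include <cstdio>
#include <cstring>
int main() {
  const char text[] = "abcd";
  uint32_t four;
  memcpy(&four, text, 4);    // the one-instruction load of the fast path
  printf("0x%08x\n", four);  // 0x64636261 on little-endian: 'a' lowest
  return 0;
}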

106
deps/v8/src/arm/simulator-arm.cc

@ -728,6 +728,13 @@ int32_t Simulator::get_register(int reg) const {
}
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@ -864,27 +871,42 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50Bad4U;
}
// Some operating systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
// The statements below describe the behaviour of ARM CPUs that don't support
// unaligned access: some raise an interrupt when an unaligned access is
// detected, while others perform a funky rotation instead. For now we simply
// disallow unaligned reads. Note that simulator runs have the runtime system
// running directly on the host system and only generated code is executed in
// the simulator. Since the host is typically IA32 we will not get the correct
// ARM-like behaviour on unaligned accesses for those ARM targets that don't
// support unaligned loads and stores.
int Simulator::ReadW(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
#else
if ((addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
PrintF("Unaligned read at 0x%08x\n", addr);
PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
return 0;
#endif
}
void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
@ -892,10 +914,15 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
}
PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}
uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
#else
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
@ -903,10 +930,15 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
return 0;
#endif
}
int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
#else
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
@ -914,10 +946,16 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
#endif
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@ -925,10 +963,16 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
}
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}
void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
@ -936,6 +980,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
}
PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}
@ -963,6 +1008,41 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
}
int32_t* Simulator::ReadDW(int32_t addr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
#else
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
PrintF("Unaligned read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
#endif
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
#if V8_TARGET_CAN_READ_UNALIGNED
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
return;
#else
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
return;
}
PrintF("Unaligned write at 0x%08x\n", addr);
UNIMPLEMENTED();
#endif
}
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 256 bytes to prevent overrunning the stack when
@ -1590,7 +1670,19 @@ void Simulator::DecodeType01(Instr* instr) {
}
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
ASSERT((rd % 2) == 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
int32_t value2 = get_register(rd+1);
WriteDW(addr, value1, value2);
} else {
// The ldrd instruction.
int* rn_data = ReadDW(addr);
set_dw_register(rd, rn_data);
}
} else if (instr->HasH()) {
if (instr->HasSign()) {
if (instr->HasL()) {
int16_t val = ReadH(addr, instr);

4
deps/v8/src/arm/simulator-arm.h

@ -159,6 +159,7 @@ class Simulator {
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
void set_s_register(int reg, unsigned int value);
@ -252,6 +253,9 @@ class Simulator {
inline int ReadW(int32_t addr, Instr* instr);
inline void WriteW(int32_t addr, int value, Instr* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
void DecodeType2(Instr* instr);

53
deps/v8/src/arm/stub-cache-arm.cc

@ -1121,11 +1121,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
@ -1175,11 +1171,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
@ -1194,9 +1186,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
Object* result =
CompileCustomCall(id, object, holder, function, name, check);
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) {
return result;
@ -1334,11 +1326,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
@ -1825,8 +1813,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
@ -1834,7 +1821,6 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1850,8 +1836,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
@ -1860,7 +1845,6 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
callback, name, &miss, &failure);
if (!success) return failure;
@ -1879,8 +1863,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
@ -1888,7 +1871,6 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1904,8 +1886,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
@ -1915,7 +1896,6 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
&lookup,
@ -1936,8 +1916,7 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
@ -1945,7 +1924,6 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadArrayLength(masm(), r1, r2, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1958,8 +1936,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
@ -1968,7 +1945,6 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
@ -1984,8 +1960,7 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -2085,7 +2060,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
r5,
r6,
&generic_stub_call,
SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.

50
deps/v8/src/arm/virtual-frame-arm.cc

@ -323,7 +323,8 @@ void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
PopToR1R0();
SpillAll();
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
@ -505,21 +506,25 @@ void VirtualFrame::Dup() {
break;
case R0_TOS:
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_R1_TOS:
__ push(r1);
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ push(r0);
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
default:
UNREACHABLE();
@ -528,6 +533,45 @@ void VirtualFrame::Dup() {
}
void VirtualFrame::Dup2() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_TOS:
__ push(r0);
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R1_R0_TOS;
break;
case R0_R1_TOS:
__ Push(r1, r0);
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ Push(r0, r1);
top_of_stack_state_ = R1_R0_TOS;
break;
default:
UNREACHABLE();
}
}
element_count_ += 2;
}
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||

7
deps/v8/src/arm/virtual-frame-arm.h

@ -316,8 +316,8 @@ class VirtualFrame : public ZoneObject {
// Result is returned in r0.
void CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.
void CallKeyedLoadIC();
// Call keyed store IC. Key and receiver are on the stack and the value is in
@ -355,6 +355,9 @@ class VirtualFrame : public ZoneObject {
// Duplicate the top of stack.
void Dup();
// Duplicate the two elements on top of stack.
void Dup2();
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();

10
deps/v8/src/assembler.cc

@ -670,16 +670,6 @@ ExternalReference ExternalReference::scheduled_exception_address() {
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {

3
deps/v8/src/assembler.h

@ -444,9 +444,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference scheduled_exception_address();
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT

27
deps/v8/src/bootstrapper.cc

@ -37,6 +37,7 @@
#include "macro-assembler.h"
#include "natives.h"
#include "snapshot.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
@ -228,6 +229,7 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
bool InstallNatives();
void InstallCustomCallGenerators();
void InstallJSFunctionResultCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
@ -1229,6 +1231,8 @@ bool Genesis::InstallNatives() {
InstallNativeFunctions();
InstallCustomCallGenerators();
// Install Function.prototype.call and apply.
{ Handle<String> key = Factory::function_class_symbol();
Handle<JSFunction> function =
@ -1326,6 +1330,29 @@ bool Genesis::InstallNatives() {
}
static void InstallCustomCallGenerator(Handle<JSFunction> holder_function,
const char* function_name,
int id) {
Handle<JSObject> proto(JSObject::cast(holder_function->instance_prototype()));
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
Handle<JSFunction> function(JSFunction::cast(proto->GetProperty(*name)));
function->shared()->set_function_data(Smi::FromInt(id));
}
void Genesis::InstallCustomCallGenerators() {
HandleScope scope;
#define INSTALL_CALL_GENERATOR(holder_fun, fun_name, name) \
{ \
Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
const int id = CallStubCompiler::k##name##CallGenerator; \
InstallCustomCallGenerator(holder, #fun_name, id); \
}
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
}
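For illustration only: assuming CUSTOM_CALL_IC_GENERATORS contains an entry like (array, push, ArrayPush) — the real list lives in stub-cache.h — the INSTALL_CALL_GENERATOR macro above would expand to roughly:
{
  Handle<JSFunction> holder(global_context()->array_function());
  const int id = CallStubCompiler::kArrayPushCallGenerator;
  InstallCustomCallGenerator(holder, "push", id);
}
so the Smi-encoded generator id ends up in the shared function info of Array.prototype.push.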
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \

4
deps/v8/src/bootstrapper.h

@ -80,10 +80,6 @@ class Bootstrapper : public AllStatic {
// Tells whether bootstrapping is active.
static bool IsActive() { return BootstrapperActive::IsActive(); }
// Support for thread preemption.
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);

3
deps/v8/src/builtins.cc

@ -377,7 +377,7 @@ static Object* CallJsBuiltin(const char* name,
name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
ScopedVector<Object**> argv(args.length() - 1);
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = args.at<Object>(i + 1).location();
@ -388,7 +388,6 @@ static Object* CallJsBuiltin(const char* name,
n_args,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
return *result;
}
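The NewArray/DeleteArray pairs replaced in this commit follow the RAII pattern of ScopedVector: the destructor releases the backing store when the vector goes out of scope. A simplified stand-in (not V8's actual class) showing the idea:
template <typename T>
class ScopedArray {
 public:
  explicit ScopedArray(int length) : data_(new T[length]) {}
  ~ScopedArray() { delete[] data_; }  // freed on every exit path
  T& operator[](int i) { return data_[i]; }
  T* start() { return data_; }
 private:
  T* data_;
};
void Demo(int n) {
  ScopedArray<int> argv(n);  // no matching delete needed
  for (int i = 0; i < n; i++) argv[i] = i;
}  // storage released here, even on an early return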

34
deps/v8/src/conversions.cc

@ -31,8 +31,8 @@
#include "v8.h"
#include "conversions-inl.h"
#include "dtoa.h"
#include "factory.h"
#include "fast-dtoa.h"
#include "scanner.h"
namespace v8 {
@ -766,15 +766,16 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
default: {
int decimal_point;
int sign;
char* decimal_rep;
bool used_gay_dtoa = false;
const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
char v8_dtoa_buffer[kV8DtoaBufferCapacity];
int length;
if (DoubleToAscii(v, DTOA_SHORTEST, 0,
Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
&sign, &length, &decimal_point)) {
decimal_rep = v8_dtoa_buffer;
} else {
decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
used_gay_dtoa = true;
@ -842,7 +843,11 @@ const char* IntToCString(int n, Vector<char> buffer) {
char* DoubleToFixedCString(double value, int f) {
const int kMaxDigitsBeforePoint = 20;
const double kFirstNonFixed = 1e21;
const int kMaxDigitsAfterPoint = 20;
ASSERT(f >= 0);
ASSERT(f <= kMaxDigitsAfterPoint);
bool negative = false;
double abs_value = value;
@ -851,7 +856,9 @@ char* DoubleToFixedCString(double value, int f) {
negative = true;
}
// If abs_value has more than kMaxDigitsBeforePoint digits before the point,
// use the non-fixed conversion routine.
if (abs_value >= kFirstNonFixed) {
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
return StrDup(DoubleToCString(value, buffer));
@ -860,8 +867,16 @@ char* DoubleToFixedCString(double value, int f) {
// Find a sufficiently precise decimal representation of n.
int decimal_point;
int sign;
// Add space for the '.' and the '\0' byte.
const int kDecimalRepCapacity =
kMaxDigitsBeforePoint + kMaxDigitsAfterPoint + 2;
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
bool status = DoubleToAscii(value, DTOA_FIXED, f,
Vector<char>(decimal_rep, kDecimalRepCapacity),
&sign, &decimal_rep_length, &decimal_point);
USE(status);
ASSERT(status);
// Create a representation that is padded with zeros if needed.
int zero_prefix_length = 0;
@ -884,7 +899,6 @@ char* DoubleToFixedCString(double value, int f) {
rep_builder.AddString(decimal_rep);
rep_builder.AddPadding('0', zero_postfix_length);
char* rep = rep_builder.Finalize();
freedtoa(decimal_rep);
// Create the result string by appending a minus and putting in a
// decimal point if needed.

8
deps/v8/src/dateparser.cc

@ -33,11 +33,7 @@ namespace v8 {
namespace internal {
bool DateParser::DayComposer::Write(FixedArray* output) {
if (index_ < 1) return false;
// Day and month defaults to 1.
while (index_ < kSize) {
comp_[index_++] = 1;
@ -48,7 +44,6 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
int day = kNone;
if (named_month_ == kNone) {
if (index_ == 3 && !IsDay(comp_[0])) {
// YMD
year = comp_[0];
@ -62,7 +57,6 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
}
} else {
month = named_month_;
if (index_ == 1) {
// MD or DM
day = comp_[0];

6
deps/v8/src/debug-agent.cc

@ -181,15 +181,15 @@ void DebuggerAgentSession::Run() {
buf.GetNext();
len++;
}
ScopedVector<int16_t> temp(len + 1);
buf.Reset(*message, StrLength(*message));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
// Send the request received to the debugger.
v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
len);
}
}

18
deps/v8/src/debug.cc

@ -52,14 +52,13 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
ScopedVector<char> data(s->Length() + 1);
if (data.start() == NULL) {
V8::FatalProcessOutOfMemory("PrintLn");
return;
}
s->WriteAscii(data.start());
PrintF("%s\n", data.start());
}
@ -431,8 +430,13 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// is set the patching performed by the runtime system will take place in
// the code copy and will therefore have no effect on the running code
// keeping it from using the inlined code.
if (code->is_keyed_load_stub()) {
KeyedLoadIC::ClearInlinedVersion(pc());
} else if (code->is_keyed_store_stub()) {
KeyedStoreIC::ClearInlinedVersion(pc());
} else if (code->is_load_stub()) {
LoadIC::ClearInlinedVersion(pc());
}
}
}

77
deps/v8/src/dtoa.cc

@ -0,0 +1,77 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "v8.h"
#include "dtoa.h"
#include "double.h"
#include "fast-dtoa.h"
#include "fixed-dtoa.h"
namespace v8 {
namespace internal {
bool DoubleToAscii(double v, DtoaMode mode, int requested_digits,
Vector<char> buffer, int* sign, int* length, int* point) {
ASSERT(!Double(v).IsSpecial());
ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0);
if (Double(v).Sign() < 0) {
*sign = 1;
v = -v;
} else {
*sign = 0;
}
if (v == 0) {
buffer[0] = '0';
buffer[1] = '\0';
*length = 1;
*point = 1;
return true;
}
if (mode == DTOA_PRECISION && requested_digits == 0) {
buffer[0] = '\0';
*length = 0;
return true;
}
switch (mode) {
case DTOA_SHORTEST:
return FastDtoa(v, buffer, length, point);
case DTOA_FIXED:
return FastFixedDtoa(v, requested_digits, buffer, length, point);
default:
break;
}
return false;
}
} } // namespace v8::internal

81
deps/v8/src/dtoa.h

@ -0,0 +1,81 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DTOA_H_
#define V8_DTOA_H_
namespace v8 {
namespace internal {
enum DtoaMode {
// Produce the shortest representation that still reads back as the input
// double.
DTOA_SHORTEST,
// Fixed number of digits after the decimal point.
// For instance fixed(0.1, 4) becomes 0.1000
// If the input number is big, the output will be big.
DTOA_FIXED,
// Fixed number of digits (independent of the decimal point).
DTOA_PRECISION
};
// The maximal length of digits a double can have in base 10.
// Note that DoubleToAscii null-terminates its input. So the given buffer should
// be at least kBase10MaximalLength + 1 characters long.
static const int kBase10MaximalLength = 17;
// Converts the given double 'v' to ascii.
// The result should be interpreted as buffer * 10^(point-length).
//
// The output depends on the given mode:
// - SHORTEST: produce the fewest digits for which the internal
// identity requirement is still satisfied. If the digits are printed
// (together with the correct exponent) then reading this number will give
// 'v' again. The buffer will contain the representation that is closest to
// 'v'. If there are two at the same distance, then the one farther away
// from 0 is chosen (halfway cases - ending with 5 - are rounded up).
// In this mode the 'requested_digits' parameter is ignored.
// - FIXED: produces digits necessary to print a given number with
// 'requested_digits' digits after the decimal point. The produced digits
// might be too short in which case the caller has to fill the gaps with '0's.
// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
// Halfway cases are rounded towards +/-Infinity (away from 0). The call
// toFixed(0.15, 2) thus returns buffer="2", point=0.
// The returned buffer may contain digits that would be truncated from the
// shortest representation of the input.
// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
// Even though the length of produced digits usually equals
// 'requested_digits', the function is allowed to return fewer digits, in
// which case the caller has to fill the missing digits with '0's.
// Halfway cases are again rounded away from 0.
// 'DoubleToAscii' expects the given buffer to be big enough to hold all digits
// and a terminating null-character.
bool DoubleToAscii(double v, DtoaMode mode, int requested_digits,
Vector<char> buffer, int* sign, int* length, int* point);
} } // namespace v8::internal
#endif // V8_DTOA_H_
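A hedged usage sketch of the declaration above, inside v8::internal (Vector<char> takes a buffer and its capacity, as seen in conversions.cc). For 0.25 the shortest form is the digits "25" with point 0, since the result is read as buffer * 10^(point - length):
void Demo() {
  char buffer[kBase10MaximalLength + 1];
  int sign, length, point;
  bool ok = DoubleToAscii(0.25, DTOA_SHORTEST, 0,
                          Vector<char>(buffer, kBase10MaximalLength + 1),
                          &sign, &length, &point);
  // Expected on success: buffer == "25", length == 2, point == 0,
  // sign == 0, i.e. 0.25 == 25 * 10^(0 - 2).
  if (ok) PrintF("%s * 10^%d\n", buffer, point - length);
}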

11
deps/v8/src/fast-dtoa.cc

@ -314,7 +314,7 @@ static void BiggestPowerTen(uint32_t number,
// w's fractional part is therefore 0x567890abcdef.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the point would be
// computed by
// (0x567890abcdef * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
@ -490,18 +490,11 @@ bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
bool FastDtoa(double v,
Vector<char> buffer,
int* length,
int* point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
int decimal_exponent;
bool result = grisu3(v, buffer, length, &decimal_exponent);
*point = *length + decimal_exponent;

3
deps/v8/src/fast-dtoa.h

@ -36,7 +36,7 @@ namespace internal {
static const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
// v must be a strictly positive finite double.
// Returns true if it succeeds, otherwise the result can not be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true then
@ -50,7 +50,6 @@ static const int kFastDtoaMaximalLength = 17;
// otherwise.
bool FastDtoa(double d,
Vector<char> buffer,
int* length,
int* point);

405
deps/v8/src/fixed-dtoa.cc

@ -0,0 +1,405 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "v8.h"
#include "double.h"
#include "fixed-dtoa.h"
namespace v8 {
namespace internal {
// Represents a 128bit type. This class should be replaced by a native type on
// platforms that support 128bit integers.
class UInt128 {
public:
UInt128() : high_bits_(0), low_bits_(0) { }
UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
void Multiply(uint32_t multiplicand) {
uint64_t accumulator;
accumulator = (low_bits_ & kMask32) * multiplicand;
uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
accumulator >>= 32;
accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
low_bits_ = (accumulator << 32) + part;
accumulator >>= 32;
accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
part = static_cast<uint32_t>(accumulator & kMask32);
accumulator >>= 32;
accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
high_bits_ = (accumulator << 32) + part;
ASSERT((accumulator >> 32) == 0);
}
void Shift(int shift_amount) {
ASSERT(-64 <= shift_amount && shift_amount <= 64);
if (shift_amount == 0) {
return;
} else if (shift_amount == -64) {
high_bits_ = low_bits_;
low_bits_ = 0;
} else if (shift_amount == 64) {
low_bits_ = high_bits_;
high_bits_ = 0;
} else if (shift_amount <= 0) {
high_bits_ <<= -shift_amount;
high_bits_ += low_bits_ >> (64 + shift_amount);
low_bits_ <<= -shift_amount;
} else {
low_bits_ >>= shift_amount;
low_bits_ += high_bits_ << (64 - shift_amount);
high_bits_ >>= shift_amount;
}
}
// Modifies *this to *this MOD (2^power).
// Returns *this DIV (2^power).
int DivModPowerOf2(int power) {
if (power >= 64) {
int result = static_cast<int>(high_bits_ >> (power - 64));
high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
return result;
} else {
uint64_t part_low = low_bits_ >> power;
uint64_t part_high = high_bits_ << (64 - power);
int result = static_cast<int>(part_low + part_high);
high_bits_ = 0;
low_bits_ -= part_low << power;
return result;
}
}
bool IsZero() const {
return high_bits_ == 0 && low_bits_ == 0;
}
int BitAt(int position) {
if (position >= 64) {
return static_cast<int>(high_bits_ >> (position - 64)) & 1;
} else {
return static_cast<int>(low_bits_ >> position) & 1;
}
}
private:
static const uint64_t kMask32 = 0xFFFFFFFF;
// Value == (high_bits_ << 64) + low_bits_
uint64_t high_bits_;
uint64_t low_bits_;
};
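A standalone check of the limb arithmetic used by UInt128::Multiply: each 64-bit word is split into 32-bit halves so that every partial product (32x32 -> 64 bits) fits the uint64_t accumulator, with the carry travelling in the upper half. Sketch for a single 64-bit word:
#include <cstdint>
#include <cassert>
uint64_t MulLimbs(uint64_t value, uint32_t m) {
  const uint64_t kMask32 = 0xFFFFFFFF;
  uint64_t acc = (value & kMask32) * m;              // low limb product
  uint32_t low = static_cast<uint32_t>(acc & kMask32);
  acc >>= 32;                                        // carry
  acc += (value >> 32) * m;                          // high limb + carry
  return (acc << 32) + low;  // any carry out of bit 63 is dropped here
}
int main() {
  assert(MulLimbs(123456789012345ULL, 1000) == 123456789012345ULL * 1000);
  return 0;
}
UInt128::Multiply applies the same step twice, letting the final carry flow from low_bits_ into high_bits_.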
static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
static void FillDigits32FixedLength(uint32_t number, int requested_length,
Vector<char> buffer, int* length) {
for (int i = requested_length - 1; i >= 0; --i) {
buffer[(*length) + i] = '0' + number % 10;
number /= 10;
}
*length += requested_length;
}
static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
int number_length = 0;
// We fill the digits in reverse order and exchange them afterwards.
while (number != 0) {
int digit = number % 10;
number /= 10;
buffer[(*length) + number_length] = '0' + digit;
number_length++;
}
// Exchange the digits.
int i = *length;
int j = *length + number_length - 1;
while (i < j) {
char tmp = buffer[i];
buffer[i] = buffer[j];
buffer[j] = tmp;
i++;
j--;
}
*length += number_length;
}
static void FillDigits64FixedLength(uint64_t number, int requested_length,
Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
// For efficiency cut the number into 3 uint32_t parts, and print those.
uint32_t part2 = static_cast<uint32_t>(number % kTen7);
number /= kTen7;
uint32_t part1 = static_cast<uint32_t>(number % kTen7);
uint32_t part0 = static_cast<uint32_t>(number / kTen7);
FillDigits32FixedLength(part0, 3, buffer, length);
FillDigits32FixedLength(part1, 7, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
}
static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
// For efficiency cut the number into 3 uint32_t parts, and print those.
uint32_t part2 = static_cast<uint32_t>(number % kTen7);
number /= kTen7;
uint32_t part1 = static_cast<uint32_t>(number % kTen7);
uint32_t part0 = static_cast<uint32_t>(number / kTen7);
if (part0 != 0) {
FillDigits32(part0, buffer, length);
FillDigits32FixedLength(part1, 7, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
} else if (part1 != 0) {
FillDigits32(part1, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
} else {
FillDigits32(part2, buffer, length);
}
}
static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
// An empty buffer represents 0.
if (*length == 0) {
buffer[0] = '1';
*decimal_point = 1;
*length = 1;
return;
}
// Round the last digit until we either have a digit that is not '9' or we
// reach the first digit.
buffer[(*length) - 1]++;
for (int i = (*length) - 1; i > 0; --i) {
if (buffer[i] != '0' + 10) {
return;
}
buffer[i] = '0';
buffer[i - 1]++;
}
// If the first digit is now '0' + 10, we would need to set it to '0' and add
// a '1' in front. However we reach the first digit only if all following
// digits had been '9' before rounding up. Now all trailing digits are '0' and
// we simply switch the first digit to '1' and update the decimal-point
// (indicating that the point is now one digit to the right).
if (buffer[0] == '0' + 10) {
buffer[0] = '1';
(*decimal_point)++;
}
}
// The given fractionals number represents a fixed-point number with binary
// point at bit (-exponent).
// Preconditions:
// -128 <= exponent <= 0.
// 0 <= fractionals * 2^exponent < 1
// The buffer holds the result.
// The function will round its result. During the rounding-process digits not
// generated by this function might be updated, and the decimal-point variable
// might be updated. If this function generates the digits 99 and the buffer
// already contained "199" (thus yielding a buffer of "19999") then a
// rounding-up will change the contents of the buffer to "20000".
static void FillFractionals(uint64_t fractionals, int exponent,
int fractional_count, Vector<char> buffer,
int* length, int* decimal_point) {
ASSERT(-128 <= exponent && exponent <= 0);
// 'fractionals' is a fixed-point number, with binary point at bit
// (-exponent). Inside the function the non-converted remainder of fractionals
// is a fixed-point number, with binary point at bit 'point'.
if (-exponent <= 64) {
// One 64 bit number is sufficient.
ASSERT(fractionals >> 56 == 0);
int point = -exponent;
for (int i = 0; i < fractional_count; ++i) {
if (fractionals == 0) break;
// Instead of multiplying by 10 we multiply by 5 and adjust the point
// location. This way the fractionals variable will not overflow.
// Invariant at the beginning of the loop: fractionals < 2^point.
// Initially we have: point <= 64 and fractionals < 2^56
// After each iteration the point is decremented by one.
// Note that 5^3 = 125 < 128 = 2^7.
// Therefore three iterations of this loop will not overflow fractionals
// (even without the subtraction at the end of the loop body). At this
// time point will satisfy point <= 61 and therefore fractionals < 2^point
// and any further multiplication of fractionals by 5 will not overflow.
fractionals *= 5;
point--;
int digit = static_cast<int>(fractionals >> point);
buffer[*length] = '0' + digit;
(*length)++;
fractionals -= static_cast<uint64_t>(digit) << point;
}
// If the first bit after the point is set we have to round up.
if (((fractionals >> (point - 1)) & 1) == 1) {
RoundUp(buffer, length, decimal_point);
}
} else { // We need 128 bits.
ASSERT(64 < -exponent && -exponent <= 128);
UInt128 fractionals128 = UInt128(fractionals, 0);
fractionals128.Shift(-exponent - 64);
int point = 128;
for (int i = 0; i < fractional_count; ++i) {
if (fractionals128.IsZero()) break;
// As before: instead of multiplying by 10 we multiply by 5 and adjust the
// point location.
// This multiplication will not overflow for the same reasons as before.
fractionals128.Multiply(5);
point--;
int digit = fractionals128.DivModPowerOf2(point);
buffer[*length] = '0' + digit;
(*length)++;
}
if (fractionals128.BitAt(point - 1) == 1) {
RoundUp(buffer, length, decimal_point);
}
}
}
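The multiply-by-5 trick in FillFractionals is worth seeing in isolation: multiplying the fixed-point fraction by 5 while moving the binary point one bit to the right is the same as multiplying by 10, but keeps the intermediate value small. A standalone trace for 0.625 (f = 5, point = 3):
#include <cstdint>
#include <cstdio>
int main() {
  uint64_t f = 5;  // 0.625 == 5 / 2^3
  int point = 3;
  for (int i = 0; i < 3 && f != 0; ++i) {
    f *= 5;    // x10 overall, because the next line halves 2^point
    point--;
    int digit = static_cast<int>(f >> point);      // integer part
    printf("%d", digit);
    f -= static_cast<uint64_t>(digit) << point;    // keep the remainder
  }
  printf("\n");  // prints "625"
  return 0;
}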
// Removes leading and trailing zeros.
// If leading zeros are removed then the decimal point position is adjusted.
static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
while (*length > 0 && buffer[(*length) - 1] == '0') {
(*length)--;
}
int first_non_zero = 0;
while (first_non_zero < *length && buffer[first_non_zero] == '0') {
first_non_zero++;
}
if (first_non_zero != 0) {
for (int i = first_non_zero; i < *length; ++i) {
buffer[i - first_non_zero] = buffer[i];
}
*length -= first_non_zero;
*decimal_point -= first_non_zero;
}
}
bool FastFixedDtoa(double v,
int fractional_count,
Vector<char> buffer,
int* length,
int* decimal_point) {
const uint32_t kMaxUInt32 = 0xFFFFFFFF;
uint64_t significand = Double(v).Significand();
int exponent = Double(v).Exponent();
// v = significand * 2^exponent (with significand a 53bit integer).
// If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
// don't know how to compute the representation. 2^73 ~= 9.5*10^21.
// If necessary this limit could probably be increased, but we don't need
// more.
if (exponent > 20) return false;
if (fractional_count > 20) return false;
*length = 0;
// At most kDoubleSignificandSize bits of the significand are non-zero.
// Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
// bits: 0..11*..0xxx..53*..xx
if (exponent + kDoubleSignificandSize > 64) {
// The exponent must be > 11.
//
// We know that v = significand * 2^exponent.
// And the exponent > 11.
// We simplify the task by dividing v by 10^17.
// The quotient delivers the first digits, and the remainder fits into a 64
// bit number.
// Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
const uint64_t kFive17 = V8_2PART_UINT64_C(0xB1, A2BC2EC5); // 5^17
uint64_t divisor = kFive17;
int divisor_power = 17;
uint64_t dividend = significand;
uint32_t quotient;
uint64_t remainder;
// Let v = f * 2^e with f == significand and e == exponent.
// Then we need q (quotient) and r (remainder) as follows:
// v = q * 10^17 + r
// f * 2^e = q * 10^17 + r
// f * 2^e = q * 5^17 * 2^17 + r
// If e > 17 then
// f * 2^(e-17) = q * 5^17 + r/2^17
// else
// f = q * 5^17 * 2^(17-e) + r/2^e
if (exponent > divisor_power) {
// We only allow exponents of up to 20 and therefore (e - 17) <= 3
dividend <<= exponent - divisor_power;
quotient = static_cast<uint32_t>(dividend / divisor);
remainder = (dividend % divisor) << divisor_power;
} else {
divisor <<= divisor_power - exponent;
quotient = static_cast<uint32_t>(dividend / divisor);
remainder = (dividend % divisor) << exponent;
}
FillDigits32(quotient, buffer, length);
FillDigits64FixedLength(remainder, divisor_power, buffer, length);
*decimal_point = *length;
} else if (exponent >= 0) {
// 0 <= exponent <= 11
significand <<= exponent;
FillDigits64(significand, buffer, length);
*decimal_point = *length;
} else if (exponent > -kDoubleSignificandSize) {
// We have to cut the number.
uint64_t integrals = significand >> -exponent;
uint64_t fractionals = significand - (integrals << -exponent);
if (integrals > kMaxUInt32) {
FillDigits64(integrals, buffer, length);
} else {
FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
}
*decimal_point = *length;
FillFractionals(fractionals, exponent, fractional_count,
buffer, length, decimal_point);
} else if (exponent < -128) {
// This configuration (with at most 20 digits) means that all digits must be
// 0.
ASSERT(fractional_count <= 20);
buffer[0] = '\0';
*length = 0;
*decimal_point = -fractional_count;
} else {
*decimal_point = 0;
FillFractionals(significand, exponent, fractional_count,
buffer, length, decimal_point);
}
TrimZeros(buffer, length, decimal_point);
buffer[*length] = '\0';
if ((*length) == 0) {
// The string is empty and the decimal_point thus has no importance. Mimic
// Gay's dtoa and set it to -fractional_count.
*decimal_point = -fractional_count;
}
return true;
}
} } // namespace v8::internal
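The 10^17 = 5^17 * 2^17 split in the large-exponent branch can be checked in isolation. A hedged sketch for the e <= 17 case, with a sample significand and exponent chosen so that nothing overflows 64 bits:
#include <cstdint>
#include <cassert>
int main() {
  const uint64_t kFive17 = 762939453125ULL;  // 5^17
  const uint64_t kTen17 = kFive17 << 17;     // 10^17
  uint64_t f = 4000000000000000ULL;          // sample significand
  int e = 12;                                // sample exponent, e <= 17
  // Per the derivation above: f = q * 5^17 * 2^(17-e) + r / 2^e.
  uint64_t divisor = kFive17 << (17 - e);
  uint64_t q = f / divisor;
  uint64_t r = (f % divisor) << e;
  assert(q * kTen17 + r == f << e);          // v = q * 10^17 + r
  return 0;
}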

55
deps/v8/src/fixed-dtoa.h

@ -0,0 +1,55 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FIXED_DTOA_H_
#define V8_FIXED_DTOA_H_
namespace v8 {
namespace internal {
// Produces digits necessary to print a given number with
// 'fractional_count' digits after the decimal point.
// The buffer must be big enough to hold the result plus one terminating null
// character.
//
// The produced digits might be too short in which case the caller has to fill
// the gaps with '0's.
// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
// decimal_point = -2.
// Halfway cases are rounded towards +/-Infinity (away from 0). The call
// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
// The returned buffer may contain digits that would be truncated from the
// shortest representation of the input.
//
// This method only works for some parameters. If it can't handle the input it
// returns false. The output is null-terminated when the function succeeds.
bool FastFixedDtoa(double v, int fractional_count,
Vector<char> buffer, int* length, int* decimal_point);
} } // namespace v8::internal
#endif // V8_FIXED_DTOA_H_
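A hedged usage sketch inside v8::internal (Vector<char> as in conversions.cc); 1.5 is exactly representable, so the outcome is unambiguous:
void Demo() {
  char buffer[32];
  int length, decimal_point;
  bool ok = FastFixedDtoa(1.5, 1, Vector<char>(buffer, 32),
                          &length, &decimal_point);
  // Expected: ok == true, buffer == "15", length == 2, decimal_point == 1,
  // i.e. 1.5 == 15 * 10^(decimal_point - length).
  if (ok) PrintF("%s e%d\n", buffer, decimal_point - length);
}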

14
deps/v8/src/flags.cc

@ -470,12 +470,12 @@ static char* SkipBlackSpace(char* p) {
// static
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
memcpy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
char* copy = SkipWhiteSpace(copy0.start());
// count the number of 'arguments'
int argc = 1; // be compatible with SetFlagsFromCommandLine()
@ -485,7 +485,7 @@ int FlagList::SetFlagsFromString(const char* str, int len) {
}
// allocate argument array
ScopedVector<char*> argv(argc);
// split the flags string into arguments
argc = 1; // be compatible with SetFlagsFromCommandLine()
@ -497,11 +497,7 @@ int FlagList::SetFlagsFromString(const char* str, int len) {
}
// set the flags
int result = SetFlagsFromCommandLine(&argc, argv.start(), false);
return result;
}

1
deps/v8/src/frames.h

@ -357,6 +357,7 @@ class StandardFrame: public StackFrame {
private:
friend class StackFrame;
friend class StackFrameIterator;
};

12
deps/v8/src/globals.h

@ -46,6 +46,12 @@ namespace internal {
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
@ -73,6 +79,12 @@ namespace internal {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_TARGET_CAN_READ_UNALIGNED 1
#endif
#elif V8_TARGET_ARCH_MIPS
#else
#error Target architecture is not supported by v8
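A sketch of how such a macro is typically consumed; the helper below is hypothetical and only illustrates the guard, since the real call sites live elsewhere in the tree:

#include <stdint.h>
#include <cstring>

static inline uint32_t ReadUint32(const uint8_t* p) {
#if V8_TARGET_CAN_READ_UNALIGNED
  // A single, possibly unaligned, load is safe on this target.
  return *reinterpret_cast<const uint32_t*>(p);
#else
  uint32_t result;  // byte-wise fallback for strict-alignment targets
  std::memcpy(&result, p, sizeof(result));
  return result;
#endif
}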

6
deps/v8/src/heap.cc

@ -674,6 +674,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
}
@ -1685,6 +1687,10 @@ bool Heap::CreateInitialObjects() {
if (obj->IsFailure()) return false;
set_non_monomorphic_cache(NumberDictionary::cast(obj));
set_instanceof_cache_function(Smi::FromInt(0));
set_instanceof_cache_map(Smi::FromInt(0));
set_instanceof_cache_answer(Smi::FromInt(0));
CreateFixedStubs();
if (InitializeNumberStringCache()->IsFailure()) return false;

19
deps/v8/src/heap.h

@ -93,6 +93,9 @@ class ZoneScopeInfo;
V(Map, proxy_map, ProxyMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, neander_map, NeanderMap) \
@ -361,6 +364,11 @@ class Heap : public AllStatic {
// Allocates an empty code cache.
static Object* AllocateCodeCache();
// Clear the Instanceof cache (used when a prototype changes).
static void ClearInstanceofCache() {
set_instanceof_cache_function(the_hole_value());
}
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
@ -971,6 +979,8 @@ class Heap : public AllStatic {
static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
static void ClearJSFunctionResultCaches();
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
@ -1171,6 +1181,13 @@ class Heap : public AllStatic {
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
static void CompletelyClearInstanceofCache() {
set_instanceof_cache_map(the_hole_value());
set_instanceof_cache_function(the_hole_value());
}
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
@ -1178,8 +1195,6 @@ class Heap : public AllStatic {
HeapObject* target,
int size);
static void ClearJSFunctionResultCaches();
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record the copy of an object in the NewSpace's statistics.
static void RecordCopiedObject(HeapObject* obj);
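Taken together, the three new root-list entries act as a single-entry memo for instanceof. A conceptual sketch (the real cache is three tagged slots in the roots array, not a struct):

struct InstanceofCacheSketch {
  void* function;  // the constructor on the right of 'x instanceof F'
  void* map;       // the map of the object on the left
  void* answer;    // cached result for exactly that (function, map) pair
};
// ClearInstanceofCache() invalidates by overwriting 'function' with the
// hole; CompletelyClearInstanceofCache() also clears 'map' so the cache
// cannot keep a map alive across a compacting GC.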

276
deps/v8/src/ia32/codegen-ia32.cc

@ -829,14 +829,6 @@ void CodeGenerator::LoadReference(Reference* ref) {
}
void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
ref->set_unloaded();
}
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
@ -1426,6 +1418,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
OverwriteMode overwrite_mode) {
// Copy the type info because left and right may be overwritten.
TypeInfo left_type_info = left->type_info();
TypeInfo right_type_info = right->type_info();
Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
@ -1501,8 +1496,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
(op == Token::DIV) ? eax : edx,
left->reg(),
right->reg(),
left->type_info(),
right->type_info(),
left_type_info,
right_type_info,
overwrite_mode);
if (left->reg().is(right->reg())) {
__ test(left->reg(), Immediate(kSmiTagMask));
@ -1605,18 +1600,18 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
answer.reg(),
left->reg(),
ecx,
left->type_info(),
right->type_info(),
left_type_info,
right_type_info,
overwrite_mode);
Label do_op, left_nonsmi;
// If right is a smi we generate a fast case when left is either a smi
// or a heap number.
if (CpuFeatures::IsSupported(SSE2) && right->type_info().IsSmi()) {
if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
CpuFeatures::Scope use_sse2(SSE2);
__ mov(answer.reg(), left->reg());
// Fast case - both are actually smis.
if (!left->type_info().IsSmi()) {
if (!left_type_info.IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, &left_nonsmi);
} else {
@ -1640,7 +1635,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
deferred->Branch(negative);
} else {
CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
left->type_info(), right->type_info(), deferred);
left_type_info, right_type_info, deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
@ -1713,11 +1708,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
answer.reg(),
left->reg(),
right->reg(),
left->type_info(),
right->type_info(),
left_type_info,
right_type_info,
overwrite_mode);
CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
left->type_info(), right->type_info(), deferred);
left_type_info, right_type_info, deferred);
__ mov(answer.reg(), left->reg());
switch (op) {
@ -1988,18 +1983,13 @@ void DeferredInlineSmiSub::Generate() {
}
Result CodeGenerator::ConstantSmiBinaryOperation(
BinaryOperation* expr,
Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> value,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
// Generate inline code for a binary operation when one of the
// operands is a constant smi. Consumes the argument "operand".
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
@ -2499,7 +2489,9 @@ void CodeGenerator::Comparison(AstNode* node,
// by reconstituting them on the non-fall-through path.
if (left_side.is_smi()) {
if (FLAG_debug_code) __ AbortIfNotSmi(left_side.reg());
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg());
}
} else {
JumpTarget is_smi;
__ test(left_side.reg(), Immediate(kSmiTagMask));
@ -2528,7 +2520,7 @@ void CodeGenerator::Comparison(AstNode* node,
__ cvtsi2sd(xmm0, Operand(temp.reg()));
temp.Unuse();
}
__ comisd(xmm1, xmm0);
__ ucomisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
@ -2819,11 +2811,7 @@ void CodeGenerator::Comparison(AstNode* node,
// number comparison in the stub if it was inlined.
CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
} else {
__ cmp(answer.reg(), 0);
}
answer.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
@ -4750,7 +4738,8 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
@ -5774,11 +5763,66 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// JavaScript examples:
//
// with (obj) foo(1, 2, 3) // foo is in obj
//
// function f() {};
// function g() {
// eval(...);
// f(); // f could be in extension object
// }
// ----------------------------------
// Load the function from the context. Sync the frame so we can
// push the arguments directly into place.
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
Result function;
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
function = LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = var->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
function = allocator()->Allocate();
ASSERT(function.is_valid());
__ mov(function.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
function,
&slow));
JumpTarget push_function_and_receiver;
if (potential_slot->var()->mode() == Variable::CONST) {
__ cmp(function.reg(), Factory::the_hole_value());
push_function_and_receiver.Branch(not_equal, &function);
__ mov(function.reg(), Factory::undefined_value());
}
push_function_and_receiver.Bind(&function);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
}
}
slow.Bind();
// Enter the runtime system to load the function from the context.
// Sync the frame so we can push the arguments directly into
// place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(var->name()));
@ -5795,6 +5839,7 @@ void CodeGenerator::VisitCall(Call* node) {
ASSERT(!allocator()->is_used(edx));
frame_->EmitPush(edx);
done.Bind();
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@ -6582,14 +6627,120 @@ class DeferredSearchCache: public DeferredCode {
virtual void Generate();
private:
Register dst_, cache_, key_;
Register dst_;    // On invocation, holds the Smi index of the finger; on
                  // exit, holds the value being looked up.
Register cache_;  // The JSFunctionResultCache instance.
Register key_;    // The key being looked up.
};
// Returns the Operand of the element at |index_as_smi| + |additional_offset|
// in the FixedArray whose pointer is held in |array|. |index_as_smi| is a Smi.
static Operand ArrayElement(Register array,
Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
void DeferredSearchCache::Generate() {
__ push(cache_);
Label first_loop, search_further, second_loop, cache_miss;
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
// Check the cache from finger to start of the cache.
__ bind(&first_loop);
__ sub(Operand(dst_), Immediate(kEntrySizeSmi));
__ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
__ j(less, &search_further);
__ cmp(key_, ArrayElement(cache_, dst_));
__ j(not_equal, &first_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ mov(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&search_further);
// Check the cache from end of cache up to finger.
__ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
__ bind(&second_loop);
__ sub(Operand(dst_), Immediate(kEntrySizeSmi));
// Consider prefetching into some reg.
__ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ j(less_equal, &cache_miss);
__ cmp(key_, ArrayElement(cache_, dst_));
__ j(not_equal, &second_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ mov(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
Handle<Object> receiver(Top::global_context()->global());
__ push(Immediate(receiver));
__ push(key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
// On ia32 function must be in edi.
__ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
ParameterCount expected(1);
__ InvokeFunction(edi, expected, CALL_FUNCTION);
// Find a place to put the new cached value.
Label add_new_entry, update_cache;
__ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
// Possible optimization: the cache size is constant for a given cache, so
// technically we could use a constant here. However, on a cache miss this
// optimization would hardly matter.
// Check if we can add a new entry to the cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ SmiTag(ebx);
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);
// Check if we can evict the entry after the finger.
__ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
__ add(Operand(edx), Immediate(kEntrySizeSmi));
__ cmp(ebx, Operand(edx));
__ j(greater, &update_cache);
// Need to wrap around to the start of the cache.
__ mov(edx, Immediate(kEntriesIndexSmi));
__ jmp(&update_cache);
__ bind(&add_new_entry);
__ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
__ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
// Update the cache itself.
// edx holds the index.
__ bind(&update_cache);
__ pop(ebx); // restore the key
__ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
// Store key.
__ mov(ArrayElement(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
// Store value.
__ pop(ecx); // restore the cache.
__ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
__ add(Operand(edx), Immediate(Smi::FromInt(1)));
__ mov(ebx, eax);
__ mov(ArrayElement(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
if (!dst_.is(eax)) {
__ mov(dst_, eax);
}
@ -6631,21 +6782,14 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
cache.reg(),
key.reg());
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
__ cmp(key.reg(), FieldOperand(cache.reg(),
tmp.reg(), // as smi
times_half_pointer_size,
FixedArray::kHeaderSize));
__ mov(tmp.reg(), FieldOperand(cache.reg(),
JSFunctionResultCache::kFingerOffset));
__ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg()));
deferred->Branch(not_equal);
__ mov(tmp.reg(), FieldOperand(cache.reg(),
tmp.reg(), // as smi
times_half_pointer_size,
kPointerSize + FixedArray::kHeaderSize));
__ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1));
deferred->BindExit();
frame_->Push(&tmp);
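A plain-C++ sketch of the search that the fast path plus DeferredSearchCache implement; the constants mirror the JSFunctionResultCache layout declared in the objects.h hunk later in this diff (factory, finger, and cache size occupy the first three elements):

static const int kEntriesIndex = 3;  // elements 0..2: factory, finger, size
static const int kEntrySize = 2;     // each entry is a key/value pair

// Returns the index of the cached value, or -1 on a miss (in which case
// the deferred code calls the factory function and inserts the result).
int SearchCache(void** cache, int finger, int size, void* key) {
  // From the finger down to the start of the entries ...
  for (int i = finger - kEntrySize; i >= kEntriesIndex; i -= kEntrySize) {
    if (cache[i] == key) return i + 1;  // the value sits right after the key
  }
  // ... then from the end of the cache down to the finger.
  for (int i = size - kEntrySize; i > finger; i -= kEntrySize) {
    if (cache[i] == key) return i + 1;
  }
  return -1;
}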
@ -10958,7 +11102,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ j(not_zero, &runtime);
__ cmp(eax, Operand(ebx));
__ j(above_equal, &runtime);
@ -12128,6 +12272,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// edx is function, eax is map.
// Look up the function and the map in the instanceof cache.
Label miss;
ExternalReference roots_address = ExternalReference::roots_address();
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ ret(2 * kPointerSize);
__ bind(&miss);
__ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
// Check that the function prototype is a JS object.
@ -12140,7 +12300,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(greater, &slow, not_taken);
// Register mapping: eax is object map and ebx is function prototype.
// Register mapping:
// eax is object map.
// edx is function.
// ebx is function prototype.
__ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
__ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
@ -12156,10 +12324,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
__ Set(eax, Immediate(0));
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ ret(2 * kPointerSize);
__ bind(&is_not_instance);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ ret(2 * kPointerSize);
// Slow-case: Go through the JavaScript implementation.

3
deps/v8/src/ia32/codegen-ia32.h

@ -48,7 +48,7 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// by GetValue, TakeValue and SetValue.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
@ -414,7 +414,6 @@ class CodeGenerator: public AstVisitor {
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));

11
deps/v8/src/ia32/macro-assembler-ia32.h

@ -33,6 +33,17 @@
namespace v8 {
namespace internal {
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1
};
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
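The flags form a bitmask, so call sites can combine them when invoking one of the AllocateInNewSpace overloads; a short sketch with the allocation call itself elided:

// Request a result that is already tagged as a heap object and whose
// register already holds the new-space allocation top:
AllocationFlags flags =
    static_cast<AllocationFlags>(TAG_OBJECT | RESULT_CONTAINS_TOP);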

26
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1264,16 +1264,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}
@ -1351,16 +1346,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}
@ -1379,9 +1369,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
Object* result = generator(this, object, holder, function, name, check);
const int id = function_info->custom_call_generator_id();
Object* result =
CompileCustomCall(id, object, holder, function, name, check);
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) {
return result;
@ -1518,11 +1508,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}

14
deps/v8/src/ic.cc

@ -596,10 +596,16 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
#endif
Map* map = HeapObject::cast(*object)->map();
if (object->IsString()) {
const int offset = String::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
}
Code* target = NULL;
target = Builtins::builtin(Builtins::LoadIC_StringLength);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
StubCache::Set(*name, map, target);
return Smi::FromInt(String::cast(*object)->length());
}
@ -608,9 +614,13 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
Map* map = HeapObject::cast(*object)->map();
const int offset = JSArray::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
StubCache::Set(*name, map, target);
return JSArray::cast(*object)->length();
}

6
deps/v8/src/ic.h

@ -239,6 +239,9 @@ class LoadIC: public IC {
static void GenerateStringLength(MacroAssembler* masm);
static void GenerateFunctionPrototype(MacroAssembler* masm);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is architecture-dependent, and not
// used on ARM.
@ -265,9 +268,6 @@ class LoadIC: public IC {
static void Clear(Address address, Code* target);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
static bool PatchInlinedLoad(Address address, Object* map, int index);
friend class IC;

7
deps/v8/src/log.cc

@ -1313,9 +1313,8 @@ void Logger::LogCodeObjects() {
void Logger::LogCompiledFunctions() {
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
Handle<SharedFunctionInfo>* sfis =
NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
EnumerateCompiledFunctions(sfis);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
EnumerateCompiledFunctions(sfis.start());
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
@ -1360,8 +1359,6 @@ void Logger::LogCompiledFunctions() {
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
}
DeleteArray(sfis);
}

11
deps/v8/src/macro-assembler.h

@ -50,17 +50,6 @@ enum HandlerType {
};
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1
};
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;

7
deps/v8/src/messages.js

@ -42,6 +42,9 @@ var COMPILATION_TYPE_JSON = 2;
var kVowelSounds = 0;
var kCapitalVowelSounds = 0;
// Matches Messages::kNoLineNumberInfo from v8.h
var kNoLineNumberInfo = 0;
// If this object gets passed to an error constructor the error will
// get an accessor for .message that constructs a descriptive error
// message on access.
@ -203,9 +206,9 @@ function FormatMessage(message) {
function GetLineNumber(message) {
if (message.startPos == -1) return -1;
if (message.startPos == -1) return kNoLineNumberInfo;
var location = message.script.locationFromPosition(message.startPos, true);
if (location == null) return -1;
if (location == null) return kNoLineNumberInfo;
return location.line + 1;
}

8
deps/v8/src/objects-inl.h

@ -2530,7 +2530,13 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
bool SharedFunctionInfo::HasCustomCallGenerator() {
return function_data()->IsProxy();
return function_data()->IsSmi();
}
int SharedFunctionInfo::custom_call_generator_id() {
ASSERT(HasCustomCallGenerator());
return Smi::cast(function_data())->value();
}

19
deps/v8/src/objects.cc

@ -682,11 +682,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
SmartPointer<uc16> smart_chars(NewArray<uc16>(this->length()));
String::WriteToFlat(this, *smart_chars, 0, this->length());
ASSERT(memcmp(*smart_chars,
ScopedVector<uc16> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
ASSERT(memcmp(smart_chars.start(),
resource->data(),
resource->length() * sizeof(**smart_chars)) == 0);
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
@ -728,11 +728,11 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
SmartPointer<char> smart_chars(NewArray<char>(this->length()));
String::WriteToFlat(this, *smart_chars, 0, this->length());
ASSERT(memcmp(*smart_chars,
ScopedVector<char> smart_chars(this->length());
String::WriteToFlat(this, smart_chars.start(), 0, this->length());
ASSERT(memcmp(smart_chars.start(),
resource->data(),
resource->length()*sizeof(**smart_chars)) == 0);
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
@ -4900,6 +4900,7 @@ Object* JSFunction::SetInstancePrototype(Object* value) {
// prototype is put into the initial map where it belongs.
set_prototype_or_initial_map(value);
}
Heap::ClearInstanceofCache();
return value;
}
@ -5601,6 +5602,8 @@ Object* JSObject::SetPrototype(Object* value,
Map::cast(new_map)->set_prototype(value);
real_receiver->set_map(Map::cast(new_map));
Heap::ClearInstanceofCache();
return value;
}

7
deps/v8/src/objects.h

@ -2328,6 +2328,10 @@ class JSFunctionResultCache: public FixedArray {
static const int kEntrySize = 2; // key + value
static const int kFactoryOffset = kHeaderSize;
static const int kFingerOffset = kFactoryOffset + kPointerSize;
static const int kCacheSizeOffset = kFingerOffset + kPointerSize;
inline void MakeZeroSize();
inline void Clear();
@ -3200,7 +3204,7 @@ class SharedFunctionInfo: public HeapObject {
// [function data]: This field holds some additional data for the function.
// Currently it either has FunctionTemplateInfo for the benefit of the API
// or Proxy wrapping CustomCallGenerator.
// or Smi identifying a custom call generator.
// In the long run we don't want all functions to have this field but
// we can fix that when we have a better model for storing hidden data
// on objects.
@ -3209,6 +3213,7 @@ class SharedFunctionInfo: public HeapObject {
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasCustomCallGenerator();
inline int custom_call_generator_id();
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)

9
deps/v8/src/platform-freebsd.cc

@ -286,14 +286,12 @@ void OS::LogSharedLibraryAddresses() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
ScopedVector<void*> addresses(frames_size);
int frames_count = backtrace(addresses, frames_size);
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols;
symbols = backtrace_symbols(addresses, frames_count);
char** symbols = backtrace_symbols(addresses, frames_count);
if (symbols == NULL) {
DeleteArray(addresses);
return kStackWalkError;
}
@ -308,7 +306,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
DeleteArray(addresses);
free(symbols);
return frames_count;

9
deps/v8/src/platform-linux.cc

@ -376,14 +376,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
#ifdef __GLIBC__
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
ScopedVector<void*> addresses(frames_size);
int frames_count = backtrace(addresses, frames_size);
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols;
symbols = backtrace_symbols(addresses, frames_count);
char** symbols = backtrace_symbols(addresses.start(), frames_count);
if (symbols == NULL) {
DeleteArray(addresses);
return kStackWalkError;
}
@ -398,7 +396,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
DeleteArray(addresses);
free(symbols);
return frames_count;
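For reference, a self-contained sketch of the glibc calls used here. Note that backtrace_symbols() returns a malloc'ed array the caller must still free(); only the address buffer is covered by the new scoped storage:

#include <execinfo.h>
#include <stdlib.h>

static int PrintFrames(int max_frames) {
  void* addresses[64];
  if (max_frames > 64) max_frames = 64;
  int count = backtrace(addresses, max_frames);
  char** symbols = backtrace_symbols(addresses, count);
  if (symbols == NULL) return -1;  // out of memory
  // ... copy symbols[0 .. count) into the frame text buffers ...
  free(symbols);                   // still manual; not covered by RAII
  return count;
}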

10
deps/v8/src/platform-macos.cc

@ -283,13 +283,12 @@ int OS::StackWalk(Vector<StackFrame> frames) {
return 0;
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
int frames_count = backtrace(addresses, frames_size);
ScopedVector<void*> addresses(frames_size);
char** symbols;
symbols = backtrace_symbols(addresses, frames_count);
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols = backtrace_symbols(addresses.start(), frames_count);
if (symbols == NULL) {
DeleteArray(addresses);
return kStackWalkError;
}
@ -305,7 +304,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
DeleteArray(addresses);
free(symbols);
return frames_count;

9
deps/v8/src/platform-solaris.cc

@ -233,14 +233,12 @@ void OS::LogSharedLibraryAddresses() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
ScopedVector<void*> addresses(frames_size);
int frames_count = backtrace(addresses, frames_size);
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols;
symbols = backtrace_symbols(addresses, frames_count);
char** symbols = backtrace_symbols(addresses.start(), frames_count);
if (symbols == NULL) {
DeleteArray(addresses);
return kStackWalkError;
}
@ -255,7 +253,6 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
DeleteArray(addresses);
free(symbols);
return frames_count;

20
deps/v8/src/platform-win32.cc

@ -1249,16 +1249,16 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// Try to locate a symbol for this frame.
DWORD64 symbol_displacement;
IMAGEHLP_SYMBOL64* symbol = NULL;
symbol = NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen);
if (!symbol) return kStackWalkError; // Out of memory.
memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
symbol->MaxNameLength = kStackWalkMaxNameLen;
SmartPointer<IMAGEHLP_SYMBOL64> symbol(
NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
if (symbol.is_empty()) return kStackWalkError; // Out of memory.
memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
(*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
(*symbol)->MaxNameLength = kStackWalkMaxNameLen;
ok = _SymGetSymFromAddr64(process_handle, // hProcess
stack_frame.AddrPC.Offset, // Address
&symbol_displacement, // Displacement
symbol); // Symbol
*symbol); // Symbol
if (ok) {
// Try to locate more source information for the symbol.
IMAGEHLP_LINE64 Line;
@ -1276,13 +1276,13 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
SNPrintF(MutableCStrVector(frames[frames_count].text,
kStackWalkMaxTextLen),
"%s %s:%d:%d",
symbol->Name, Line.FileName, Line.LineNumber,
(*symbol)->Name, Line.FileName, Line.LineNumber,
line_displacement);
} else {
SNPrintF(MutableCStrVector(frames[frames_count].text,
kStackWalkMaxTextLen),
"%s",
symbol->Name);
(*symbol)->Name);
}
// Make sure line termination is in place.
frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
@ -1294,11 +1294,9 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// module will never be found).
int err = GetLastError();
if (err != ERROR_MOD_NOT_FOUND) {
DeleteArray(symbol);
break;
}
}
DeleteArray(symbol);
frames_count++;
}

41
deps/v8/src/runtime.cc

@ -1325,18 +1325,9 @@ static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) {
}
static void SetCustomCallGenerator(Handle<JSFunction> function,
ExternalReference* generator) {
if (function->shared()->function_data()->IsUndefined()) {
function->shared()->set_function_data(*FromCData(generator->address()));
}
}
static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name,
ExternalReference* generator = NULL) {
Builtins::Name builtin_name) {
Handle<String> key = Factory::LookupAsciiSymbol(name);
Handle<Code> code(Builtins::builtin(builtin_name));
Handle<JSFunction> optimized = Factory::NewFunction(key,
@ -1345,44 +1336,18 @@ static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
code,
false);
optimized->shared()->DontAdaptArguments();
if (generator != NULL) {
SetCustomCallGenerator(optimized, generator);
}
SetProperty(holder, key, optimized, NONE);
return optimized;
}
Object* CompileArrayPushCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check) {
return compiler->CompileArrayPushCall(object, holder, function, name, check);
}
Object* CompileArrayPopCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check) {
return compiler->CompileArrayPopCall(object, holder, function, name, check);
}
static Object* Runtime_SpecialArrayFunctions(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, holder, 0);
ExternalReference pop = ExternalReference::compile_array_pop_call();
ExternalReference push = ExternalReference::compile_array_push_call();
InstallBuiltin(holder, "pop", Builtins::ArrayPop, &pop);
InstallBuiltin(holder, "push", Builtins::ArrayPush, &push);
InstallBuiltin(holder, "pop", Builtins::ArrayPop);
InstallBuiltin(holder, "push", Builtins::ArrayPush);
InstallBuiltin(holder, "shift", Builtins::ArrayShift);
InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
InstallBuiltin(holder, "slice", Builtins::ArraySlice);

22
deps/v8/src/serialize.cc

@ -414,44 +414,36 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
19,
"compare_doubles");
Add(ExternalReference::compile_array_pop_call().address(),
UNCLASSIFIED,
20,
"compile_array_pop");
Add(ExternalReference::compile_array_push_call().address(),
UNCLASSIFIED,
21,
"compile_array_push");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
22,
20,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
23,
21,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
24,
22,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
25,
23,
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
26,
24,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
27,
25,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address().address(),
UNCLASSIFIED,
28,
26,
"TranscendentalCache::caches()");
}

32
deps/v8/src/stub-cache.cc

@ -1142,6 +1142,29 @@ Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
}
Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* object,
JSObject* holder,
JSFunction* function,
String* fname,
CheckType check) {
ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
switch (generator_id) {
#define CALL_GENERATOR_CASE(ignored1, ignored2, name) \
case k##name##CallGenerator: \
return CallStubCompiler::Compile##name##Call(object, \
holder, \
function, \
fname, \
check);
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
}
UNREACHABLE();
return Heap::undefined_value();
}
Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
@ -1152,6 +1175,15 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
}
Object* CallStubCompiler::GetCode(JSFunction* function) {
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* ConstructStubCompiler::GetCode() {
Code::Flags flags = Code::ComputeFlags(Code::STUB);
Object* result = GetCodeWithFlags(flags, "ConstructStub");

66
deps/v8/src/stub-cache.h

@ -559,8 +559,30 @@ class KeyedStoreStubCompiler: public StubCompiler {
};
// List of functions with custom constant call IC stubs.
//
// Installation of custom call generators for the selected builtins is
// handled by the bootstrapper.
//
// Each entry has the name of a global function (lowercased), the name of
// a builtin function on its instance prototype (the one the generator
// is set for), and the name of the generator itself (used to build ids
// and generator function names).
#define CUSTOM_CALL_IC_GENERATORS(V) \
V(array, push, ArrayPush) \
V(array, pop, ArrayPop)
class CallStubCompiler: public StubCompiler {
public:
enum {
#define DECLARE_CALL_GENERATOR_ID(ignored1, ignored2, name) \
k##name##CallGenerator,
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
#undef DECLARE_CALL_GENERATOR_ID
kNumCallGenerators
};
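With the two entries registered above, the X-macro expands this enum roughly as follows (a sketch of the preprocessed output):

enum {
  kArrayPushCallGenerator,  // == 0; stored as the Smi in function_data
  kArrayPopCallGenerator,   // == 1
  kNumCallGenerators        // == 2; used only for bounds checking
};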
CallStubCompiler(int argc, InLoopFlag in_loop)
: arguments_(argc), in_loop_(in_loop) { }
@ -582,17 +604,22 @@ class CallStubCompiler: public StubCompiler {
JSFunction* function,
String* name);
Object* CompileArrayPushCall(Object* object,
// Compiles a custom call constant IC using the generator with given id.
Object* CompileCustomCall(int generator_id,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check);
Object* CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \
Object* Compile##name##Call(Object* object, \
JSObject* holder, \
JSFunction* function, \
String* fname, \
CheckType check);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
private:
const ParameterCount arguments_;
@ -601,6 +628,10 @@ class CallStubCompiler: public StubCompiler {
const ParameterCount& arguments() { return arguments_; }
Object* GetCode(PropertyType type, String* name);
// Convenience function. Calls GetCode above passing
// CONSTANT_FUNCTION type and the name of the given function.
Object* GetCode(JSFunction* function);
};
@ -663,31 +694,6 @@ class CallOptimization BASE_EMBEDDED {
CallHandlerInfo* api_call_info_;
};
typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check);
Object* CompileArrayPushCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check);
Object* CompileArrayPopCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check);
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_

87
deps/v8/src/top.cc

@ -337,7 +337,7 @@ static int stack_trace_nesting_level = 0;
static StringStream* incomplete_message = NULL;
Handle<String> Top::StackTrace() {
Handle<String> Top::StackTraceString() {
if (stack_trace_nesting_level == 0) {
stack_trace_nesting_level++;
HeapStringAllocator allocator;
@ -365,6 +365,89 @@ Handle<String> Top::StackTrace() {
}
Local<StackTrace> Top::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
v8::HandleScope scope;
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stackTrace = Factory::NewJSArray(frame_limit);
FixedArray* frames = FixedArray::cast(stackTrace->elements());
Handle<String> column_key = Factory::LookupAsciiSymbol("column");
Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName");
Handle<String> function_key = Factory::LookupAsciiSymbol("functionName");
Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval");
Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor");
StackTraceFrameIterator it;
int frames_seen = 0;
while (!it.done() && (frames_seen < limit)) {
// Create a JSObject to hold the information for the StackFrame.
Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
JavaScriptFrame* frame = it.frame();
JSFunction* fun(JSFunction::cast(frame->function()));
Script* script = Script::cast(fun->shared()->script());
if (options & StackTrace::kLineNumber) {
int script_line_offset = script->line_offset()->value();
int position = frame->code()->SourcePosition(frame->pc());
int line_number = GetScriptLineNumber(Handle<Script>(script), position);
// line_number is already shifted by the script_line_offset.
int relative_line_number = line_number - script_line_offset;
if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
int start = (relative_line_number == 0) ? 0 :
Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
int column_offset = position - start;
if (relative_line_number == 0) {
// For the case where the code is on the same line as the script tag.
column_offset += script->column_offset()->value();
}
SetProperty(stackFrame, column_key,
Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
}
SetProperty(stackFrame, line_key,
Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name());
SetProperty(stackFrame, script_key, script_name, NONE);
}
if (options & StackTrace::kFunctionName) {
Handle<Object> fun_name(fun->shared()->name());
if (fun_name->ToBoolean()->IsFalse()) {
fun_name = Handle<Object>(fun->shared()->inferred_name());
}
SetProperty(stackFrame, function_key, fun_name, NONE);
}
if (options & StackTrace::kIsEval) {
int type = Smi::cast(script->compilation_type())->value();
Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
Factory::true_value() : Factory::false_value();
SetProperty(stackFrame, eval_key, is_eval, NONE);
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frame->IsConstructor()) ?
Factory::true_value() : Factory::false_value();
SetProperty(stackFrame, constructor_key, is_constructor, NONE);
}
frames->set(frames_seen, *stackFrame);
frames_seen++;
it.Advance();
}
stackTrace->set_length(Smi::FromInt(frames_seen));
return scope.Close(Utils::StackTraceToLocal(stackTrace));
}
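In JavaScript terms each element of the returned array is a plain object. A sketch of the per-frame shape assembled above, with every field optional and present only when the corresponding option bit is set:

struct CapturedFrameSketch {
  int column;                // 1-based; only with kColumnOffset
  int lineNumber;            // 1-based; only with kLineNumber
  const char* scriptName;    // only with kScriptName
  const char* functionName;  // shared name, or the inferred name if unset
  bool isEval;               // only with kIsEval
  bool isConstructor;        // only with kIsConstructor
};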
void Top::PrintStack() {
if (stack_trace_nesting_level == 0) {
stack_trace_nesting_level++;
@ -786,7 +869,7 @@ void Top::DoThrow(Object* exception,
// traces while the bootstrapper is active since the infrastructure
// may not have been properly initialized.
Handle<String> stack_trace;
if (FLAG_trace_exception) stack_trace = StackTrace();
if (FLAG_trace_exception) stack_trace = StackTraceString();
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
location, HandleVector<Object>(&exception_handle, 1), stack_trace);
}

5
deps/v8/src/top.h

@ -265,7 +265,10 @@ class Top {
static void PrintStackTrace(FILE* out, char* thread_data);
static void PrintStack(StringStream* accumulator);
static void PrintStack();
static Handle<String> StackTrace();
static Handle<String> StackTraceString();
static Local<StackTrace> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be

3
deps/v8/src/utils.h

@ -412,6 +412,9 @@ class ScopedVector : public Vector<T> {
~ScopedVector() {
DeleteArray(this->start());
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
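DISALLOW_IMPLICIT_CONSTRUCTORS conventionally declares (without defining) the default constructor plus the copy operations; a sketch of the effect on a stand-in class, assuming the macro matches the one in src/globals.h:

class Guarded {
 public:
  explicit Guarded(int n) : n_(n) {}
 private:
  Guarded();                       // never defined: no default construction
  Guarded(const Guarded&);         // never defined: no copying
  void operator=(const Guarded&);  // never defined: no assignment
  int n_;
};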

3
deps/v8/src/v8natives.js

@ -660,7 +660,8 @@ function ObjectGetOwnPropertyNames(obj) {
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
// IS_OBJECT will return true on null, covering that case.
if (!IS_OBJECT(proto) && !IS_FUNCTION(proto)) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 8
#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

2
deps/v8/src/x64/assembler-x64-inl.h

@ -34,7 +34,7 @@
namespace v8 {
namespace internal {
Condition NegateCondition(Condition cc) {
inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
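The xor works because the x86/x64 condition encodings pair every predicate with its negation in adjacent even/odd codes; a sketch with a few of the encodings:

enum Cond { equal = 4, not_equal = 5, less = 12, greater_equal = 13 };
inline Cond Negate(Cond cc) { return static_cast<Cond>(cc ^ 1); }
// Negate(equal) == not_equal and Negate(less) == greater_equal.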

338
deps/v8/src/x64/codegen-x64.cc

@ -360,7 +360,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
// Allocate the local context if needed.
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@ -2866,9 +2866,63 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// JavaScript examples:
//
// with (obj) foo(1, 2, 3) // foo is in obj
//
// function f() {};
// function g() {
// eval(...);
// f(); // f could be in extension object
// }
// ----------------------------------
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
Result function;
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
function = LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = var->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
function = allocator()->Allocate();
ASSERT(function.is_valid());
__ movq(function.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
function,
&slow));
JumpTarget push_function_and_receiver;
if (potential_slot->var()->mode() == Variable::CONST) {
__ CompareRoot(function.reg(), Heap::kTheHoleValueRootIndex);
push_function_and_receiver.Branch(not_equal, &function);
__ LoadRoot(function.reg(), Heap::kUndefinedValueRootIndex);
}
push_function_and_receiver.Bind(&function);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
}
}
slow.Bind();
// Load the function from the context. Sync the frame so we can
// push the arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
@ -2887,6 +2941,7 @@ void CodeGenerator::VisitCall(Call* node) {
ASSERT(!allocator()->is_used(rdx));
frame_->EmitPush(rdx);
done.Bind();
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@ -4392,7 +4447,7 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ Move(FieldOperand(rcx, HeapObject::kMapOffset),
Factory::fixed_array_map());
// Set length.
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
__ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
// Fill contents of fixed-array with the-hole.
__ Move(rdx, Factory::the_hole_value());
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@ -4421,22 +4476,142 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
: dst_(dst), cache_(cache), key_(key) {
DeferredSearchCache(Register dst,
Register cache,
Register key,
Register scratch)
: dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
set_comment("[ DeferredSearchCache");
}
virtual void Generate();
private:
Register dst_, cache_, key_;
Register dst_;     // On invocation, holds the index of the finger (as a Smi);
                   // on exit, holds the value being looked up.
Register cache_;   // The JSFunctionResultCache instance.
Register key_;     // The key being looked up.
Register scratch_;
};
// Returns the Operand of the element at |index| + |additional_offset|
// in the FixedArray whose pointer is held in |array|. |index| is an int32.
static Operand ArrayElement(Register array,
Register index,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index, times_pointer_size, offset);
}
void DeferredSearchCache::Generate() {
__ push(cache_);
Label first_loop, search_further, second_loop, cache_miss;
Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
__ SmiToInteger32(dst_, dst_);
// Check the cache from finger to start of the cache.
__ bind(&first_loop);
__ subq(dst_, kEntrySizeImm);
__ cmpq(dst_, kEntriesIndexImm);
__ j(less, &search_further);
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &first_loop);
__ Integer32ToSmi(scratch_, dst_);
__ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&search_further);
// Check the cache from end of cache up to finger.
__ movq(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
__ movq(scratch_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(dst_, dst_);
__ SmiToInteger32(scratch_, scratch_);
__ bind(&second_loop);
__ subq(dst_, kEntrySizeImm);
__ cmpq(dst_, scratch_);
__ j(less_equal, &cache_miss);
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &second_loop);
__ Integer32ToSmi(scratch_, dst_);
__ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&cache_miss);
__ push(cache_); // store a reference to cache
__ push(key_); // store a key
Handle<Object> receiver(Top::global_context()->global());
__ Push(receiver);
__ push(key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
// On x64 function must be in rdi.
__ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
ParameterCount expected(1);
__ InvokeFunction(rdi, expected, CALL_FUNCTION);
// Find a place to put the new cached value.
Label add_new_entry, update_cache;
__ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
// Possible optimization: the cache size is constant for a given cache, so
// technically we could use a constant here. However, on a cache miss this
// optimization would hardly matter.
// Check if we can add a new entry to the cache.
__ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
__ SmiToInteger32(r9, r9);
__ cmpq(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we can evict the entry after the finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rdx, rdx);
__ addq(rdx, kEntrySizeImm);
Label forward;
__ cmpq(rbx, rdx);
__ j(greater, &forward);
// Need to wrap around to the start of the cache.
__ movq(rdx, kEntriesIndexImm);
__ bind(&forward);
__ Integer32ToSmi(r9, rdx);
__ jmp(&update_cache);
__ bind(&add_new_entry);
// r9 holds cache size as int.
__ movq(rdx, r9);
__ Integer32ToSmi(r9, r9);
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
// Update the cache itself.
// rdx holds the index as int.
// r9 holds the index as smi.
__ bind(&update_cache);
__ pop(rbx); // restore the key
__ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
// Store key.
__ movq(ArrayElement(rcx, rdx), rbx);
__ RecordWrite(rcx, 0, rbx, r9);
// Store value.
__ pop(rcx); // restore the cache.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
__ movq(r9, rdx);
__ SmiToInteger32(rdx, rdx);
__ movq(rbx, rax);
__ movq(ArrayElement(rcx, rdx), rbx);
__ RecordWrite(rcx, 0, rbx, r9);
if (!dst_.is(rax)) {
__ movq(dst_, rax);
}
@ -4474,27 +4649,28 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
Result tmp = allocator()->Allocate();
ASSERT(tmp.is_valid());
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
key.reg());
key.reg(),
scratch.reg());
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
SmiIndex index =
masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
__ cmpq(key.reg(), FieldOperand(cache.reg(),
index.reg,
index.scale,
index.reg, index.scale,
FixedArray::kHeaderSize));
// Do NOT alter index.reg or tmp.reg() before cmpq below.
deferred->Branch(not_equal);
__ movq(tmp.reg(), FieldOperand(cache.reg(),
index.reg,
index.scale,
kPointerSize + FixedArray::kHeaderSize));
index.reg, index.scale,
FixedArray::kHeaderSize + kPointerSize));
deferred->BindExit();
frame_->Push(&tmp);
@ -5178,7 +5354,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
@ -5683,6 +5860,11 @@ void CodeGenerator::Comparison(AstNode* node,
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
if (left_side.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg(), "Non-smi value inferred as smi.");
}
} else {
Condition left_is_smi = masm_->CheckSmi(left_side.reg());
is_smi.Branch(left_is_smi);
@ -5725,6 +5907,8 @@ void CodeGenerator::Comparison(AstNode* node,
dest->false_target()->Jump();
is_smi.Bind();
}
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
@ -6404,10 +6588,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
Handle<Object> value,
bool reversed,
OverwriteMode overwrite_mode) {
// Generate inline code for a binary operation when one of the
// operands is a constant smi. Consumes the argument "operand".
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
@ -6528,43 +6710,37 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::SHL:
if (reversed) {
operand->ToRegister();
// We need rcx to be available to hold operand, and to be spilled.
// SmiShiftLeft implicitly modifies rcx.
if (operand->reg().is(rcx)) {
  frame_->Spill(operand->reg());
  answer = allocator()->Allocate();
} else {
  Result rcx_reg = allocator()->Allocate(rcx);
  // answer must not be rcx.
  answer = allocator()->Allocate();
  // rcx_reg goes out of scope.
}
DeferredInlineSmiOperationReversed* deferred =
    new DeferredInlineSmiOperationReversed(op,
                                           answer.reg(),
                                           smi_value,
                                           operand->reg(),
                                           overwrite_mode);
if (!operand->type_info().IsSmi()) {
  Condition is_smi = masm_->CheckSmi(operand->reg());
  deferred->Branch(NegateCondition(is_smi));
} else if (FLAG_debug_code) {
  __ AbortIfNotSmi(operand->reg(),
      "Static type info claims non-smi is smi in (const SHL smi).");
}
__ Move(answer.reg(), smi_value);
__ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
operand->Unuse();
deferred->BindExit();
} else {
@ -6597,8 +6773,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftLeftConstant(answer.reg(),
                        operand->reg(),
                        shift_value);
deferred->BindExit();
operand->Unuse();
}
@ -6685,10 +6860,19 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
return answer;
}
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
OverwriteMode overwrite_mode) {
// Copy the type info because left and right may be overwritten.
TypeInfo left_type_info = left->type_info();
TypeInfo right_type_info = right->type_info();
USE(left_type_info);
USE(right_type_info);
// TODO(X64): Use type information in calculations.
Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
@ -6813,9 +6997,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->reg(),
rcx,
overwrite_mode);
__ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
// Perform the operation.
switch (op) {
@ -6832,8 +7014,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::SHL: {
__ SmiShiftLeft(answer.reg(), left->reg(), rcx);
break;
}
default:
@ -8795,6 +8976,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[1] : function pointer
// rsp[2] : value
// Returns a bitwise zero to indicate that the value
// is an instance of the function and anything else to
// indicate that the value is not an instance.
// Get the object - go slow case if it's a smi.
Label slow;
@ -8809,6 +8993,18 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the prototype of the function.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
// rdx is function, rax is map.
// Look up the function and the map in the instanceof cache.
Label miss;
__ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ j(not_equal, &miss);
__ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
__ j(not_equal, &miss);
__ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(2 * kPointerSize);
__ bind(&miss);
__ TryGetFunctionPrototype(rdx, rbx, &slow);
// Check that the function prototype is a JS object.
@ -8818,7 +9014,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
__ j(above, &slow);
// Register mapping:
//   rax is object map.
//   rdx is function.
//   rbx is function prototype.
__ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
__ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
@ -8828,6 +9030,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ cmpq(rcx, rbx);
__ j(equal, &is_instance);
__ cmpq(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance);
__ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
@ -8835,10 +9039,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
__ xorl(rax, rax);
// Store bitwise zero in the cache. This is a Smi in GC terms.
ASSERT_EQ(0, kSmiTag);
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(2 * kPointerSize);
__ bind(&is_not_instance);
__ movl(rax, Immediate(1));
// We have to store a non-zero value in the cache.
__ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(2 * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
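Taken together, the three new root slots form a one-entry cache keyed on the (function, object map) pair. A standalone C++ sketch of the policy the stub implements (our own model, not V8 code; the real stub works on tagged values in registers and V8's root list):

#include <cstdint>

// Sketch only: the cached instanceof fast path, modeled in plain C++.
// The slot names mirror the Heap::kInstanceofCache* roots used above.
enum { kCachedFunction, kCachedMap, kCachedAnswer, kNumCacheSlots };
static intptr_t instanceof_cache[kNumCacheSlots];

intptr_t WalkPrototypeChain(intptr_t function, intptr_t map);  // the slow walk

intptr_t InstanceofWithCache(intptr_t function, intptr_t map) {
  if (function == instanceof_cache[kCachedFunction] &&
      map == instanceof_cache[kCachedMap]) {
    return instanceof_cache[kCachedAnswer];  // hit: 0 means "is an instance"
  }
  // Miss: record the key pair first (as the stub does before the walk),
  // then compute and cache the answer: 0 for instance, non-zero otherwise.
  instanceof_cache[kCachedFunction] = function;
  instanceof_cache[kCachedMap] = map;
  intptr_t answer = WalkPrototypeChain(function, map);
  instanceof_cache[kCachedAnswer] = answer;
  return answer;
}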
@ -8930,7 +9138,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
// Copy the fixed array slots.
Label loop;
@ -9860,13 +10068,28 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ movq(left, Operand(rsp, 2 * kPointerSize));
}
Label not_smis;
// 2. Smi check both operands.
if (static_operands_type_.IsSmi()) {
// Skip smi check if we know that both arguments are smis.
if (FLAG_debug_code) {
__ AbortIfNotSmi(left, "Static type check claimed non-smi is smi.");
__ AbortIfNotSmi(right, "Static type check claimed non-smi is smi.");
}
if (op_ == Token::BIT_OR) {
// Handle OR here, since we do extra smi-checking in the or code below.
__ SmiOr(right, right, left);
GenerateReturn(masm);
return;
}
} else {
if (op_ != Token::BIT_OR) {
// Skip the check for OR as it is better combined with the
// actual operation.
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
}
// 3. Operands are both smis (except for OR), perform the operation leaving
// the result in rax and check the result if necessary.
@ -9929,7 +10152,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ SmiShiftLogicalRight(left, left, right, slow);
break;
case Token::SHL:
__ SmiShiftLeft(left, left, right);
break;
default:
UNREACHABLE();
@ -9953,6 +10176,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SUB:
case Token::MUL:
case Token::DIV: {
ASSERT(use_fp_on_smis.is_linked());
__ bind(&use_fp_on_smis);
if (op_ == Token::DIV) {
__ movq(rdx, rax);

11
deps/v8/src/x64/macro-assembler-x64.cc

@ -50,6 +50,11 @@ void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
}
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
push(Operand(kRootRegister, index << kPointerSizeLog2));
}
@ -1227,8 +1232,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value) {
if (!dst.is(src)) {
movq(dst, src);
}
@ -1240,8 +1244,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag shift amount.
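The dropped on_not_smi_result label is justified by x64's smi representation: with 32-bit ("long") smis every int32 is a valid smi, and JavaScript's << is specified to wrap its result to int32, so a smi left shift can never produce a non-smi and no bail-out path is needed (unlike ia32's 31-bit smis). A tiny illustration, not V8 code:

#include <cstdint>
#include <cassert>

int main() {
  // JS semantics: 0x40000000 << 2 wraps modulo 2^32 to 0.
  uint32_t x = 0x40000000u;
  int32_t result = static_cast<int32_t>(x << 2);
  assert(result == 0);  // still an int32, hence still a valid x64 smi
  return 0;
}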

18
deps/v8/src/x64/macro-assembler-x64.h

@ -33,6 +33,17 @@
namespace v8 {
namespace internal {
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated object already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1
};
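Since these are bit flags, call sites combine them and cast back to the enum type; a one-line illustration (ours, not from the diff):

// Illustrative only: requesting a tagged result whose register already
// holds the allocation top.
AllocationFlags flags =
    static_cast<AllocationFlags>(TAG_OBJECT | RESULT_CONTAINS_TOP);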
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved, and is not used by the
// function calling convention.
@ -62,6 +73,7 @@ class MacroAssembler: public Assembler {
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
void StoreRoot(Register source, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// GC Support
@ -374,8 +386,7 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
                          Register src,
                          int shift_value);
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
@ -388,8 +399,7 @@ class MacroAssembler: public Assembler {
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
                  Register src1,
                  Register src2);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.

26
deps/v8/src/x64/stub-cache-x64.cc

@ -870,9 +870,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
Object* result =
    CompileCustomCall(id, object, holder, function, name, check);
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) {
return result;
@ -1007,11 +1007,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
@ -1198,7 +1194,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
        Immediate(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
@ -1219,11 +1215,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}
@ -1308,11 +1300,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(function);
}

2
deps/v8/test/cctest/SConscript

@ -34,6 +34,7 @@ Import('context object_files')
SOURCES = {
'all': [
'gay-fixed.cc',
'gay-shortest.cc',
'test-accessors.cc',
'test-alloc.cc',
@ -49,6 +50,7 @@ SOURCES = {
'test-diy-fp.cc',
'test-double.cc',
'test-fast-dtoa.cc',
'test-fixed-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
'test-hashmap.cc',

100049
deps/v8/test/cctest/gay-fixed.cc

File diff suppressed because it is too large

47
deps/v8/test/cctest/gay-fixed.h

@ -0,0 +1,47 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GAY_FIXED_H_
#define GAY_FIXED_H_
namespace v8 {
namespace internal {
struct PrecomputedFixed {
double v;
int number_digits;
const char* representation;
int decimal_point;
};
// Returns precomputed values of dtoa. The strings have been generated using
// Gay's dtoa in mode "fixed".
Vector<const PrecomputedFixed> PrecomputedFixedRepresentations();
} } // namespace v8::internal
#endif // GAY_FIXED_H_

10
deps/v8/test/cctest/gay-shortest.cc

@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
static const PrecomputedShortest kShortestTestNumbers[] = {
{1.3252057186783201350530603e-106, "13252057186783201", -105},
{1.6899223998841386493367055e-33, "16899223998841386", -32},
{1.0077972445720390730768089e+138, "1007797244572039", 139},
@ -100040,9 +100040,11 @@ static const GayShortest kShortestTestNumbers[] = {
};
Vector<const PrecomputedShortest> PrecomputedShortestRepresentations() {
  int number_elements =
      sizeof(kShortestTestNumbers) / sizeof(PrecomputedShortest);
  return Vector<const PrecomputedShortest>(kShortestTestNumbers,
                                           number_elements);
}
} } // namespace v8::internal

4
deps/v8/test/cctest/gay-shortest.h

@ -31,13 +31,13 @@
namespace v8 {
namespace internal {
struct PrecomputedShortest {
double v;
const char* representation;
int decimal_point;
};
Vector<const PrecomputedShortest> PrecomputedShortestRepresentations();
} } // namespace v8::internal

231
deps/v8/test/cctest/test-api.cc

@ -9584,6 +9584,120 @@ THREADED_TEST(StackTrace) {
}
// Checks that a StackFrame has certain expected values.
void checkStackFrame(const char* expected_script_name,
const char* expected_func_name, int expected_line_number,
int expected_column, bool is_eval, bool is_constructor,
v8::Handle<v8::StackFrame> frame) {
v8::HandleScope scope;
v8::String::Utf8Value func_name(frame->GetFunctionName());
v8::String::Utf8Value script_name(frame->GetScriptName());
if (*script_name == NULL) {
// The situation where there is no associated script, like for evals.
CHECK(expected_script_name == NULL);
} else {
CHECK(strstr(*script_name, expected_script_name) != NULL);
}
CHECK(strstr(*func_name, expected_func_name) != NULL);
CHECK_EQ(expected_line_number, frame->GetLineNumber());
CHECK_EQ(expected_column, frame->GetColumn());
CHECK_EQ(is_eval, frame->IsEval());
CHECK_EQ(is_constructor, frame->IsConstructor());
}
v8::Handle<Value> AnalyzeStackInNativeCode(const v8::Arguments& args) {
v8::HandleScope scope;
const char* origin = "capture-stack-trace-test";
const int kOverviewTest = 1;
const int kDetailedTest = 2;
ASSERT(args.Length() == 1);
int testGroup = args[0]->Int32Value();
if (testGroup == kOverviewTest) {
v8::Handle<v8::StackTrace> stackTrace =
v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bar", 2, 10, false, false,
stackTrace->GetFrame(0));
checkStackFrame(origin, "foo", 6, 3, false, false,
stackTrace->GetFrame(1));
checkStackFrame(NULL, "", 1, 1, false, false,
stackTrace->GetFrame(2));
// The last frame is an anonymous function that has the initial call.
checkStackFrame(origin, "", 8, 7, false, false,
stackTrace->GetFrame(3));
CHECK(stackTrace->AsArray()->IsArray());
} else if (testGroup == kDetailedTest) {
v8::Handle<v8::StackTrace> stackTrace =
v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bat", 4, 22, false, false,
stackTrace->GetFrame(0));
checkStackFrame(origin, "baz", 8, 3, false, true,
stackTrace->GetFrame(1));
checkStackFrame(NULL, "", 1, 1, true, false,
stackTrace->GetFrame(2));
// The last frame is an anonymous function that has the initial call to baz.
checkStackFrame(origin, "", 10, 1, false, false,
stackTrace->GetFrame(3));
CHECK(stackTrace->AsArray()->IsArray());
}
return v8::Undefined();
}
// Tests the C++ StackTrace API.
THREADED_TEST(CaptureStackTrace) {
v8::HandleScope scope;
v8::Handle<v8::String> origin = v8::String::New("capture-stack-trace-test");
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeStackInNativeCode"),
v8::FunctionTemplate::New(AnalyzeStackInNativeCode));
LocalContext context(0, templ);
// Test getting OVERVIEW information. Should ignore information that is not
// script name, function name, line number, and column offset.
const char *overview_source =
"function bar() {\n"
" var y; AnalyzeStackInNativeCode(1);\n"
"}\n"
"function foo() {\n"
"\n"
" bar();\n"
"}\n"
"var x;eval('new foo();');";
v8::Handle<v8::String> overview_src = v8::String::New(overview_source);
v8::Handle<Value> overview_result =
v8::Script::New(overview_src, origin)->Run();
ASSERT(!overview_result.IsEmpty());
ASSERT(overview_result->IsObject());
// Test getting DETAILED information.
const char *detailed_source =
"function bat() {AnalyzeStackInNativeCode(2);\n"
"}\n"
"\n"
"function baz() {\n"
" bat();\n"
"}\n"
"eval('new baz();');";
v8::Handle<v8::String> detailed_src = v8::String::New(detailed_source);
// Make the script using a non-zero line and column offset.
v8::Handle<v8::Integer> line_offset = v8::Integer::New(3);
v8::Handle<v8::Integer> column_offset = v8::Integer::New(5);
v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
v8::Handle<v8::Script> detailed_script(
v8::Script::New(detailed_src, &detailed_origin));
v8::Handle<Value> detailed_result = detailed_script->Run();
ASSERT(!detailed_result.IsEmpty());
ASSERT(detailed_result->IsObject());
}
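The expected positions in the detailed test follow directly from the offsets: bat's call to AnalyzeStackInNativeCode sits on source line 1, column 17, so with line_offset 3 and column_offset 5 it is reported at line 4, column 22; baz's call on source line 5 gets only the line offset (the column offset applies to the first line only), giving line 8, column 3.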
// Test that idle notification can be handled and eventually returns true.
THREADED_TEST(IdleNotification) {
bool rv = false;
@ -10155,3 +10269,120 @@ TEST(GCCallbacks) {
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
}
THREADED_TEST(AddToJSFunctionResultCache) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope;
LocalContext context;
const char* code =
"(function() {"
" var key0 = 'a';"
" var key1 = 'b';"
" var r0 = %_GetFromCache(0, key0);"
" var r1 = %_GetFromCache(0, key1);"
" var r0_ = %_GetFromCache(0, key0);"
" if (r0 !== r0_)"
" return 'Different results for ' + key0 + ': ' + r0 + ' vs. ' + r0_;"
" var r1_ = %_GetFromCache(0, key1);"
" if (r1 !== r1_)"
" return 'Different results for ' + key1 + ': ' + r1 + ' vs. ' + r1_;"
" return 'PASSED';"
"})()";
v8::internal::Heap::ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
static const int k0CacheSize = 16;
THREADED_TEST(FillJSFunctionResultCache) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope;
LocalContext context;
const char* code =
"(function() {"
" var k = 'a';"
" var r = %_GetFromCache(0, k);"
" for (var i = 0; i < 16; i++) {"
" %_GetFromCache(0, 'a' + i);"
" };"
" if (r === %_GetFromCache(0, k))"
" return 'FAILED: k0CacheSize is too small';"
" return 'PASSED';"
"})()";
v8::internal::Heap::ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
THREADED_TEST(RoundRobinGetFromCache) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope;
LocalContext context;
const char* code =
"(function() {"
" var keys = [];"
" for (var i = 0; i < 16; i++) keys.push(i);"
" var values = [];"
" for (var i = 0; i < 16; i++) values[i] = %_GetFromCache(0, keys[i]);"
" for (var i = 0; i < 16; i++) {"
" var v = %_GetFromCache(0, keys[i]);"
" if (v !== values[i])"
" return 'Wrong value for ' + "
" keys[i] + ': ' + v + ' vs. ' + values[i];"
" };"
" return 'PASSED';"
"})()";
v8::internal::Heap::ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
THREADED_TEST(ReverseGetFromCache) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope;
LocalContext context;
const char* code =
"(function() {"
" var keys = [];"
" for (var i = 0; i < 16; i++) keys.push(i);"
" var values = [];"
" for (var i = 0; i < 16; i++) values[i] = %_GetFromCache(0, keys[i]);"
" for (var i = 15; i >= 16; i--) {"
" var v = %_GetFromCache(0, keys[i]);"
" if (v !== values[i])"
" return 'Wrong value for ' + "
" keys[i] + ': ' + v + ' vs. ' + values[i];"
" };"
" return 'PASSED';"
"})()";
v8::internal::Heap::ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
THREADED_TEST(TestEviction) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope;
LocalContext context;
const char* code =
"(function() {"
" for (var i = 0; i < 2*16; i++) {"
" %_GetFromCache(0, 'a' + i);"
" };"
" return 'PASSED';"
"})()";
v8::internal::Heap::ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}

44
deps/v8/test/cctest/test-debug.cc

@ -2511,6 +2511,50 @@ TEST(DebugStepKeyedStoreLoop) {
}
// Test of the stepping mechanism for named load in a loop.
TEST(DebugStepNamedLoadLoop) {
v8::HandleScope scope;
DebugLocalContext env;
// Create a function for testing stepping of named load.
v8::Local<v8::Function> foo = CompileFunction(
&env,
"function foo() {\n"
" var a = [];\n"
" var s = \"\";\n"
" for (var i = 0; i < 10; i++) {\n"
" var v = new V(i, i + 1);\n"
" v.y;\n"
" a.length;\n" // Special case: array length.
" s.length;\n" // Special case: string length.
" }\n"
"}\n"
"function V(x, y) {\n"
" this.x = x;\n"
" this.y = y;\n"
"}\n",
"foo");
// Call function without any break points to ensure inlining is in place.
foo->Call(env->Global(), 0, NULL);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener(DebugEventStep);
// Setup break point and step through the function.
SetBreakPoint(foo, 4);
step_action = StepNext;
break_point_hit_count = 0;
foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit.
CHECK_EQ(41, break_point_hit_count);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
// Test the stepping mechanism with different ICs.
TEST(DebugStepLinearMixedICs) {
v8::HandleScope scope;

37
deps/v8/test/cctest/test-fast-dtoa.cc

@ -18,70 +18,60 @@ static const int kBufferSize = 100;
TEST(FastDtoaVariousDoubles) {
char buffer_container[kBufferSize];
Vector<char> buffer(buffer_container, kBufferSize);
int length;
int point;
int status;
double min_double = 5e-324;
status = FastDtoa(min_double, buffer, &length, &point);
CHECK(status);
CHECK_EQ("5", buffer.start());
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, buffer, &length, &point);
CHECK(status);
CHECK_EQ("17976931348623157", buffer.start());
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, buffer, &length, &point);
CHECK(status);
CHECK_EQ("4294967272", buffer.start());
CHECK_EQ(10, point);
status = FastDtoa(4.1855804968213567e298, buffer, &length, &point);
CHECK(status);
CHECK_EQ("4185580496821357", buffer.start());
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, buffer, &length, &point);
CHECK(status);
CHECK_EQ("5562684646268003", buffer.start());
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, buffer, &length, &point);
CHECK(status);
CHECK_EQ("2147483648", buffer.start());
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, buffer, &length, &point);
if (status) {  // Not all FastDtoa variants manage to compute this number.
  CHECK_EQ("35844466002796428", buffer.start());
  CHECK_EQ(299, point);
}
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = FastDtoa(v, buffer, &length, &point);
if (status) {
  CHECK_EQ("22250738585072014", buffer.start());
  CHECK_EQ(-307, point);
}
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
status = FastDtoa(v, buffer, &length, &point);
if (status) {
  CHECK_EQ("2225073858507201", buffer.start());
  CHECK_EQ(-307, point);
}
@ -92,24 +82,23 @@ TEST(FastDtoaGayShortest) {
char buffer_container[kBufferSize];
Vector<char> buffer(buffer_container, kBufferSize);
bool status;
int length;
int point;
int succeeded = 0;
int total = 0;
bool needed_max_length = false;
Vector<const PrecomputedShortest> precomputed =
    PrecomputedShortestRepresentations();
for (int i = 0; i < precomputed.length(); ++i) {
const PrecomputedShortest current_test = precomputed[i];
total++;
double v = current_test.v;
status = FastDtoa(v, buffer, &length, &point);
CHECK_GE(kFastDtoaMaximalLength, length);
if (!status) continue;
if (length == kFastDtoaMaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(current_test.decimal_point, point);
CHECK_EQ(current_test.representation, buffer.start());
}
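For reference, the new FastDtoa signature exercised here takes only the value, the digit buffer, and two out-parameters; the sign output is gone. A short usage sketch (the sample value is taken from the tests above; the decoding comment is ours):

// Sketch: using FastDtoa's (digits, length, point) output.
char digits_container[kBufferSize];
Vector<char> digits(digits_container, kBufferSize);
int length, point;
if (FastDtoa(4294967272.0, digits, &length, &point)) {
  // digits now holds "4294967272" with length == 10 and point == 10:
  // the value is 0.4294967272 * 10^10. A negative point means leading
  // zeros after the decimal dot, e.g. "5" with point == -323 for 5e-324.
}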

512
deps/v8/test/cctest/test-fixed-dtoa.cc

@ -0,0 +1,512 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "double.h"
#include "fixed-dtoa.h"
#include "gay-fixed.h"
using namespace v8::internal;
static const int kBufferSize = 500;
TEST(FastFixedVariousDoubles) {
char buffer_container[kBufferSize];
Vector<char> buffer(buffer_container, kBufferSize);
int length;
int point;
CHECK(FastFixedDtoa(1.0, 1, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 0, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0xFFFFFFFF, 5, buffer, &length, &point));
CHECK_EQ("4294967295", buffer.start());
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(4294967296.0, 5, buffer, &length, &point));
CHECK_EQ("4294967296", buffer.start());
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(1e21, 5, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(999999999999999868928.00, 2, buffer, &length, &point));
CHECK_EQ("999999999999999868928", buffer.start());
CHECK_EQ(21, point);
CHECK(FastFixedDtoa(6.9999999999999989514240000e+21, 5, buffer,
&length, &point));
CHECK_EQ("6999999999999998951424", buffer.start());
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(1.5, 5, buffer, &length, &point));
CHECK_EQ("15", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 5, buffer, &length, &point));
CHECK_EQ("155", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 1, buffer, &length, &point));
CHECK_EQ("16", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.00000001, 15, buffer, &length, &point));
CHECK_EQ("100000001", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.1, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.0000001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.000000001, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.00000000001, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.000000000001, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.00000000000001, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.0000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.00000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.0000000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.00000000000000000001, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001004, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000104, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000004, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100004, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010004, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001004, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000104, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000004, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100004, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010004, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001004, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000104, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000014, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000006, 10, buffer, &length, &point));
CHECK_EQ("1000000001", buffer.start());
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000006, 10, buffer, &length, &point));
CHECK_EQ("100000001", buffer.start());
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000006, 10, buffer, &length, &point));
CHECK_EQ("10000001", buffer.start());
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000006, 10, buffer, &length, &point));
CHECK_EQ("1000001", buffer.start());
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000006, 10, buffer, &length, &point));
CHECK_EQ("100001", buffer.start());
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100006, 10, buffer, &length, &point));
CHECK_EQ("10001", buffer.start());
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010006, 10, buffer, &length, &point));
CHECK_EQ("1001", buffer.start());
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001006, 10, buffer, &length, &point));
CHECK_EQ("101", buffer.start());
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000106, 10, buffer, &length, &point));
CHECK_EQ("11", buffer.start());
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000006, 15, buffer, &length, &point));
CHECK_EQ("100001", buffer.start());
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100006, 15, buffer, &length, &point));
CHECK_EQ("10001", buffer.start());
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010006, 15, buffer, &length, &point));
CHECK_EQ("1001", buffer.start());
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001006, 15, buffer, &length, &point));
CHECK_EQ("101", buffer.start());
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000106, 15, buffer, &length, &point));
CHECK_EQ("11", buffer.start());
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000006, 20, buffer, &length, &point));
CHECK_EQ("100001", buffer.start());
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100006, 20, buffer, &length, &point));
CHECK_EQ("10001", buffer.start());
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010006, 20, buffer, &length, &point));
CHECK_EQ("1001", buffer.start());
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001006, 20, buffer, &length, &point));
CHECK_EQ("101", buffer.start());
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000106, 20, buffer, &length, &point));
CHECK_EQ("11", buffer.start());
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000016, 20, buffer, &length, &point));
CHECK_EQ("2", buffer.start());
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.6, 0, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.96, 1, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.996, 2, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9996, 3, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99996, 4, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999996, 5, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999996, 6, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999996, 7, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999996, 8, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999996, 9, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999996, 10, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999996, 11, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999996, 12, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999999996, 13, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999999996, 14, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999999996, 15, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.00999999999999996, 16, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.000999999999999996, 17, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0000999999999999996, 18, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00000999999999999996, 19, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000000999999999999996, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(323423.234234, 10, buffer, &length, &point));
CHECK_EQ("323423234234", buffer.start());
CHECK_EQ(6, point);
CHECK(FastFixedDtoa(12345678.901234, 4, buffer, &length, &point));
CHECK_EQ("123456789012", buffer.start());
CHECK_EQ(8, point);
CHECK(FastFixedDtoa(98765.432109, 5, buffer, &length, &point));
CHECK_EQ("9876543211", buffer.start());
CHECK_EQ(5, point);
CHECK(FastFixedDtoa(42, 20, buffer, &length, &point));
CHECK_EQ("42", buffer.start());
CHECK_EQ(2, point);
CHECK(FastFixedDtoa(0.5, 0, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1e-23, 10, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(1e-123, 2, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(1e-123, 0, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(1e-23, 20, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-21, 20, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-22, 20, buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(6e-21, 20, buffer, &length, &point));
CHECK_EQ("1", buffer.start());
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(9.1193616301674545152000000e+19, 0,
buffer, &length, &point));
CHECK_EQ("91193616301674545152", buffer.start());
CHECK_EQ(20, point);
CHECK(FastFixedDtoa(4.8184662102767651659096515e-04, 19,
buffer, &length, &point));
CHECK_EQ("4818466210276765", buffer.start());
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(1.9023164229540652612705182e-23, 8,
buffer, &length, &point));
CHECK_EQ("", buffer.start());
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(1000000000000000128.0, 0,
buffer, &length, &point));
CHECK_EQ("1000000000000000128", buffer.start());
CHECK_EQ(19, point);
}
TEST(FastFixedDtoaGayFixed) {
char buffer_container[kBufferSize];
Vector<char> buffer(buffer_container, kBufferSize);
bool status;
int length;
int point;
Vector<const PrecomputedFixed> precomputed =
PrecomputedFixedRepresentations();
for (int i = 0; i < precomputed.length(); ++i) {
const PrecomputedFixed current_test = precomputed[i];
double v = current_test.v;
int number_digits = current_test.number_digits;
status = FastFixedDtoa(v, number_digits,
buffer, &length, &point);
CHECK(status);
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
CHECK_EQ(current_test.representation, buffer.start());
}
}
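The (digits, length, point) triple fully determines the fixed-point string. A small standalone helper (our own, not part of V8) shows how the expectations above decode; for example digits "1" with point == -2 is 0.001, and an empty digit string with a negative point is a value that rounded to zero at the requested precision:

#include <string>

// Sketch only: turn FastFixedDtoa's output back into a decimal string.
std::string ToFixedString(const char* digits, int length, int point) {
  std::string s;
  if (point <= 0) {
    s = "0.";
    for (int i = 0; i < -point; i++) s += '0';  // zeros between dot and digits
    s.append(digits, length);                   // "1", point -2  ->  "0.001"
  } else if (point >= length) {
    s.append(digits, length);                   // integral value:
    for (int i = 0; i < point - length; i++) s += '0';  // pad with zeros
  } else {
    s.append(digits, point);                    // "15", point 1  ->  "1.5"
    s += '.';
    s.append(digits + point, length - point);
  }
  return s;
}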

60
deps/v8/test/cctest/test-macro-assembler-x64.cc

@ -1737,10 +1737,10 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
// rax == id + i * 10.
int shift = shifts[i];
int result = x << shift;
CHECK(Smi::IsValid(result));
__ Move(r8, Smi::FromInt(result));
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(r9, rcx, shift);
__ incq(rax);
__ SmiCompare(r9, r8);
@ -1748,7 +1748,7 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(rcx, rcx, shift);
__ incq(rax);
__ SmiCompare(rcx, r8);
@ -1757,7 +1757,7 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(rcx, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, rcx);
__ incq(rax);
__ SmiCompare(r9, r8);
@ -1766,7 +1766,7 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, r11);
__ incq(rax);
__ SmiCompare(r9, r8);
@ -1775,61 +1775,13 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(rdx, rdx, r11);
__ incq(rax);
__ SmiCompare(rdx, r8);
__ j(not_equal, exit);
__ incq(rax);
}
}

329
deps/v8/test/mjsunit/instanceof-2.js

@ -0,0 +1,329 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var except = "exception";
var correct_answer_index = 0;
var correct_answers = [
false, false, true, true, false, false, true, true,
true, false, false, true, true, false, false, true,
false, true, true, false, false, true, true, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, false, true,
except, except, true, false, except, except, true, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, true, false, except, except,
false, true, except, except, false, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, false, true, true, false,
true, true, false, false, false, true, true, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, true, false,
except, except, false, false, except, except, true, false,
false, false, except, except, false, false, except, except,
true, false, except, except, true, false, except, except,
false, true, except, except, false, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, true, true, false,
true, false, false, true, true, true, false, false,
false, true, true, false, false, true, true, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, false, true,
except, except, true, false, except, except, true, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, true, true, false,
true, false, false, true, false, true, true, false,
false, true, true, false, false, true, true, false,
true, true, false, false, false, true, true, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, true, false,
except, except, false, false, except, except, true, false,
false, false, except, except, false, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, false, false, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, false, false, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, true, true, false, false,
true, false, false, true, true, true, false, false,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, true, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, true, true, false, false,
true, false, false, true, true, true, false, false,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, true, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, true, true, false, false,
false, true, true, false, false, false, true, true,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, false, false,
except, except, true, false, except, except, true, true,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, false, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, false, false, true, true,
true, true, false, false, false, false, true, true,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, true, true,
except, except, false, false, except, except, true, true,
false, false, except, except, false, false, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, false, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, true, true, false, false,
false, true, true, false, false, false, true, true,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, false, false,
except, except, true, false, except, except, true, true,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, false, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, false, false, true, true,
true, true, false, false, false, false, true, true,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, true, true,
except, except, false, false, except, except, true, true,
false, false, except, except, false, false, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, false, false, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, false, false, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, false, false, true, true,
true, false, false, true, false, false, true, true,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, false, false, except, except,
true, false, except, except, false, false, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, true, true, false, false,
true, false, false, true, true, true, false, false,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, true, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
false, false, true, true, true, true, false, false,
true, false, false, true, true, true, false, false,
false, true, true, false, true, true, false, false,
true, true, false, false, true, true, false, false,
except, except, true, true, except, except, true, true,
except, except, false, true, except, except, true, true,
except, except, true, false, except, except, false, false,
except, except, false, false, except, except, false, false,
false, false, except, except, true, true, except, except,
true, false, except, except, true, true, except, except,
false, true, except, except, true, true, except, except,
true, true, except, except, true, true, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except,
except, except, except, except, except, except, except, except];
for (var i = 0; i < 256; i++) {
  Test(i & 1, i & 2, i & 4, i & 8, i & 0x10, i & 0x20, i & 0x40, i & 0x80);
}
function InstanceTest(x, func) {
  try {
    var answer = (x instanceof func);
    assertEquals(correct_answers[correct_answer_index], answer);
  } catch (e) {
    assertTrue(/prototype/.test(e));
    assertEquals(correct_answers[correct_answer_index], except);
  }
  correct_answer_index++;
}
function Test(a, b, c, d, e, f, g, h) {
  var Foo = function() { };
  var Bar = function() { };
  if (c) Foo.prototype = 12;
  if (d) Bar.prototype = 13;
  var x = a ? new Foo() : new Bar();
  var y = b ? new Foo() : new Bar();
  InstanceTest(x, Foo);
  InstanceTest(y, Foo);
  InstanceTest(x, Bar);
  InstanceTest(y, Bar);
  if (e) x.__proto__ = Bar.prototype;
  if (f) y.__proto__ = Foo.prototype;
  if (g) {
    x.__proto__ = y;
  } else {
    if (h) y.__proto__ = x;
  }
  InstanceTest(x, Foo);
  InstanceTest(y, Foo);
  InstanceTest(x, Bar);
  InstanceTest(y, Bar);
}
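Each of the 256 entries in correct_answers above corresponds to one combination of the eight flags, which the driver loop decodes from the index with bit masks. A minimal sketch of that decoding, using a hypothetical flagsOf helper that is not part of the test:

// Hypothetical helper illustrating how the loop index i selects flags.
// (i & 2), (i & 4), ... evaluate to truthy numbers rather than booleans;
// the test only uses them in boolean positions, so that is equivalent.
function flagsOf(i) {
  return {
    a: !!(i & 1),    // x is constructed with new Foo() (else new Bar())
    b: !!(i & 2),    // y is constructed with new Foo() (else new Bar())
    c: !!(i & 4),    // Foo.prototype replaced by a non-object (12)
    d: !!(i & 8),    // Bar.prototype replaced by a non-object (13)
    e: !!(i & 0x10), // x.__proto__ redirected to Bar.prototype
    f: !!(i & 0x20), // y.__proto__ redirected to Foo.prototype
    g: !!(i & 0x40), // x.__proto__ set to y
    h: !!(i & 0x80)  // y.__proto__ set to x (only consulted when !g)
  };
}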

50
deps/v8/test/mjsunit/property-load-across-eval.js

@@ -28,17 +28,55 @@
// Tests loading of properties across eval calls.
var x = 1;
function global_function() { return 'global'; }
const const_uninitialized;
const const_initialized = function() { return "const_global"; }
// Test loading across an eval call that does not shadow variables.
function testNoShadowing() {
  var y = 2;
  function local_function() { return 'local'; }
  const local_const_uninitialized;
  const local_const_initialized = function() { return "const_local"; }
  function f() {
    eval('1');
    assertEquals(1, x);
    assertEquals(2, y);
    assertEquals('global', global_function());
    assertEquals('local', local_function());
    try {
      const_uninitialized();
      assertUnreachable();
    } catch(e) {
      // Ignore.
    }
    assertEquals('const_global', const_initialized());
    try {
      local_const_uninitialized();
      assertUnreachable();
    } catch(e) {
      // Ignore.
    }
    assertEquals('const_local', local_const_initialized());
    function g() {
      assertEquals(1, x);
      assertEquals(2, y);
      assertEquals('global', global_function());
      assertEquals('local', local_function());
      try {
        const_uninitialized();
        assertUnreachable();
      } catch(e) {
        // Ignore.
      }
      assertEquals('const_global', const_initialized());
      try {
        local_const_uninitialized();
        assertUnreachable();
      } catch(e) {
        // Ignore.
      }
      assertEquals('const_local', local_const_initialized());
    }
    g();
  }
@@ -50,14 +88,19 @@ testNoShadowing();
// Test loading across eval calls that do not shadow variables.
function testNoShadowing2() {
  var y = 2;
  function local_function() { return 'local'; }
  eval('1');
  function f() {
    eval('1');
    assertEquals(1, x);
    assertEquals(2, y);
    assertEquals('global', global_function());
    assertEquals('local', local_function());
    function g() {
      assertEquals(1, x);
      assertEquals(2, y);
      assertEquals('global', global_function());
      assertEquals('local', local_function());
    }
    g();
  }
@@ -69,13 +112,20 @@ testNoShadowing2();
// Test loading across an eval call that shadows variables.
function testShadowing() {
  var y = 2;
  function local_function() { return 'local'; }
  function f() {
    eval('var x = 3; var y = 4;');
    assertEquals(3, x);
    assertEquals(4, y);
    eval('function local_function() { return "new_local"; }');
    eval('function global_function() { return "new_nonglobal"; }');
    assertEquals('new_nonglobal', global_function());
    assertEquals('new_local', local_function());
    function g() {
      assertEquals(3, x);
      assertEquals(4, y);
      assertEquals('new_nonglobal', global_function());
      assertEquals('new_local', local_function());
    }
    g();
  }
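The scoping rule these tests rely on can be seen in isolation: in sloppy mode, a direct eval's var declaration is created in the calling function's scope and shadows outer bindings there. A minimal standalone sketch, not part of the test file, using the same mjsunit asserts:

var outer_x = 1;
function evalShadows() {
  eval('var outer_x = 3;');  // Introduces a local binding in this scope.
  return outer_x;
}
assertEquals(3, evalShadows());  // The eval-introduced local shadows.
assertEquals(1, outer_x);        // The outer binding is untouched.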

36
deps/v8/test/mjsunit/regress/regress-696.js

@@ -0,0 +1,36 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// See: http://code.google.com/p/v8/issues/detail?id=696
// The dateparser change in revision 4557, which made Date.parse accept
// time-only strings, also made it misleadingly accept strings that do not
// start with a number.
assertTrue(isNaN(Date.parse('x')));
assertTrue(isNaN(Date.parse('1x')));
assertTrue(isNaN(Date.parse('xT10:00:00')));
assertTrue(isNaN(Date.parse('This is a relatively long string')));
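For contrast, strings that do begin with a valid date should still parse. A minimal sanity sketch, not part of the regression test, assuming the engine accepts these long-supported conventional formats:

assertFalse(isNaN(Date.parse('May 17, 2010')));
assertFalse(isNaN(Date.parse('Mon, 17 May 2010 10:00:00 GMT')));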

34
deps/v8/test/mjsunit/regress/regress-697.js

@@ -0,0 +1,34 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// See: http://code.google.com/p/v8/issues/detail?id=697
// Object.create must accept a function as the prototype argument
// without throwing.
try {
  Object.create(function(){});
} catch (e) {
  assertTrue(false);
}
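With the fix, the function passed as the first argument simply becomes the new object's prototype. A minimal sketch of the expected post-fix behavior, not part of the test file:

var proto = function() {};
var obj = Object.create(proto);
// The function is on obj's prototype chain, like any other prototype.
assertTrue(proto.isPrototypeOf(obj));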

7
deps/v8/test/mjsunit/smi-ops.js

@@ -678,3 +678,10 @@ function LogicalShiftRightByMultipleOf32(x) {
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));
// Verify that the shift amount is reduced modulo 32, not modulo 64.
function LeftShiftThreeBy(x) { return 3 << x; }
assertEquals(24, LeftShiftThreeBy(3));
assertEquals(24, LeftShiftThreeBy(35));
assertEquals(24, LeftShiftThreeBy(67));
assertEquals(24, LeftShiftThreeBy(-29));
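The mod-32 behavior follows from ECMAScript's shift semantics: the count is converted with ToUint32 and only its low five bits are used, i.e. count & 31. A minimal sketch restating the assertions above in terms of that mask, not part of the test file:

// Only the low five bits of the shift count are used (count & 31):
assertEquals(3 << (35 & 31), LeftShiftThreeBy(35));    // 35 & 31 == 3
assertEquals(3 << (67 & 31), LeftShiftThreeBy(67));    // 67 & 31 == 3
assertEquals(3 << (-29 & 31), LeftShiftThreeBy(-29));  // -29 & 31 == 3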

4
deps/v8/tools/gyp/v8.gyp

@@ -276,6 +276,8 @@
'../../src/disasm.h',
'../../src/disassembler.cc',
'../../src/disassembler.h',
'../../src/dtoa.cc',
'../../src/dtoa.h',
'../../src/dtoa-config.c',
'../../src/diy-fp.cc',
'../../src/diy-fp.h',
@@ -288,6 +290,8 @@
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
'../../src/flag-definitions.h',
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
'../../src/flags.cc',
'../../src/flags.h',
'../../src/flow-graph.cc',
