
Upgrade V8 to 2.2.12

Ref: v0.7.4-release
Author: Ryan Dahl
Parent commit: d4345e1ff8
69 changed files (changed-line counts in parentheses):

  1. deps/v8/ChangeLog (8)
  2. deps/v8/SConstruct (1)
  3. deps/v8/include/v8.h (114)
  4. deps/v8/src/api.cc (67)
  5. deps/v8/src/arm/assembler-arm-inl.h (26)
  6. deps/v8/src/arm/assembler-arm.cc (43)
  7. deps/v8/src/arm/assembler-arm.h (8)
  8. deps/v8/src/arm/codegen-arm.cc (149)
  9. deps/v8/src/arm/full-codegen-arm.cc (1464)
  10. deps/v8/src/arm/macro-assembler-arm.cc (45)
  11. deps/v8/src/arm/macro-assembler-arm.h (12)
  12. deps/v8/src/arm/stub-cache-arm.cc (72)
  13. deps/v8/src/assembler.h (21)
  14. deps/v8/src/circular-queue-inl.h (48)
  15. deps/v8/src/circular-queue.h (26)
  16. deps/v8/src/compiler.cc (34)
  17. deps/v8/src/cpu-profiler-inl.h (1)
  18. deps/v8/src/cpu-profiler.cc (1)
  19. deps/v8/src/cpu-profiler.h (3)
  20. deps/v8/src/flag-definitions.h (4)
  21. deps/v8/src/handles.cc (5)
  22. deps/v8/src/handles.h (2)
  23. deps/v8/src/ia32/assembler-ia32-inl.h (26)
  24. deps/v8/src/ia32/codegen-ia32.cc (87)
  25. deps/v8/src/ia32/codegen-ia32.h (4)
  26. deps/v8/src/ia32/full-codegen-ia32.cc (4)
  27. deps/v8/src/ia32/stub-cache-ia32.cc (81)
  28. deps/v8/src/jump-target-light.h (2)
  29. deps/v8/src/objects.cc (408)
  30. deps/v8/src/objects.h (19)
  31. deps/v8/src/parser.cc (6)
  32. deps/v8/src/parser.h (2)
  33. deps/v8/src/platform-freebsd.cc (2)
  34. deps/v8/src/platform-linux.cc (22)
  35. deps/v8/src/platform-macos.cc (7)
  36. deps/v8/src/platform-win32.cc (6)
  37. deps/v8/src/platform.h (2)
  38. deps/v8/src/runtime.js (13)
  39. deps/v8/src/serialize.cc (4)
  40. deps/v8/src/top.cc (9)
  41. deps/v8/src/unbound-queue-inl.h (87)
  42. deps/v8/src/unbound-queue.h (66)
  43. deps/v8/src/v8natives.js (32)
  44. deps/v8/src/version.cc (2)
  45. deps/v8/src/x64/assembler-x64-inl.h (31)
  46. deps/v8/src/x64/assembler-x64.cc (46)
  47. deps/v8/src/x64/assembler-x64.h (6)
  48. deps/v8/src/x64/codegen-x64.cc (238)
  49. deps/v8/src/x64/codegen-x64.h (5)
  50. deps/v8/src/x64/full-codegen-x64.cc (2)
  51. deps/v8/src/x64/macro-assembler-x64.cc (21)
  52. deps/v8/src/x64/stub-cache-x64.cc (81)
  53. deps/v8/test/cctest/SConscript (1)
  54. deps/v8/test/cctest/test-api.cc (217)
  55. deps/v8/test/cctest/test-circular-queue.cc (46)
  56. deps/v8/test/cctest/test-log-stack-tracer.cc (8)
  57. deps/v8/test/cctest/test-macro-assembler-x64.cc (357)
  58. deps/v8/test/cctest/test-thread-termination.cc (45)
  59. deps/v8/test/cctest/test-unbound-queue.cc (54)
  60. deps/v8/test/mjsunit/object-define-property.js (251)
  61. deps/v8/test/mjsunit/regress/regress-712.js (38)
  62. deps/v8/test/mjsunit/regress/regress-720.js (36)
  63. deps/v8/test/mjsunit/samevalue.js (102)
  64. deps/v8/tools/gyp/v8.gyp (2)
  65. deps/v8/tools/v8.xcodeproj/project.pbxproj (4)
  66. deps/v8/tools/visual_studio/v8_base.vcproj (32)
  67. deps/v8/tools/visual_studio/v8_base_arm.vcproj (8)
  68. deps/v8/tools/visual_studio/v8_base_x64.vcproj (8)
  69. deps/v8/tools/visual_studio/v8_cctest.vcproj (4)

deps/v8/ChangeLog (8 changed lines)

@@ -1,3 +1,11 @@
2010-05-26: Version 2.2.12
Allowed accessors to be defined on objects rather than just object
templates.
Changed the ScriptData API.
2010-05-21: Version 2.2.11
Fix crash bug in liveedit on 64 bit.
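The first entry is the headline API change in this upgrade. As a quick orientation, here is a minimal sketch (ours, not part of the commit) of defining an accessor directly on an instance, assuming v8 2.2.12 headers, an entered Context, and an open HandleScope:

    // Sketch: an AccessorGetter/AccessorSetter pair wired onto a concrete
    // object via the new Object::SetAccessor (previously this was only
    // possible on an ObjectTemplate). Names here are illustrative.
    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> property,
                                         const v8::AccessorInfo& info) {
      return v8::Integer::New(42);
    }

    static void XSetter(v8::Local<v8::String> property,
                        v8::Local<v8::Value> value,
                        const v8::AccessorInfo& info) {
      // A real setter would store `value`; this sketch ignores writes.
    }

    void DefineAccessorOnInstance() {
      v8::Local<v8::Object> obj = v8::Object::New();
      obj->SetAccessor(v8::String::New("x"), XGetter, XSetter);
    }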

deps/v8/SConstruct (1 changed line)

@@ -294,6 +294,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']

deps/v8/include/v8.h (114 changed lines)

@@ -126,6 +126,7 @@ template <class T> class Persistent;
class FunctionTemplate;
class ObjectTemplate;
class Data;
class AccessorInfo;
class StackTrace;
class StackFrame;
@@ -512,11 +513,37 @@ class V8EXPORT Data {
class V8EXPORT ScriptData { // NOLINT
public:
virtual ~ScriptData() { }
/**
* Pre-compiles the specified script (context-independent).
*
* \param input Pointer to UTF-8 script source code.
* \param length Length of UTF-8 script source code.
*/
static ScriptData* PreCompile(const char* input, int length);
static ScriptData* New(unsigned* data, int length);
/**
* Load previous pre-compilation data.
*
* \param data Pointer to data returned by a call to Data() of a previous
* ScriptData. Ownership is not transferred.
* \param length Length of data.
*/
static ScriptData* New(const char* data, int length);
/**
* Returns the length of Data().
*/
virtual int Length() = 0;
virtual unsigned* Data() = 0;
/**
* Returns a serialized representation of this ScriptData that can later be
* passed to New(). NOTE: Serialized data is platform-dependent.
*/
virtual const char* Data() = 0;
/**
* Returns true if the source code could not be parsed.
*/
virtual bool HasError() = 0;
};
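Note that this view interleaves the removed unsigned*-based declarations with their char*-based replacements. Putting the new surface together, a round trip through the revised API might look like the following sketch (ours, not from the commit; it assumes Script::Compile still accepts pre-parse data as its third argument, and an entered Context with an open HandleScope):

    // Sketch: precompile once, persist the platform-dependent blob, and
    // reuse it later via the new char-based ScriptData::New().
    void PrecompileRoundTrip(const char* source, int source_length) {
      v8::ScriptData* pre = v8::ScriptData::PreCompile(source, source_length);

      const char* blob = pre->Data();   // serialized, platform-dependent
      int blob_length = pre->Length();  // length of Data() in bytes

      // New() copies the bytes (to fix up alignment), so the blob does not
      // need to outlive `restored`.
      v8::ScriptData* restored = v8::ScriptData::New(blob, blob_length);

      v8::Local<v8::Script> script = v8::Script::Compile(
          v8::String::New(source, source_length), NULL, restored);

      delete restored;
      delete pre;
      (void)script;  // compile result unused in this sketch
    }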
@@ -1305,6 +1332,41 @@ enum ExternalArrayType {
kExternalFloatArray
};
/**
* Accessor[Getter|Setter] are used as callback functions when
* setting|getting a particular property. See Object and ObjectTemplate's
* method SetAccessor.
*/
typedef Handle<Value> (*AccessorGetter)(Local<String> property,
const AccessorInfo& info);
typedef void (*AccessorSetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
/**
* Access control specifications.
*
* Some accessors should be accessible across contexts. These
* accessors have an explicit access control parameter which specifies
* the kind of cross-context access that should be allowed.
*
* Additionally, for security, accessors can prohibit overwriting by
* accessors defined in JavaScript. For objects that have such
* accessors either locally or in their prototype chain it is not
* possible to overwrite the accessor by using __defineGetter__ or
* __defineSetter__ from JavaScript code.
*/
enum AccessControl {
DEFAULT = 0,
ALL_CAN_READ = 1,
ALL_CAN_WRITE = 1 << 1,
PROHIBITS_OVERWRITING = 1 << 2
};
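One practical note for embedders: these enumerators are bit flags, so combining them in C++ needs a cast back to AccessControl. A small illustration (ours, not from the header):

    // Sketch: a flag combination for an accessor that is readable across
    // contexts and cannot be overwritten from JavaScript.
    inline v8::AccessControl ReadableAndLocked() {
      return static_cast<v8::AccessControl>(
          v8::ALL_CAN_READ | v8::PROHIBITS_OVERWRITING);
    }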
/**
* A JavaScript object (ECMA-262, 4.3.3)
*/
@@ -1347,6 +1409,13 @@ class V8EXPORT Object : public Value {
bool Delete(uint32_t index);
bool SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
/**
* Returns an array containing the names of the enumerable properties
* of this object, including properties from prototype objects. The
@@ -1641,19 +1710,6 @@ typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
/**
* Accessor[Getter|Setter] are used as callback functions when
* setting|getting a particular property. See objectTemplate::SetAccessor.
*/
typedef Handle<Value> (*AccessorGetter)(Local<String> property,
const AccessorInfo& info);
typedef void (*AccessorSetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
/**
* NamedProperty[Getter|Setter] are used as interceptors on object.
* See ObjectTemplate::SetNamedPropertyHandler.
@@ -1733,27 +1789,6 @@ typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
/**
* Access control specifications.
*
* Some accessors should be accessible across contexts. These
* accessors have an explicit access control parameter which specifies
* the kind of cross-context access that should be allowed.
*
* Additionally, for security, accessors can prohibit overwriting by
* accessors defined in JavaScript. For objects that have such
* accessors either locally or in their prototype chain it is not
* possible to overwrite the accessor by using __defineGetter__ or
* __defineSetter__ from JavaScript code.
*/
enum AccessControl {
DEFAULT = 0,
ALL_CAN_READ = 1,
ALL_CAN_WRITE = 1 << 1,
PROHIBITS_OVERWRITING = 1 << 2
};
/**
* Access type specification.
*/
@@ -2866,7 +2901,12 @@ class V8EXPORT Context {
*/
void ReattachGlobal(Handle<Object> global_object);
/** Creates a new context. */
/** Creates a new context.
*
* Returns a persistent handle to the newly allocated context. This
* persistent handle has to be disposed when the context is no
* longer used so the context can be garbage collected.
*/
static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
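The disposal contract spelled out in the new comment matters in practice; a minimal lifecycle sketch (ours, using only the documented API):

    // Sketch: create, use, and dispose a context so it can be collected.
    v8::Persistent<v8::Context> context = v8::Context::New();
    {
      v8::Context::Scope scope(context);
      // ... compile and run scripts here ...
    }
    context.Dispose();  // required, or the context is kept alive forever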

deps/v8/src/api.cc (67 changed lines)

@@ -58,11 +58,10 @@
namespace v8 {
#define ON_BAILOUT(location, code) \
if (IsDeadCheck(location)) { \
code; \
UNREACHABLE(); \
#define ON_BAILOUT(location, code) \
if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
code; \
UNREACHABLE(); \
}
@@ -776,18 +775,13 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
}
void FunctionTemplate::AddInstancePropertyAccessor(
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
AccessorGetter getter,
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
return;
}
ENTER_V8;
HandleScope scope;
i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
ASSERT(getter != NULL);
obj->set_getter(*FromCData(getter));
@@ -799,7 +793,26 @@ void FunctionTemplate::AddInstancePropertyAccessor(
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
return obj;
}
void FunctionTemplate::AddInstancePropertyAccessor(
v8::Handle<String> name,
AccessorGetter getter,
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
return;
}
ENTER_V8;
HandleScope scope;
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
if (list->IsUndefined()) {
list = NeanderArray().value();
@@ -1106,8 +1119,19 @@ ScriptData* ScriptData::PreCompile(const char* input, int length) {
}
ScriptData* ScriptData::New(unsigned* data, int length) {
return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
ScriptData* ScriptData::New(const char* data, int length) {
// Return an empty ScriptData if the length is obviously invalid.
if (length % sizeof(unsigned) != 0) {
return new i::ScriptDataImpl(i::Vector<unsigned>());
}
// Copy the data to ensure it is properly aligned.
int deserialized_data_length = length / sizeof(unsigned);
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
memcpy(deserialized_data, data, length);
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
}
@@ -2354,6 +2378,23 @@ bool v8::Object::Has(uint32_t index) {
}
bool Object::SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
ON_BAILOUT("v8::Object::SetAccessor()", return false);
ENTER_V8;
HandleScope scope;
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
return !result.is_null() && !result->IsUndefined();
}
bool v8::Object::HasRealNamedProperty(Handle<String> key) {
ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
return Utils::OpenHandle(this)->HasRealNamedProperty(

deps/v8/src/arm/assembler-arm-inl.h (26 changed lines)

@@ -39,6 +39,7 @@
#include "arm/assembler-arm.h"
#include "cpu.h"
#include "debug.h"
namespace v8 {
@@ -73,6 +74,11 @@ Address RelocInfo::target_address_address() {
}
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
@@ -162,6 +168,26 @@ bool RelocInfo::IsPatchedReturnSequence() {
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
visitor->VisitRuntimeEntry(this);
}
}
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;

deps/v8/src/arm/assembler-arm.cc (43 changed lines)

@@ -1363,40 +1363,25 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(CpuFeatures::IsEnabled(ARMv7));
ASSERT(src.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B4, dst, src);
#else
// Generate two ldr instructions if ldrd is not available.
MemOperand src1(src);
src1.set_offset(src1.offset() + 4);
Register dst1(dst);
dst1.set_code(dst1.code() + 1);
if (dst.is(src.rn())) {
ldr(dst1, src1, cond);
ldr(dst, src, cond);
} else {
ldr(dst, src, cond);
ldr(dst1, src1, cond);
}
#endif
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());
addrmod3(cond | B7 | B6 | B4, dst1, src);
}
void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
void Assembler::strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
#else
// Generate two str instructions if strd is not available.
MemOperand dst1(dst);
dst1.set_offset(dst1.offset() + 4);
Register src1(src);
src1.set_code(src1.code() + 1);
str(src, dst, cond);
str(src1, dst1, cond);
#endif
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
ASSERT(CpuFeatures::IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Load/Store multiple instructions.

deps/v8/src/arm/assembler-arm.h (8 changed lines)

@@ -773,8 +773,12 @@ class Assembler : public Malloced {
void strh(Register src, const MemOperand& dst, Condition cond = al);
void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
void ldrd(Register dst, const MemOperand& src, Condition cond = al);
void strd(Register src, const MemOperand& dst, Condition cond = al);
void ldrd(Register dst1,
Register dst2,
const MemOperand& src, Condition cond = al);
void strd(Register src1,
Register src2,
const MemOperand& dst, Condition cond = al);
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);

deps/v8/src/arm/codegen-arm.cc (149 changed lines)

@@ -1514,7 +1514,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Then process it as a normal function call.
__ ldr(r0, MemOperand(sp, 3 * kPointerSize));
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ strd(r0, MemOperand(sp, 2 * kPointerSize));
__ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
frame_->CallStub(&call_function, 3);
@@ -2307,12 +2307,10 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
node->continue_target()->SetExpectedHeight();
// Load the current count to r0, load the length to r1.
__ ldrd(r0, frame_->ElementAt(0));
__ Ldrd(r0, r1, frame_->ElementAt(0));
__ cmp(r0, r1); // compare to the array length
node->break_target()->Branch(hs);
__ ldr(r0, frame_->ElementAt(0));
// Get the i'th entry of the array.
__ ldr(r2, frame_->ElementAt(2));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -2730,7 +2728,6 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function info and instantiate it.
@@ -2751,7 +2748,6 @@ void CodeGenerator::VisitSharedFunctionInfoLiteral(
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
InstantiateFunction(node->shared_function_info());
ASSERT_EQ(original_height + 1, frame_->height());
@@ -4045,37 +4041,35 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
__ tst(r0, Operand(kSmiTagMask));
Load(args->at(0));
Register reg = frame_->PopToRegister();
__ tst(reg, Operand(kSmiTagMask));
cc_reg_ = eq;
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
// See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (ShouldGenerateLog(args->at(0))) {
LoadAndSpill(args->at(1));
LoadAndSpill(args->at(2));
Load(args->at(1));
Load(args->at(2));
frame_->SpillAll();
VirtualFrame::SpilledScope spilled_scope(frame_);
__ CallRuntime(Runtime::kLog, 2);
}
#endif
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
__ tst(r0, Operand(kSmiTagMask | 0x80000000u));
Load(args->at(0));
Register reg = frame_->PopToRegister();
__ tst(reg, Operand(kSmiTagMask | 0x80000000u));
cc_reg_ = eq;
}
@@ -4106,22 +4100,23 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
// flatten the string, which will ensure that the answer is in the left hand
// side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
Comment(masm_, "[ GenerateFastCharCodeAt");
LoadAndSpill(args->at(0));
LoadAndSpill(args->at(1));
frame_->EmitPop(r1); // Index.
frame_->EmitPop(r2); // String.
Load(args->at(0));
Load(args->at(1));
Register index = frame_->PopToRegister(); // Index.
Register string = frame_->PopToRegister(index); // String.
Register result = VirtualFrame::scratch0();
Register scratch = VirtualFrame::scratch1();
Label slow_case;
Label exit;
StringHelper::GenerateFastCharCodeAt(masm_,
r2,
r1,
r3,
r0,
string,
index,
scratch,
result,
&slow_case,
&slow_case,
&slow_case,
@@ -4131,10 +4126,10 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&slow_case);
// Move the undefined value into the result register, which will
// trigger the slow case.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&exit);
frame_->EmitPush(r0);
frame_->EmitPush(result);
}
@@ -4214,9 +4209,8 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
__ tst(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(ne);
__ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
@@ -4256,48 +4250,52 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 0);
Register scratch0 = VirtualFrame::scratch0();
Register scratch1 = VirtualFrame::scratch1();
// Get the frame pointer for the calling frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &check_frame_marker);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch1,
MemOperand(scratch0, StandardFrameConstants::kContextOffset));
__ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ ldr(scratch0,
MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
__ ldr(scratch1,
MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
__ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
cc_reg_ = eq;
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 0);
Label exit;
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
Register tos = frame_->GetTOSRegister();
Register scratch0 = VirtualFrame::scratch0();
Register scratch1 = VirtualFrame::scratch1();
// Check if the calling frame is an arguments adaptor frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &exit);
__ ldr(scratch0,
MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch1,
MemOperand(scratch0, StandardFrameConstants::kContextOffset));
__ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Get the number of formal parameters.
__ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ ldr(tos,
MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
eq);
__ bind(&exit);
frame_->EmitPush(r0);
frame_->EmitPush(tos);
}
@@ -4735,15 +4733,14 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
LoadAndSpill(args->at(0));
LoadAndSpill(args->at(1));
frame_->EmitPop(r0);
frame_->EmitPop(r1);
__ cmp(r0, r1);
Load(args->at(0));
Load(args->at(1));
Register lhs = frame_->PopToRegister();
Register rhs = frame_->PopToRegister(lhs);
__ cmp(lhs, rhs);
cc_reg_ = eq;
}
@@ -5042,6 +5039,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// after evaluating the left hand side (due to the shortcut
// semantics), but the compiler must (statically) know if the result
// of compiling the binary operation is materialized or not.
VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
@@ -5053,8 +5051,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
JumpTarget pop_and_continue;
JumpTarget exit;
__ ldr(r0, frame_->Top()); // Duplicate the stack top.
frame_->EmitPush(r0);
frame_->Dup();
// Avoid popping the result if it converts to 'false' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
@@ -5063,7 +5060,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Pop the result of evaluating the first part.
pop_and_continue.Bind();
frame_->EmitPop(r0);
frame_->Pop();
// Evaluate right side expression.
is_true.Bind();
@@ -5100,8 +5097,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
JumpTarget pop_and_continue;
JumpTarget exit;
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
frame_->Dup();
// Avoid popping the result if it converts to 'true' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
@@ -5110,7 +5106,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Pop the result of evaluating the first part.
pop_and_continue.Bind();
frame_->EmitPop(r0);
frame_->Pop();
// Evaluate right side expression.
is_false.Bind();
@@ -5145,7 +5141,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
if (node->op() == Token::AND || node->op() == Token::OR) {
VirtualFrame::SpilledScope spilled_scope(frame_);
GenerateLogicalBooleanOperation(node);
} else {
// Optimize for the case where (at least) one of the expressions
@@ -5198,9 +5193,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
frame_->EmitPush(MemOperand(frame_->Function()));
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -6386,7 +6379,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Load rhs to a double in r0, r1.
__ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ pop(lr);
}
@@ -6421,7 +6414,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
__ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -6585,8 +6578,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
__ sub(r7, r1, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
__ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(both_loaded_as_doubles);
}
@@ -6963,7 +6956,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that second double is in r2 and r3.
__ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
@@ -7015,7 +7008,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
// Calling convention says that first double is in r0 and r1.
__ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
}
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
@@ -7086,7 +7079,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
#else
// Double returned in registers 0 and 1.
__ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
#endif
__ mov(r0, Operand(r5));
// And we are done.

deps/v8/src/arm/full-codegen-arm.cc (1464 changed lines)

File diff suppressed because it is too large

deps/v8/src/arm/macro-assembler-arm.cc (45 changed lines)

@@ -354,6 +354,51 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
}
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
MemOperand src2(src);
src2.set_offset(src2.offset() + 4);
if (dst1.is(src.rn())) {
ldr(dst2, src2, cond);
ldr(dst1, src, cond);
} else {
ldr(dst1, src, cond);
ldr(dst2, src2, cond);
}
}
}
void MacroAssembler::Strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
dst2.set_offset(dst2.offset() + 4);
str(src1, dst, cond);
str(src2, dst2, cond);
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());

12
deps/v8/src/arm/macro-assembler-arm.h

@@ -185,6 +185,18 @@ class MacroAssembler: public Assembler {
}
}
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
Register dst2,
const MemOperand& src,
Condition cond = al);
// Store two consecutive registers to two consecutive memory locations.
void Strd(Register src1,
Register src2,
const MemOperand& dst,
Condition cond = al);
// ---------------------------------------------------------------------------
// Stack limit support

deps/v8/src/arm/stub-cache-arm.cc (72 changed lines)

@@ -436,7 +436,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
@@ -456,7 +456,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
@@ -466,14 +467,18 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ push(receiver);
__ Push(holder, name_);
// Invoke an interceptor. Note: the map checks from the receiver to the
// interceptor's holder have been compiled before (see the callers of
// this method).
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
// Compare with no_interceptor_result_sentinel.
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
@@ -488,13 +493,17 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
holder = stub_compiler->CheckPrototypes(holder_obj,
// We found FIELD property in prototype chain of interceptor's holder.
// Check that the maps from interceptor's holder to field's holder
// haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder,
holder,
lookup->holder(),
scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm,
r0,
holder,
@@ -502,30 +511,38 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call: push receiver to stack.
Label cleanup;
__ push(receiver);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
// Continue tail call preparation: push remaining parameters.
__ push(holder);
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
__ Push(scratch1, name_);
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Clean up code: we pushed receiver and need to remove it.
__ bind(&cleanup);
__ pop(scratch2);
}
@@ -536,9 +553,9 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
@@ -714,7 +731,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
@@ -727,10 +744,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
depth1 =
optimization.GetPrototypeDepthOfExpectedType(object,
interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
lookup->holder());
depth2 =
optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
lookup->holder());
}
can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
(depth2 != kInvalidProtoDepth);
@@ -745,23 +765,31 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ReserveSpaceForFastApiCall(masm, scratch1);
}
// Check that the maps from receiver to interceptor's holder
// haven't changed and thus we can invoke interceptor.
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
scratch2, name, depth1, miss);
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
Label regular_invoke;
LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
&regular_invoke);
// Generate code for the failed interceptor case.
// Interceptor returned nothing for this property. Try to use cached
// constant function.
// Check the lookup is still valid.
stub_compiler_->CheckPrototypes(holder_obj, receiver,
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
@@ -769,12 +797,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JUMP_FUNCTION);
}
// Deferred code for the fast API call case: clean up preallocated space.
if (can_do_fast_api_call) {
__ bind(&miss_cleanup);
FreeSpaceForFastApiCall(masm);
__ b(miss_label);
}
// Invoke a regular function.
__ bind(&regular_invoke);
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
@@ -787,10 +817,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
String* name,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
miss_label);
@@ -803,7 +833,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
__ CallExternalReference(
ExternalReference(

deps/v8/src/assembler.h (21 changed lines)

@@ -38,6 +38,7 @@
#include "runtime.h"
#include "top.h"
#include "token.h"
#include "objects.h"
namespace v8 {
namespace internal {
@@ -199,9 +200,23 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target));
// Read the address of the word containing the target_address. Can only
// be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY.
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-dependent.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
// an instruction like movw/movt where the target bits are mixed into the
// instruction bits the size of the target will be zero, indicating that the
// serializer should not step forwards in memory after a target is resolved
// and written. In this case the target_address_address function above
// should return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target.
INLINE(int target_address_size());
// Read/modify the reference in the instruction this relocation
// applies to; can only be called if rmode_ is external_reference
@@ -216,6 +231,8 @@
INLINE(Object** call_object_address());
INLINE(void set_call_object(Object* target));
inline void Visit(ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);

deps/v8/src/circular-queue-inl.h (48 changed lines)

@@ -34,54 +34,6 @@ namespace v8 {
namespace internal {
template<typename Record>
CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
: buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
enqueue_semaphore_(
OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
enqueue_pos_(buffer_),
dequeue_pos_(buffer_) {
// To be able to distinguish between a full and an empty queue
// state, the queue must be capable of containing at least 2
// records.
ASSERT((buffer_end_ - buffer_) >= 2);
}
template<typename Record>
CircularQueue<Record>::~CircularQueue() {
DeleteArray(buffer_);
delete enqueue_semaphore_;
}
template<typename Record>
void CircularQueue<Record>::Dequeue(Record* rec) {
ASSERT(!IsEmpty());
*rec = *dequeue_pos_;
dequeue_pos_ = Next(dequeue_pos_);
// Signal that we now have a spare record slot.
enqueue_semaphore_->Signal();
}
template<typename Record>
void CircularQueue<Record>::Enqueue(const Record& rec) {
// Wait until we have at least one spare record.
enqueue_semaphore_->Wait();
ASSERT(Next(enqueue_pos_) != dequeue_pos_);
*enqueue_pos_ = rec;
enqueue_pos_ = Next(enqueue_pos_);
}
template<typename Record>
Record* CircularQueue<Record>::Next(Record* curr) {
return ++curr != buffer_end_ ? curr : buffer_;
}
void* SamplingCircularQueue::Enqueue() {
WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
void* result = producer_pos_->enqueue_pos;

deps/v8/src/circular-queue.h (26 changed lines)

@@ -32,32 +32,6 @@ namespace v8 {
namespace internal {
// Lock-based blocking circular queue for small records. Intended for
// transfer of small records between a single producer and a single
// consumer. Blocks on enqueue operation if the queue is full.
template<typename Record>
class CircularQueue {
public:
inline explicit CircularQueue(int desired_buffer_size_in_bytes);
inline ~CircularQueue();
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }
private:
INLINE(Record* Next(Record* curr));
Record* buffer_;
Record* const buffer_end_;
Semaphore* enqueue_semaphore_;
Record* enqueue_pos_;
Record* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(CircularQueue);
};
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,

deps/v8/src/compiler.cc (34 changed lines)

@@ -44,6 +44,18 @@
namespace v8 {
namespace internal {
// For normal operation the syntax checker is used to determine whether to
// use the full compiler for top level code or not. However if the flag
// --always-full-compiler is specified or debugging is active the full
// compiler will be used for all code.
static bool AlwaysFullCompiler() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
#else
return FLAG_always_full_compiler;
#endif
}
static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
FunctionLiteral* function = info->function();
@@ -120,21 +132,9 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
bool force_full_compiler = false;
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
// On ia32 the full compiler can compile all code, whereas on the other
// platforms the supported constructs are checked by the associated syntax
// checker. When
// --always-full-compiler is used on ia32 the syntax checker is still in
// effect, but there is a special flag --force-full-compiler to ignore the
// syntax checker completely and use the full compiler for all code. Also
// when debugging on ia32 the full compiler will be used for all code.
force_full_compiler =
Debugger::IsDebuggerActive() || FLAG_force_full_compiler;
#endif
if (force_full_compiler) {
if (AlwaysFullCompiler()) {
return FullCodeGenerator::MakeCode(info);
} else if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
} else if (FLAG_full_compiler && is_run_once) {
FullCodeGenSyntaxChecker checker;
checker.Check(function);
if (checker.has_supported_syntax()) {
@@ -521,7 +521,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
bool is_run_once = literal->try_full_codegen();
bool is_compiled = false;
if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
if (AlwaysFullCompiler()) {
code = FullCodeGenerator::MakeCode(&info);
is_compiled = true;
} else if (FLAG_full_compiler && is_run_once) {
FullCodeGenSyntaxChecker checker;
checker.Check(literal);
if (checker.has_supported_syntax()) {

deps/v8/src/cpu-profiler-inl.h (1 changed line)

@@ -34,6 +34,7 @@
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
namespace v8 {
namespace internal {

deps/v8/src/cpu-profiler.cc (1 changed line)

@@ -46,7 +46,6 @@ static const int kTickSamplesBufferChunksCount = 16;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator),
running_(false),
events_buffer_(kEventsBufferSize),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),

deps/v8/src/cpu-profiler.h (3 changed lines)

@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "circular-queue.h"
#include "unbound-queue.h"
namespace v8 {
namespace internal {
@@ -181,7 +182,7 @@ class ProfilerEventsProcessor : public Thread {
ProfileGenerator* generator_;
bool running_;
CircularQueue<CodeEventsContainer> events_buffer_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
unsigned enqueue_order_;
};
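The profiler's code-event channel thus moves from the blocking CircularQueue (deleted above) to the new UnboundQueue; its sources (unbound-queue.h / unbound-queue-inl.h) are not reproduced in this view. As a mental model only, and explicitly not V8's implementation, a single-producer/single-consumer unbounded queue can be sketched like this:

    // Sketch: an SPSC unbounded linked queue with a sentinel node. Enqueue
    // never blocks, which is the property the profiler wants here. A real
    // implementation also needs atomic pointer stores / memory barriers
    // between the two threads; they are omitted for brevity.
    template <typename Record>
    class SpscUnboundQueue {
     public:
      SpscUnboundQueue() : head_(new Node(Record())), tail_(head_) {}
      ~SpscUnboundQueue() {
        while (head_ != NULL) {
          Node* next = head_->next;
          delete head_;
          head_ = next;
        }
      }

      // Producer thread only.
      void Enqueue(const Record& rec) {
        tail_->next = new Node(rec);
        tail_ = tail_->next;
      }

      // Consumer thread only; returns false if nothing is ready.
      bool Dequeue(Record* rec) {
        if (head_->next == NULL) return false;
        Node* old_head = head_;
        head_ = head_->next;  // the new sentinel holds the value
        *rec = head_->value;
        delete old_head;
        return true;
      }

      bool IsEmpty() const { return head_->next == NULL; }

     private:
      struct Node {
        explicit Node(const Record& v) : value(v), next(NULL) {}
        Record value;
        Node* next;
      };
      Node* head_;  // owned by the consumer (sentinel)
      Node* tail_;  // owned by the producer
      SpscUnboundQueue(const SpscUnboundQueue&);
      void operator=(const SpscUnboundQueue&);
    };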

deps/v8/src/flag-definitions.h (4 changed lines)

@@ -149,10 +149,6 @@ DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
DEFINE_bool(force_full_compiler, false,
"force use of the dedicated run-once backend for all code")
#endif
DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,

deps/v8/src/handles.cc (5 changed lines)

@@ -399,6 +399,11 @@ Handle<JSObject> Copy(Handle<JSObject> obj) {
}
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
CALL_HEAP_FUNCTION(obj->DefineAccessor(*info), Object);
}
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from proxy objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage

deps/v8/src/handles.h (2 changed lines)

@@ -262,6 +262,8 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
Handle<JSObject> Copy(Handle<JSObject> obj);
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);

deps/v8/src/ia32/assembler-ia32-inl.h (26 changed lines)

@@ -38,6 +38,7 @@
#define V8_IA32_ASSEMBLER_IA32_INL_H_
#include "cpu.h"
#include "debug.h"
namespace v8 {
namespace internal {
@@ -77,6 +78,11 @@ Address RelocInfo::target_address_address() {
}
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
@@ -148,6 +154,26 @@ bool RelocInfo::IsPatchedReturnSequence() {
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
visitor->VisitRuntimeEntry(this);
}
}
Immediate::Immediate(int x) {
x_ = x;
rmode_ = RelocInfo::NONE;

deps/v8/src/ia32/codegen-ia32.cc (87 changed lines)

@@ -695,9 +695,7 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
Result result =
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
frame()->Push(&result);
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
// Anything else can be handled normally.
Load(expr);
@@ -746,7 +744,8 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
// We have to skip storing into the arguments slot if it has already
// been written to. This can happen if a function has a local
// variable named 'arguments'.
Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
@@ -3026,9 +3025,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
Result existing_args =
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
frame()->Push(&existing_args);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
@@ -4719,19 +4716,19 @@ void CodeGenerator::VisitConditional(Conditional* node) {
}
Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Result result;
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
JumpTarget slow;
JumpTarget done;
Result value;
// Generate fast case for loading from slots that correspond to
// local/global variables or arguments unless they are shadowed by
// eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(slot,
typeof_state,
&result,
&value,
&slow,
&done);
@@ -4743,14 +4740,14 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
frame()->EmitPush(esi);
frame()->EmitPush(Immediate(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
result =
value =
frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
} else {
result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
}
done.Bind(&result);
return result;
done.Bind(&value);
frame_->Push(&value);
} else if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
@@ -4767,15 +4764,13 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
__ j(not_equal, &exit);
__ mov(ecx, Factory::undefined_value());
__ bind(&exit);
return Result(ecx);
frame()->EmitPush(ecx);
} else if (slot->type() == Slot::PARAMETER) {
frame()->PushParameterAt(slot->index());
return frame()->Pop();
} else if (slot->type() == Slot::LOCAL) {
frame()->PushLocalAt(slot->index());
return frame()->Pop();
} else {
// The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4784,46 +4779,48 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// The use of SlotOperand below is safe for an unspilled frame
// because it will always be a context slot.
ASSERT(slot->type() == Slot::CONTEXT);
result = allocator()->Allocate();
ASSERT(result.is_valid());
__ mov(result.reg(), SlotOperand(slot, result.reg()));
return result;
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), SlotOperand(slot, temp.reg()));
frame()->Push(&temp);
}
}
Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) {
Result result = LoadFromSlot(slot, state);
LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
// ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
// If the loaded value is a constant, we know if the arguments
// object has been lazily loaded yet.
Result result = frame()->Pop();
if (result.is_constant()) {
if (result.handle()->IsTheHole()) {
result.Unuse();
return StoreArgumentsObject(false);
} else {
return result;
result = StoreArgumentsObject(false);
}
frame()->Push(&result);
return;
}
ASSERT(result.is_register());
// The loaded value is in a register. If it is the sentinel that
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
exit.Branch(not_equal, &result);
frame()->Push(&result);
exit.Branch(not_equal);
result.Unuse();
result = StoreArgumentsObject(false);
exit.Bind(&result);
return result;
frame()->SetElementAt(0, &result);
result.Unuse();
exit.Bind();
return;
}
@@ -5073,8 +5070,7 @@ void CodeGenerator::VisitSlot(Slot* slot) {
UNREACHABLE();
}
} else {
Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
frame()->Push(&result);
LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
}
}
@@ -5395,8 +5391,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
// between the current property value and the actual right-hand side.
Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
frame()->Push(&result);
LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
Load(node->value());
// Perform the binary operation.
@@ -8603,16 +8598,16 @@ Result CodeGenerator::EmitKeyedLoad() {
if (loop_nesting() > 0) {
Comment cmnt(masm_, "[ Inlined load from keyed Property");
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary for the index and later the loaded
// value.
result = allocator()->Allocate();
@@ -8626,6 +8621,7 @@ Result CodeGenerator::EmitKeyedLoad() {
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
// Check that the receiver has the expected map.
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
@@ -8659,7 +8655,6 @@ Result CodeGenerator::EmitKeyedLoad() {
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// Load and check that the result is not the hole.
__ mov(result.reg(), Operand(elements.reg(),
result.reg(),
times_4,
@@ -8857,10 +8852,8 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
Result result =
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) set_unloaded();
cgen_->frame()->Push(&result);
break;
}

deps/v8/src/ia32/codegen-ia32.h (4 changed lines)

@@ -459,8 +459,8 @@ class CodeGenerator: public AstVisitor {
void LoadWithSafeInt32ModeDisabled(Expression* expr);
// Read a value from a slot and leave it on top of the expression stack.
Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);

deps/v8/src/ia32/full-codegen-ia32.cc (4 changed lines)

@@ -2222,9 +2222,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
__ cmp(ebx, FIRST_JS_OBJECT_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax); // Map is now in eax.
__ j(below, &null);
// As long as JS_FUNCTION_TYPE is the last instance type and it is

deps/v8/src/ia32/stub-cache-ia32.cc (81 changed lines)

@@ -356,7 +356,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
@@ -376,7 +376,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
@@ -389,12 +390,17 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ push(holder);
__ push(name_);
// Invoke an interceptor. Note: the map checks from the receiver to the
// interceptor's holder have been compiled before (see the callers of
// this method).
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ cmp(eax, Factory::no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
@ -411,47 +417,61 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
// We found FIELD property in prototype chain of interceptor's holder.
// Check that the maps from interceptor's holder to field's holder
// haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm, eax,
holder, lookup->holder(),
lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call: push receiver to stack after return address.
Label cleanup;
__ pop(scratch2);
__ pop(scratch2); // return address
__ push(receiver);
__ push(scratch2);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
__ pop(scratch2); // save old return address
// Continue tail call preparation: push remaining parameters after
// return address.
__ pop(scratch2); // return address
__ push(holder);
__ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
__ push(holder);
__ push(FieldOperand(holder, AccessorInfo::kDataOffset));
__ push(name_);
__ push(scratch2); // restore old return address
__ push(scratch2); // restore return address
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Clean up code: we pushed receiver after return address and
// need to remove it from there.
__ bind(&cleanup);
__ pop(scratch1);
__ pop(scratch2);
__ pop(scratch1); // return address.
__ pop(scratch2); // receiver.
__ push(scratch1);
}
}
@ -461,10 +481,10 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
__ pop(scratch); // save old return address
PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ push(scratch); // restore old return address
ExternalReference ref = ExternalReference(
@ -626,7 +646,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
@ -639,10 +659,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
depth1 =
optimization.GetPrototypeDepthOfExpectedType(object,
interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
lookup->holder());
depth2 =
optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
lookup->holder());
}
can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
(depth2 != kInvalidProtoDepth);
@ -655,24 +678,32 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ReserveSpaceForFastApiCall(masm, scratch1);
}
// Check that the maps from receiver to interceptor's holder
// haven't changed and thus we can invoke the interceptor.
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
Label regular_invoke;
LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
&regular_invoke);
// Generate code for the failed interceptor case.
// Interceptor returned nothing for this property. Try to use cached
// constant function.
// Check the lookup is still valid.
stub_compiler_->CheckPrototypes(holder_obj, receiver,
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use the cached constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(),
scratch1, scratch2, name,
depth2, miss);
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
@ -680,12 +711,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JUMP_FUNCTION);
}
// Deferred code for fast API call case---clean preallocated space.
if (can_do_fast_api_call) {
__ bind(&miss_cleanup);
FreeSpaceForFastApiCall(masm, scratch1);
__ jmp(miss_label);
}
// Invoke a regular function.
__ bind(&regular_invoke);
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
@ -698,10 +731,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
String* name,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
miss_label);
@ -713,7 +746,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
__ CallExternalReference(
ExternalReference(

2
deps/v8/src/jump-target-light.h

@ -74,6 +74,8 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
inline CodeGenerator* cgen();
Label* entry_label() { return &entry_label_; }
const VirtualFrame* entry_frame() const {
return entry_frame_set_ ? &entry_frame_ : NULL;
}

408
deps/v8/src/objects.cc

@ -189,7 +189,7 @@ Object* Object::GetPropertyWithCallback(Object* receiver,
}
UNREACHABLE();
return 0;
return NULL;
}
@ -1613,7 +1613,7 @@ Object* JSObject::SetPropertyWithCallback(Object* structure,
}
UNREACHABLE();
return 0;
return NULL;
}
@ -1657,7 +1657,8 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
}
Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
Object* value) {
for (Object* pt = GetPrototype();
pt != Heap::null_value();
pt = pt->GetPrototype()) {
@ -1670,12 +1671,12 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
return FixedArray::cast(element)->get(kSetterIndex);
SetElementWithCallback(element, index, value, JSObject::cast(pt));
return true;
}
}
}
return Heap::undefined_value();
return false;
}
@ -2692,30 +2693,11 @@ Object* JSObject::DefineGetterSetter(String* name,
// interceptor calls.
AssertNoContextChange ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
return Heap::undefined_value();
}
// Try to flatten before operating on the string.
name->TryFlatten();
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
// This mechanism is needed for instance in a browser setting, where
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
LookupResult callback_result;
LookupCallback(name, &callback_result);
if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
if (obj->IsAccessorInfo() &&
AccessorInfo::cast(obj)->prohibits_overwriting()) {
return Heap::undefined_value();
}
if (!CanSetCallback(name)) {
return Heap::undefined_value();
}
uint32_t index;
@ -2746,9 +2728,10 @@ Object* JSObject::DefineGetterSetter(String* name,
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsReadOnly()) return Heap::undefined_value();
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
ASSERT(result->IsFixedArray());
return result;
if (result->IsFixedArray()) {
return result;
}
// Otherwise allow overriding it.
}
}
break;
@ -2765,15 +2748,10 @@ Object* JSObject::DefineGetterSetter(String* name,
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
if (obj->IsFixedArray()) {
// The object might be in fast mode even though it has
// a getter/setter.
Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (ok->IsFailure()) return ok;
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
SetNormalizedProperty(name, obj, details);
return obj;
// Use set to update attributes.
return SetPropertyCallback(name, obj, attributes);
}
}
}
@ -2782,50 +2760,100 @@ Object* JSObject::DefineGetterSetter(String* name,
// Allocate the fixed array to hold getter and setter.
Object* structure = Heap::AllocateFixedArray(2, TENURED);
if (structure->IsFailure()) return structure;
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
if (is_element) {
// Normalize object to make this operation simple.
Object* ok = NormalizeElements();
if (ok->IsFailure()) return ok;
return SetElementCallback(index, structure, attributes);
} else {
return SetPropertyCallback(name, structure, attributes);
}
}
// Update the dictionary with the new CALLBACKS property.
Object* dict =
element_dictionary()->Set(index, structure, details);
if (dict->IsFailure()) return dict;
// If name is an index we need to stay in slow case.
NumberDictionary* elements = NumberDictionary::cast(dict);
elements->set_requires_slow_elements();
// Set the potential new dictionary on the object.
set_elements(NumberDictionary::cast(dict));
} else {
// Normalize object to make this operation simple.
Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (ok->IsFailure()) return ok;
bool JSObject::CanSetCallback(String* name) {
ASSERT(!IsAccessCheckNeeded()
|| Top::MayNamedAccess(this, name, v8::ACCESS_SET));
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
if (IsGlobalObject()) {
Object* new_map = map()->CopyDropDescriptors();
if (new_map->IsFailure()) return new_map;
set_map(Map::cast(new_map));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
// This mechanism is needed for instance in a browser setting, where
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
LookupResult callback_result;
LookupCallback(name, &callback_result);
if (callback_result.IsProperty()) {
Object* obj = callback_result.GetCallbackObject();
if (obj->IsAccessorInfo() &&
AccessorInfo::cast(obj)->prohibits_overwriting()) {
return false;
}
// Update the dictionary with the new CALLBACKS property.
return SetNormalizedProperty(name, structure, details);
}
return true;
}
Object* JSObject::SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes) {
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
// Normalize elements to make this operation simple.
Object* ok = NormalizeElements();
if (ok->IsFailure()) return ok;
// Update the dictionary with the new CALLBACKS property.
Object* dict =
element_dictionary()->Set(index, structure, details);
if (dict->IsFailure()) return dict;
NumberDictionary* elements = NumberDictionary::cast(dict);
elements->set_requires_slow_elements();
// Set the potential new dictionary on the object.
set_elements(elements);
return structure;
}
Object* JSObject::SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes) {
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
bool convert_back_to_fast = HasFastProperties() &&
(map()->instance_descriptors()->number_of_descriptors()
< DescriptorArray::kMaxNumberOfDescriptors);
// Normalize object to make this operation simple.
Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (ok->IsFailure()) return ok;
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
if (IsGlobalObject()) {
Object* new_map = map()->CopyDropDescriptors();
if (new_map->IsFailure()) return new_map;
set_map(Map::cast(new_map));
}
// Update the dictionary with the new CALLBACKS property.
Object* result = SetNormalizedProperty(name, structure, details);
if (result->IsFailure()) return result;
if (convert_back_to_fast) {
ok = TransformToFastProperties(0);
if (ok->IsFailure()) return ok;
}
return result;
}
Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
PropertyAttributes attributes) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
!Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
return Heap::undefined_value();
}
@ -2844,6 +2872,78 @@ Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
}
Object* JSObject::DefineAccessor(AccessorInfo* info) {
String* name = String::cast(info->name());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
return Heap::undefined_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->DefineAccessor(info);
}
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
name->TryFlatten();
if (!CanSetCallback(name)) {
return Heap::undefined_value();
}
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
if (IsJSArray()) return Heap::undefined_value();
// Accessors overwrite previous callbacks (cf. getters/setters).
switch (GetElementsKind()) {
case FAST_ELEMENTS:
break;
case PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
return Heap::undefined_value();
case DICTIONARY_ELEMENTS:
break;
default:
UNREACHABLE();
break;
}
SetElementCallback(index, info, info->property_attributes());
} else {
// Lookup the name.
LookupResult result;
LocalLookup(name, &result);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
return Heap::undefined_value();
}
SetPropertyCallback(name, info, info->property_attributes());
}
return this;
}
Object* JSObject::LookupAccessor(String* name, bool is_getter) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@ -2871,8 +2971,9 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
return FixedArray::cast(element)->get(accessor_index);
if (element->IsFixedArray()) {
return FixedArray::cast(element)->get(accessor_index);
}
}
}
}
@ -5171,22 +5272,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::EMBEDDED_OBJECT) {
v->VisitPointer(it.rinfo()->target_object_address());
} else if (RelocInfo::IsCodeTarget(rmode)) {
v->VisitCodeTarget(it.rinfo());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
v->VisitExternalReference(it.rinfo()->target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
RelocInfo::IsJSReturn(rmode) &&
it.rinfo()->IsPatchedReturnSequence()) {
v->VisitDebugTarget(it.rinfo());
#endif
} else if (rmode == RelocInfo::RUNTIME_ENTRY) {
v->VisitRuntimeEntry(it.rinfo());
}
it.rinfo()->Visit(v);
}
ScopeInfo<>::IterateScopeInfo(this, v);
@ -5854,6 +5940,108 @@ Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
}
Object* JSObject::GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
Object* holder) {
ASSERT(!structure->IsProxy());
// API-style callbacks.
if (structure->IsAccessorInfo()) {
AccessorInfo* data = AccessorInfo::cast(structure);
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope;
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
Handle<Object> number = Factory::NewNumberFromUint(index);
Handle<String> key(Factory::NumberToString(number));
LOG(ApiNamedPropertyAccess("load", *self, *key));
CustomArguments args(data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = call_fun(v8::Utils::ToLocal(key), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (result.IsEmpty()) return Heap::undefined_value();
return *v8::Utils::OpenHandle(*result);
}
// __defineGetter__ callback
if (structure->IsFixedArray()) {
Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
if (getter->IsJSFunction()) {
return Object::GetPropertyWithDefinedGetter(receiver,
JSFunction::cast(getter));
}
// Getter is not a function.
return Heap::undefined_value();
}
UNREACHABLE();
return NULL;
}
Object* JSObject::SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
JSObject* holder) {
HandleScope scope;
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
Handle<Object> value_handle(value);
// To accommodate both the old and the new API we switch on the
// data structure used to store the callbacks. Eventually proxy
// callbacks should be phased out.
ASSERT(!structure->IsProxy());
if (structure->IsAccessorInfo()) {
// API-style callbacks.
AccessorInfo* data = AccessorInfo::cast(structure);
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = Factory::NewNumberFromUint(index);
Handle<String> key(Factory::NumberToString(number));
LOG(ApiNamedPropertyAccess("store", this, *key));
CustomArguments args(data->data(), this, JSObject::cast(holder));
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
call_fun(v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle),
info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
return *value_handle;
}
if (structure->IsFixedArray()) {
Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
Handle<Object> holder_handle(holder);
Handle<Object> key(Factory::NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder_handle };
return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
HandleVector(args, 2)));
}
}
UNREACHABLE();
return NULL;
}
// Adding n elements in fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
@ -5864,9 +6052,8 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
uint32_t elms_length = static_cast<uint32_t>(elms->length());
if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) {
Object* setter = LookupCallbackSetterInPrototypes(index);
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
if (SetElementWithCallbackSetterInPrototypes(index, value)) {
return value;
}
}
@ -5984,18 +6171,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
FixedArray* structure = FixedArray::cast(element);
if (structure->get(kSetterIndex)->IsJSFunction()) {
JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
return SetPropertyWithDefinedSetter(setter, value);
} else {
Handle<Object> self(this);
Handle<Object> key(Factory::NewNumberFromUint(index));
Handle<Object> args[2] = { key, self };
return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
HandleVector(args, 2)));
}
return SetElementWithCallback(element, index, value, this);
} else {
dictionary->UpdateMaxNumberKey(index);
dictionary->ValueAtPut(entry, value);
@ -6003,10 +6179,8 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
} else {
// Index not already used. Look for an accessor in the prototype chain.
if (!IsJSArray()) {
Object* setter = LookupCallbackSetterInPrototypes(index);
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter),
value);
if (SetElementWithCallbackSetterInPrototypes(index, value)) {
return value;
}
}
Object* result = dictionary->AtNumberPut(index, value);
@ -6109,16 +6283,10 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
FixedArray* structure = FixedArray::cast(element);
Object* getter = structure->get(kGetterIndex);
if (getter->IsJSFunction()) {
return GetPropertyWithDefinedGetter(receiver,
JSFunction::cast(getter));
} else {
// Getter is not a function.
return Heap::undefined_value();
}
return GetElementWithCallback(receiver,
element,
index,
this);
}
return element;
}
@ -6266,16 +6434,10 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
FixedArray* structure = FixedArray::cast(element);
Object* getter = structure->get(kGetterIndex);
if (getter->IsJSFunction()) {
return GetPropertyWithDefinedGetter(receiver,
JSFunction::cast(getter));
} else {
// Getter is not a function.
return Heap::undefined_value();
}
return GetElementWithCallback(receiver,
element,
index,
this);
}
return element;
}

19
deps/v8/src/objects.h

@ -1248,6 +1248,8 @@ class JSObject: public HeapObject {
PropertyAttributes attributes);
Object* LookupAccessor(String* name, bool is_getter);
Object* DefineAccessor(AccessorInfo* info);
// Used from Object::GetProperty().
Object* GetPropertyWithFailedAccessCheck(Object* receiver,
LookupResult* result,
@ -1370,7 +1372,7 @@ class JSObject: public HeapObject {
void LookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
Object* LookupCallbackSetterInPrototypes(uint32_t index);
bool SetElementWithCallbackSetterInPrototypes(uint32_t index, Object* value);
void LookupCallback(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@ -1539,6 +1541,14 @@ class JSObject: public HeapObject {
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
private:
Object* GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
Object* holder);
Object* SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
JSObject* holder);
Object* SetElementWithInterceptor(uint32_t index, Object* value);
Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
@ -1569,6 +1579,13 @@ class JSObject: public HeapObject {
// Returns true if most of the elements backing storage is used.
bool HasDenseElements();
bool CanSetCallback(String* name);
Object* SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes);
Object* SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes);
Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
void LookupInDescriptor(String* name, LookupResult* result);
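
The new DefineAccessor(AccessorInfo*) entry point, together with CanSetCallback and the Set{Element,Property}Callback helpers above, is what lets native accessors be installed on concrete objects rather than only through templates. A hedged sketch of the embedder-side call path (SetAccessor on v8::Object is assumed to forward into this method; the getter is illustrative):

// Hypothetical usage; the getter returns a constant for the sketch.
v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                              const v8::AccessorInfo& info) {
  return v8::Integer::New(42);
}

void InstallAccessor(v8::Handle<v8::Object> obj) {
  // Assumed to build an AccessorInfo and call JSObject::DefineAccessor.
  obj->SetAccessor(v8::String::New("x"), XGetter);
}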

6
deps/v8/src/parser.cc

@ -5073,12 +5073,12 @@ ScriptDataImpl::~ScriptDataImpl() {
int ScriptDataImpl::Length() {
return store_.length();
return store_.length() * sizeof(unsigned);
}
unsigned* ScriptDataImpl::Data() {
return store_.start();
const char* ScriptDataImpl::Data() {
return reinterpret_cast<const char*>(store_.start());
}

2
deps/v8/src/parser.h

@ -90,7 +90,7 @@ class ScriptDataImpl : public ScriptData {
last_entry_(0) { }
virtual ~ScriptDataImpl();
virtual int Length();
virtual unsigned* Data();
virtual const char* Data();
virtual bool HasError();
FunctionEntry GetFunctionEnd(int start);
bool SanityCheck();
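
Since Data() now hands back const char* and Length() counts bytes rather than unsigned words, preparse data can be cached as an opaque byte buffer and restored later. A sketch, under the assumption that the public ScriptData::PreCompile/New pair in include/v8.h mirrors this signature change:

#include <string>

std::string CachePreparseData(const char* source, int source_length) {
  v8::ScriptData* data = v8::ScriptData::PreCompile(source, source_length);
  // Byte-oriented copy; no unsigned-word bookkeeping needed anymore.
  std::string cached(data->Data(), data->Length());
  delete data;
  return cached;
}

v8::ScriptData* RestorePreparseData(const std::string& cached) {
  return v8::ScriptData::New(cached.data(),
                             static_cast<int>(cached.size()));
}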

2
deps/v8/src/platform-freebsd.cc

@ -290,7 +290,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
int frames_count = backtrace(addresses.start(), frames_size);
char** symbols = backtrace_symbols(addresses, frames_count);
char** symbols = backtrace_symbols(addresses.start(), frames_count);
if (symbols == NULL) {
return kStackWalkError;
}

22
deps/v8/src/platform-linux.cc

@ -165,6 +165,28 @@ int OS::ActivationFrameAlignment() {
}
#ifdef V8_TARGET_ARCH_ARM
// 0xffff0fa0 is the hard coded address of a function provided by
// the kernel which implements a memory barrier. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
#endif
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) // don't use on a simulator
pLinuxKernelMemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
// An x86 store acts as a release barrier.
#endif
*ptr = value;
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
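
ReleaseStore provides exactly the ordering the new lock-free queue (below) depends on: every plain write issued before the store becomes visible to another thread no later than the stored pointer itself. A producer/consumer sketch with hypothetical names:

// Producer: fully initialize the node with plain writes, then publish.
node->value = v;
OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(node));

// Consumer: a thread that observes the new last_ may safely read
// node->value, because the barrier ordered the initializing writes first.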

7
deps/v8/src/platform-macos.cc

@ -39,6 +39,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
@ -259,6 +260,12 @@ int OS::ActivationFrameAlignment() {
}
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
OSMemoryBarrier();
*ptr = value;
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));

6
deps/v8/src/platform-win32.cc

@ -1340,6 +1340,12 @@ int OS::ActivationFrameAlignment() {
}
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
MemoryBarrier();
*ptr = value;
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}

2
deps/v8/src/platform.h

@ -277,6 +277,8 @@ class OS {
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
private:
static const int msPerSecond = 1000;

13
deps/v8/src/runtime.js

@ -559,20 +559,15 @@ function ToInt32(x) {
// ES5, section 9.12
function SameValue(x, y) {
if (typeof x != typeof y) return false;
if (IS_NULL_OR_UNDEFINED(x)) return true;
if (IS_NUMBER(x)) {
if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
// x is +0 and y is -0 or vice versa
if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
// x is +0 and y is -0 or vice versa.
if (x === 0 && y === 0 && (1 / x) != (1 / y)) {
return false;
}
return x == y;
return x === y;
}
if (IS_STRING(x)) return %StringEquals(x, y);
if (IS_BOOLEAN(x))return %NumberEquals(%ToNumber(x),%ToNumber(y));
return %_ObjectEquals(x, y);
return x === y;
}
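
The (1 / x) != (1 / y) comparison is the standard trick for telling the two zeros apart without extra runtime calls: dividing by +0 and -0 yields infinities of opposite sign. The numeric half of SameValue, sketched in C++ over doubles:

#include <cmath>

// NaN equals itself here, and +0 differs from -0, matching ES5 9.12.
bool SameValueNumber(double x, double y) {
  if (std::isnan(x) && std::isnan(y)) return true;
  if (x == 0 && y == 0) return (1 / x) == (1 / y);  // +inf vs. -inf
  return x == y;
}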

4
deps/v8/src/serialize.cc

@ -1299,7 +1299,7 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
}
sink_->Put(kExternalReference + representation, "ExternalReference");
sink_->PutInt(encoding, "reference id");
bytes_processed_so_far_ += Assembler::kExternalTargetSize;
bytes_processed_so_far_ += rinfo->target_address_size();
}
@ -1309,7 +1309,7 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
OutputRawData(target_start);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
bytes_processed_so_far_ += Assembler::kCallTargetSize;
bytes_processed_so_far_ += rinfo->target_address_size();
}

9
deps/v8/src/top.cc

@ -370,8 +370,7 @@ Local<StackTrace> Top::CaptureCurrentStackTrace(
v8::HandleScope scope;
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stackTrace = Factory::NewJSArray(frame_limit);
FixedArray* frames = FixedArray::cast(stackTrace->elements());
Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
Handle<String> column_key = Factory::LookupAsciiSymbol("column");
Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
@ -438,13 +437,13 @@ Local<StackTrace> Top::CaptureCurrentStackTrace(
SetProperty(stackFrame, constructor_key, is_constructor, NONE);
}
frames->set(frames_seen, *stackFrame);
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
frames_seen++;
it.Advance();
}
stackTrace->set_length(Smi::FromInt(frames_seen));
return scope.Close(Utils::StackTraceToLocal(stackTrace));
stack_trace->set_length(Smi::FromInt(frames_seen));
return scope.Close(Utils::StackTraceToLocal(stack_trace));
}
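
The rewrite fixes a handle-discipline bug: the old code cached a raw FixedArray* from stack_trace->elements() and kept using it across calls that allocate, but any allocation may trigger a GC that moves the backing store. A sketch of the hazard and the fix (the allocating call is illustrative):

// Unsafe: raw interior pointer held across a possible GC.
FixedArray* frames = FixedArray::cast(stack_trace->elements());
Handle<String> key = Factory::LookupAsciiSymbol("line");  // may allocate
frames->set(0, *key);  // frames may now point into a moved, stale array

// Safe: re-derive the raw pointer from the handle at each use.
FixedArray::cast(stack_trace->elements())->set(0, *key);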

87
deps/v8/src/unbound-queue-inl.h

@ -0,0 +1,87 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_UNBOUND_QUEUE_INL_H_
#define V8_UNBOUND_QUEUE_INL_H_
#include "unbound-queue.h"
namespace v8 {
namespace internal {
template<typename Record>
struct UnboundQueue<Record>::Node: public Malloced {
explicit Node(const Record& value)
: value(value), next(NULL) {
}
Record value;
Node* next;
};
template<typename Record>
UnboundQueue<Record>::UnboundQueue() {
first_ = new Node(Record());
divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
}
template<typename Record>
UnboundQueue<Record>::~UnboundQueue() {
while (first_ != NULL) DeleteFirst();
}
template<typename Record>
void UnboundQueue<Record>::DeleteFirst() {
Node* tmp = first_;
first_ = tmp->next;
delete tmp;
}
template<typename Record>
void UnboundQueue<Record>::Dequeue(Record* rec) {
ASSERT(divider_ != last_);
Node* next = reinterpret_cast<Node*>(divider_)->next;
*rec = next->value;
OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
}
template<typename Record>
void UnboundQueue<Record>::Enqueue(const Record& rec) {
Node*& next = reinterpret_cast<Node*>(last_)->next;
next = new Node(rec);
OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
}
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_INL_H_

66
deps/v8/src/unbound-queue.h

@ -0,0 +1,66 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_UNBOUND_QUEUE_
#define V8_UNBOUND_QUEUE_
namespace v8 {
namespace internal {
// Lock-free unbound queue for small records. Intended for
// transferring small records between a single producer and a single
// consumer. There is no restriction on the number of queued
// elements, so the producer never blocks. Implemented after Herb
// Sutter's article:
// http://www.ddj.com/high-performance-computing/210604448
template<typename Record>
class UnboundQueue BASE_EMBEDDED {
public:
inline UnboundQueue();
inline ~UnboundQueue();
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return divider_ == last_; }
private:
INLINE(void DeleteFirst());
struct Node;
Node* first_;
AtomicWord divider_; // Node*
AtomicWord last_; // Node*
DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
};
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_
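
A usage sketch of the single-producer/single-consumer contract (names illustrative; the intended pairing in this codebase is the profiler's sampler and processor threads):

UnboundQueue<int> queue;

// Producer thread: never blocks; allocates one node per record.
queue.Enqueue(42);

// Consumer thread: Dequeue() asserts that a record exists, so gate
// it on IsEmpty().
int rec;
if (!queue.IsEmpty()) queue.Dequeue(&rec);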

32
deps/v8/src/v8natives.js

@ -434,6 +434,11 @@ PropertyDescriptor.prototype.isWritable = function() {
}
PropertyDescriptor.prototype.hasWritable = function() {
return this.hasWritable_;
}
PropertyDescriptor.prototype.setConfigurable = function(configurable) {
this.configurable_ = configurable;
this.hasConfigurable_ = true;
@ -537,6 +542,22 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
throw MakeTypeError("define_disallowed", ["defineProperty"]);
if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
// Step 5 and 6
if ((!desc.hasEnumerable() ||
SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
SameValue(desc.isWritable(), current.isWritable())) &&
(!desc.hasValue() ||
SameValue(desc.getValue(), current.getValue())) &&
(!desc.hasGetter() ||
SameValue(desc.getGet(), current.getGet())) &&
(!desc.hasSetter() ||
SameValue(desc.getSet(), current.getSet()))) {
return true;
}
// Step 7
if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
@ -583,7 +604,13 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
flag |= DONT_DELETE;
if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
flag |= desc.isWritable() ? 0 : READ_ONLY;
if (desc.hasWritable()) {
flag |= desc.isWritable() ? 0 : READ_ONLY;
} else if (!IS_UNDEFINED(current)) {
flag |= current.isWritable() ? 0 : READ_ONLY;
} else {
flag |= READ_ONLY;
}
%DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
} else {
if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
@ -673,8 +700,9 @@ function ObjectCreate(proto, properties) {
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
!IS_UNDETECTABLE(obj))
!IS_UNDETECTABLE(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
}
var name = ToString(p);
var desc = ToPropertyDescriptor(attributes);
DefineOwnProperty(obj, name, desc, true);

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 11
#define BUILD_NUMBER 12
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

31
deps/v8/src/x64/assembler-x64-inl.h

@ -29,6 +29,7 @@
#define V8_X64_ASSEMBLER_X64_INL_H_
#include "cpu.h"
#include "debug.h"
#include "memory.h"
namespace v8 {
@ -229,6 +230,15 @@ Address RelocInfo::target_address_address() {
}
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kCallTargetSize;
} else {
return Assembler::kExternalTargetSize;
}
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (IsCodeTarget(rmode_)) {
@ -320,6 +330,27 @@ Object** RelocInfo::call_object_address() {
pc_ + Assembler::kPatchReturnSequenceAddressOffset);
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
visitor->VisitRuntimeEntry(this);
}
}
// -----------------------------------------------------------------------------
// Implementation of Operand

46
deps/v8/src/x64/assembler-x64.cc

@ -239,6 +239,52 @@ Operand::Operand(Register index,
}
Operand::Operand(const Operand& operand, int32_t offset) {
ASSERT(operand.len_ >= 1);
// Operand encodes REX ModR/M [SIB] [Disp].
byte modrm = operand.buf_[0];
ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
bool has_sib = ((modrm & 0x07) == 0x04);
byte mode = modrm & 0xC0;
int disp_offset = has_sib ? 2 : 1;
int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
// Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
// displacement.
bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
int32_t disp_value = 0;
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
}
// Write new operand with same registers, but with modified displacement.
ASSERT(offset >= 0 ? disp_value + offset > disp_value
: disp_value + offset < disp_value); // No overflow.
disp_value += offset;
rex_ = operand.rex_;
if (!is_int8(disp_value) || is_baseless) {
// Need 32 bits of displacement: mode 2, or mode 0 with rbp/r13 as base.
buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
len_ = disp_offset + 4;
Memory::int32_at(&buf_[disp_offset]) = disp_value;
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
len_ = disp_offset + 1;
buf_[disp_offset] = static_cast<byte>(disp_value);
} else {
// Need no displacement.
buf_[0] = (modrm & 0x3f); // Mode 0.
len_ = disp_offset;
}
if (has_sib) {
buf_[1] = operand.buf_[1];
}
}
// -----------------------------------------------------------------------------
// Implementation of Assembler.

6
deps/v8/src/x64/assembler-x64.h

@ -300,12 +300,16 @@ class Operand BASE_EMBEDDED {
ScaleFactor scale,
int32_t disp);
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
// this must not overflow.
Operand(const Operand& base, int32_t offset);
private:
byte rex_;
byte buf_[10];
// The number of bytes in buf_.
unsigned int len_;
RelocInfo::Mode rmode_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
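
Illustrative use of the new constructor: deriving the operand for an adjacent field without restating base, index, and scale, while the constructor widens the displacement encoding when disp8 no longer fits (the offsets here are hypothetical):

Operand first_field(rbx, JSObject::kHeaderSize);   // disp8 if it fits
Operand second_field(first_field, kPointerSize);   // same registers, grown disp
__ movq(rax, second_field);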

238
deps/v8/src/x64/codegen-x64.cc

@ -603,9 +603,8 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
explicit DeferredReferenceGetKeyedValue(Register dst,
Register receiver,
Register key,
bool is_global)
: dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
Register key)
: dst_(dst), receiver_(receiver), key_(key) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
@ -618,7 +617,6 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
Register dst_;
Register receiver_;
Register key_;
bool is_global_;
};
@ -633,10 +631,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode mode = is_global_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
__ Call(ic, mode);
__ Call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the __
// macro because the macro sometimes uses macro expansion to turn
@ -5693,7 +5688,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
slow));
frame_->Push(&arguments);
frame_->Push(key_literal->handle());
*result = EmitKeyedLoad(false);
*result = EmitKeyedLoad();
frame_->Drop(2); // Drop key and receiver.
done->Jump(result);
}
@ -7188,8 +7183,89 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
}
Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
Result result;
// Do not inline the inobject property case for loads from the global
// object. Also do not inline for unoptimized code. This saves time
// in the code generator. Unoptimized code is toplevel code or code
// that is not in a loop.
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
frame()->Push(name);
RelocInfo::Mode mode = is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
result = frame()->CallLoadIC(mode);
// A test rax instruction following the call signals that the
// inobject property case was inlined. Ensure that there is not
// a test rax instruction here.
__ nop();
} else {
// Inline the inobject property case.
Comment cmnt(masm(), "[ Inlined named property load");
Result receiver = frame()->Pop();
receiver.ToRegister();
result = allocator()->Allocate();
ASSERT(result.is_valid());
// Cannot use r12 for receiver, because that changes
// the distance between a call and a fixup location,
// due to a special encoding of r12 as r/m in a ModR/M byte.
if (receiver.reg().is(r12)) {
frame()->Spill(receiver.reg()); // It will be overwritten with result.
// Swap receiver and value.
__ movq(result.reg(), receiver.reg());
Result temp = receiver;
receiver = result;
result = temp;
}
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
// Check that the receiver is a heap object.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm()->Move(kScratchRegister, Factory::null_value());
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
// Don't use deferred->Branch(...), since that might add coverage code.
masm()->j(not_equal, deferred->entry_label());
// The delta from the patch label to the load offset must be
// statically known.
ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
// The initial (invalid) offset has to be large enough to force
// a 32-bit instruction encoding to allow patching with an
// arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
frame()->Push(&receiver);
}
ASSERT(frame()->height() == original_height);
return result;
}
Result CodeGenerator::EmitKeyedLoad() {
#ifdef DEBUG
int original_height = frame()->height();
#endif
Result result;
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
@ -7197,34 +7273,30 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
if (loop_nesting() > 0) {
Comment cmnt(masm_, "[ Inlined load from keyed Property");
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
// Allocate the temporary early so that we use rax if it is free.
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary for the index.
Result index = allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg(),
is_global);
key.reg());
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
}
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
// Check that the receiver has the expected map.
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
@ -7255,7 +7327,6 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// The index register holds the un-smi-tagged key. It has been
// zero-extended to 64-bits, so it can be used directly as index in the
// operand below.
@ -7266,39 +7337,33 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
// heuristic about which register to reuse. For example, if
// one is rax, then we can reuse that one because the value
// coming from the deferred code will be in rax.
Result value = index;
__ movq(value.reg(),
__ movq(elements.reg(),
Operand(elements.reg(),
index.reg(),
times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
result = elements;
elements.Unuse();
index.Unuse();
__ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
__ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
frame_->Push(&receiver);
frame_->Push(&key);
return value;
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = frame_->CallKeyedLoadIC(mode);
result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
return answer;
}
ASSERT(frame()->height() == original_height);
return result;
}
@ -7341,6 +7406,7 @@ void Reference::GetValue() {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) set_unloaded();
break;
}
@ -7348,101 +7414,29 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
// Do not inline the inobject property case for loads from the global
// object. Also do not inline for unoptimized code. This saves time
// in the code generator. Unoptimized code is toplevel code or code
// that is not in a loop.
if (is_global ||
cgen_->scope()->is_global_scope() ||
cgen_->loop_nesting() == 0) {
Comment cmnt(masm, "[ Load from named Property");
cgen_->frame()->Push(GetName());
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = cgen_->frame()->CallLoadIC(mode);
// A test rax instruction following the call signals that the
// inobject property case was inlined. Ensure that there is not
// a test rax instruction here.
__ nop();
cgen_->frame()->Push(&answer);
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
// Cannot use r12 for receiver, because that changes
// the distance between a call and a fixup location,
// due to a special encoding of r12 as r/m in a ModR/M byte.
if (receiver.reg().is(r12)) {
// Swap receiver and value.
__ movq(value.reg(), receiver.reg());
Result temp = receiver;
receiver = value;
value = temp;
cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
}
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(value.reg(),
receiver.reg(),
GetName());
// Check that the receiver is a heap object.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm->Move(kScratchRegister, Factory::null_value());
masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
// Don't use deferred->Branch(...), since that might add coverage code.
masm->j(not_equal, deferred->entry_label());
// The delta from the patch label to the load offset must be
// statically known.
ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
// The initial (invalid) offset has to be large enough to force
// a 32-bit instruction encoding to allow patching with an
// arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&value);
Result result = cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->Push(&result);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
case KEYED: {
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
// A load of a bare identifier (load from global) cannot be keyed.
ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
Result value = cgen_->EmitKeyedLoad(is_global);
Result value = cgen_->EmitKeyedLoad();
cgen_->frame()->Push(&value);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
}

5
deps/v8/src/x64/codegen-x64.h

@ -449,10 +449,13 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Receiver is passed on the frame and not consumed.
Result EmitNamedLoad(Handle<String> name, bool is_contextual);
// Load a property of an object, returning it in a Result.
// The object and the property name are passed on the stack, and
// not changed.
Result EmitKeyedLoad(bool is_global);
Result EmitKeyedLoad();
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'

2
deps/v8/src/x64/full-codegen-x64.cc

@ -2217,7 +2217,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); // Map is now in rax.
__ j(below, &null);
// As long as JS_FUNCTION_TYPE is the last instance type and it is

21
deps/v8/src/x64/macro-assembler-x64.cc

@ -603,7 +603,7 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
}
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
cmpq(dst, src);
}
@ -614,13 +614,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
if (src->value() == 0) {
// Only tagged long smi to have 32-bit representation.
cmpq(dst, Immediate(0));
} else {
Move(kScratchRegister, src);
cmpq(dst, kScratchRegister);
}
cmpl(Operand(dst, kIntSize), Immediate(src->value()));
}
@ -922,8 +916,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
Move(kScratchRegister, constant);
addq(dst, kScratchRegister);
addl(Operand(dst, kIntSize), Immediate(constant->value()));
}
}
@ -1607,13 +1600,7 @@ void MacroAssembler::Drop(int stack_elements) {
void MacroAssembler::Test(const Operand& src, Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
testl(src, Immediate(static_cast<int32_t>(smi)));
} else {
Move(kScratchRegister, source);
testq(src, kScratchRegister);
}
testl(Operand(src, kIntSize), Immediate(source->value()));
}
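
All three replacements above lean on the same x64 smi layout: a tagged smi keeps its 32-bit value in the upper half of the 64-bit word, with the low 32 bits zero. The upper word of a smi slot in memory (byte offset kIntSize == 4 on little-endian x64) is therefore exactly the untagged value, so a single cmpl/testl against an immediate replaces materializing the full 64-bit constant in kScratchRegister. A standalone sketch of the layout this assumes (plain C++, not V8 code):

#include <assert.h>
#include <stdint.h>

int main() {
  int32_t value = 12345;
  // Tag as an x64 smi: value in the upper 32 bits, low bits zero.
  uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
  // The upper 32-bit word of the slot equals the untagged value, so a
  // 32-bit compare against it is equivalent to the 64-bit compare.
  int32_t upper_word = static_cast<int32_t>(tagged >> 32);
  assert(upper_word == value);
  return 0;
}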

81
deps/v8/src/x64/stub-cache-x64.cc

@ -430,7 +430,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
Label* miss_label) {
@ -450,7 +450,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
}
if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
miss_label);
return;
}
@ -463,12 +464,17 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ push(holder);
__ push(name_);
// Invoke an interceptor. Note: the map checks from the receiver to the
// interceptor's holder have already been compiled (see the callers of
// this method).
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
// Check if the interceptor provided a value for the property. If so,
// return immediately.
Label interceptor_failed;
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(equal, &interceptor_failed);
@ -485,13 +491,17 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ LeaveInternalFrame();
if (lookup->type() == FIELD) {
holder = stub_compiler->CheckPrototypes(holder_obj,
// We found a FIELD property in the prototype chain of the interceptor's
// holder. Check that the maps from the interceptor's holder to the
// field's holder haven't changed...
holder = stub_compiler->CheckPrototypes(interceptor_holder,
holder,
lookup->holder(),
scratch1,
scratch2,
name,
miss_label);
// ... and retrieve a field from field's holder.
stub_compiler->GenerateFastPropertyLoad(masm,
rax,
holder,
@ -499,37 +509,47 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
lookup->GetFieldIndex());
__ ret(0);
} else {
// We found a CALLBACKS property in the prototype chain of the
// interceptor's holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);
// Prepare for tail call. Push receiver to stack after return address.
Label cleanup;
__ pop(scratch2);
__ pop(scratch2); // return address
__ push(receiver);
__ push(scratch2);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
// Check that the maps from interceptor's holder to callback's holder
// haven't changed.
holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);
__ pop(scratch2); // save old return address
// Continue tail call preparation: push remaining parameters after
// return address.
__ pop(scratch2); // return address
__ push(holder);
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ push(FieldOperand(holder, AccessorInfo::kDataOffset));
__ push(name_);
__ push(scratch2); // restore old return address
__ push(scratch2); // restore return address
// Tail call to runtime.
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
// Cleanup code: we pushed the receiver after the return address and
// need to remove it from there.
__ bind(&cleanup);
__ pop(scratch1);
__ pop(scratch2);
__ pop(scratch1); // return address
__ pop(scratch2); // receiver
__ push(scratch1);
}
}
@ -539,10 +559,10 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
__ pop(scratch); // save old return address
PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ push(scratch); // restore old return address
ExternalReference ref = ExternalReference(
@ -704,7 +724,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
@ -717,10 +737,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
depth1 =
optimization.GetPrototypeDepthOfExpectedType(object,
interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
lookup->holder());
depth2 =
optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
lookup->holder());
}
can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
(depth2 != kInvalidProtoDepth);
@ -733,24 +756,32 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ReserveSpaceForFastApiCall(masm, scratch1);
}
// Check that the maps from the receiver to the interceptor's holder
// haven't changed, and thus we can invoke the interceptor.
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
Label regular_invoke;
LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
&regular_invoke);
// Generate code for the failed interceptor case.
// The interceptor returned nothing for this property. Try to use the
// cached constant function.
// Check the lookup is still valid.
stub_compiler_->CheckPrototypes(holder_obj, receiver,
// Check that the maps from the interceptor's holder to the constant
// function's holder haven't changed, and thus we can use the cached
// constant function.
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(),
scratch1, scratch2, name,
depth2, miss);
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
@ -758,12 +789,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JUMP_FUNCTION);
}
// Deferred code for the fast API call case: clean up the preallocated space.
if (can_do_fast_api_call) {
__ bind(&miss_cleanup);
FreeSpaceForFastApiCall(masm, scratch1);
__ jmp(miss_label);
}
// Invoke a regular function.
__ bind(&regular_invoke);
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
@ -776,10 +809,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
String* name,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
miss_label);
@ -791,7 +824,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
holder,
name_,
holder_obj);
interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),

1
deps/v8/test/cctest/SConscript

@ -71,6 +71,7 @@ SOURCES = {
'test-strings.cc',
'test-threads.cc',
'test-thread-termination.cc',
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc'
],

217
deps/v8/test/cctest/test-api.cc

@ -76,6 +76,11 @@ static void ExpectBoolean(const char* code, bool expected) {
}
static void ExpectTrue(const char* code) {
ExpectBoolean(code, true);
}
static void ExpectObject(const char* code, Local<Value> expected) {
Local<Value> result = CompileRun(code);
CHECK(result->Equals(expected));
@ -2506,7 +2511,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
// Uses getOwnPropertyDescriptor to check the configurable status
Local<Script> script_desc
= Script::Compile(v8_str("var prop =Object.getOwnPropertyDescriptor( "
= Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor( "
"obj, 'x');"
"prop.configurable;"));
Local<Value> result = script_desc->Run();
@ -2592,7 +2597,166 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
}
static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
char const* name) {
return v8::Handle<v8::Object>::Cast((*context)->Global()->Get(v8_str(name)));
}
THREADED_TEST(DefineAPIAccessorOnObject) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
CompileRun("var obj2 = {};");
CHECK(CompileRun("obj1.x")->IsUndefined());
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj1")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj2")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CompileRun("Object.defineProperty(obj1, 'x',"
"{ get: function() { return 'y'; }, configurable: true })");
ExpectString("obj1.x", "y");
ExpectString("obj2.x", "x");
CompileRun("Object.defineProperty(obj2, 'x',"
"{ get: function() { return 'y'; }, configurable: true })");
ExpectString("obj1.x", "y");
ExpectString("obj2.x", "y");
ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CHECK(GetGlobalProperty(&context, "obj1")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
CHECK(GetGlobalProperty(&context, "obj2")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
// Define getters/setters, but now make them not configurable.
CompileRun("Object.defineProperty(obj1, 'x',"
"{ get: function() { return 'z'; }, configurable: false })");
CompileRun("Object.defineProperty(obj2, 'x',"
"{ get: function() { return 'z'; }, configurable: false })");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
CHECK(!GetGlobalProperty(&context, "obj1")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
CHECK(!GetGlobalProperty(&context, "obj2")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
}
THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
v8_str("x"),
GetXValue, NULL,
v8_str("donut"), v8::DEFAULT, v8::DontDelete));
CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
v8_str("x"),
GetXValue, NULL,
v8_str("donut"), v8::DEFAULT, v8::DontDelete));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CHECK(!GetGlobalProperty(&context, "obj1")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
CHECK(!GetGlobalProperty(&context, "obj2")->
SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
{
v8::TryCatch try_catch;
CompileRun("Object.defineProperty(obj1, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
String::AsciiValue exception_value(try_catch.Exception());
CHECK_EQ(*exception_value,
"TypeError: Cannot redefine property: defineProperty");
}
{
v8::TryCatch try_catch;
CompileRun("Object.defineProperty(obj2, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
String::AsciiValue exception_value(try_catch.Exception());
CHECK_EQ(*exception_value,
"TypeError: Cannot redefine property: defineProperty");
}
}
static v8::Handle<Value> Get239Value(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(info.Data(), v8_str("donut"));
CHECK_EQ(name, v8_str("239"));
return name;
}
THREADED_TEST(ElementAPIAccessor) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
v8_str("239"),
Get239Value, NULL,
v8_str("donut")));
CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
v8_str("239"),
Get239Value, NULL,
v8_str("donut")));
ExpectString("obj1[239]", "239");
ExpectString("obj2[239]", "239");
ExpectString("obj1['239']", "239");
ExpectString("obj2['239']", "239");
}
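
Taken together, these tests pin down the capability this upgrade adds: SetAccessor can now be called on a live v8::Object (including objects created by script), not only on an ObjectTemplate before instantiation, and it returns false instead of installing the accessor once the property is non-configurable. A minimal sketch of the call shape, assuming an entered context plus the GetXValue and v8_str helpers from the tests above:

v8::HandleScope scope;
LocalContext context;
v8::Handle<v8::Object> obj = v8::Object::New();
// Install a named accessor directly on the object, not on a template.
// The bool result reports whether the property could be (re)defined.
bool installed = obj->SetAccessor(v8_str("x"), GetXValue, NULL,
                                  v8_str("donut"));
// After script runs Object.defineProperty(obj, 'x', {configurable: false}),
// the same call returns false, as the tests above check.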
v8::Persistent<Value> xValue;
@ -8003,8 +8167,8 @@ TEST(PreCompile) {
// TODO(155): This test would break without the initialization of V8. This is
// a workaround for now to make this test not fail.
v8::V8::Initialize();
const char *script = "function foo(a) { return a+1; }";
v8::ScriptData *sd =
const char* script = "function foo(a) { return a+1; }";
v8::ScriptData* sd =
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
@ -8015,8 +8179,8 @@ TEST(PreCompile) {
TEST(PreCompileWithError) {
v8::V8::Initialize();
const char *script = "function foo(a) { return 1 * * 2; }";
v8::ScriptData *sd =
const char* script = "function foo(a) { return 1 * * 2; }";
v8::ScriptData* sd =
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK(sd->HasError());
delete sd;
@ -8025,14 +8189,53 @@ TEST(PreCompileWithError) {
TEST(Regress31661) {
v8::V8::Initialize();
const char *script = " The Definintive Guide";
v8::ScriptData *sd =
const char* script = " The Definintive Guide";
v8::ScriptData* sd =
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK(sd->HasError());
delete sd;
}
// Tests that ScriptData can be serialized and deserialized.
TEST(PreCompileSerialization) {
v8::V8::Initialize();
const char* script = "function foo(a) { return a+1; }";
v8::ScriptData* sd =
v8::ScriptData::PreCompile(script, i::StrLength(script));
// Serialize.
int serialized_data_length = sd->Length();
char* serialized_data = i::NewArray<char>(serialized_data_length);
memcpy(serialized_data, sd->Data(), serialized_data_length);
// Deserialize.
v8::ScriptData* deserialized_sd =
v8::ScriptData::New(serialized_data, serialized_data_length);
// Verify that the original is the same as the deserialized.
CHECK_EQ(sd->Length(), deserialized_sd->Length());
CHECK_EQ(0, memcmp(sd->Data(), deserialized_sd->Data(), sd->Length()));
CHECK_EQ(sd->HasError(), deserialized_sd->HasError());
delete sd;
delete deserialized_sd;
}
// Attempts to deserialize bad data.
TEST(PreCompileDeserializationError) {
v8::V8::Initialize();
const char* data = "DONT CARE";
int invalid_size = 3;
v8::ScriptData* sd = v8::ScriptData::New(data, invalid_size);
CHECK_EQ(0, sd->Length());
delete sd;
}
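
The two tests above outline the intended round trip for the reworked ScriptData API: precompile once, persist the opaque bytes, and rebuild a ScriptData later via ScriptData::New(), which validates its input (bad data yields an empty ScriptData rather than a crash). A hedged sketch of that flow, with a std::vector standing in for whatever storage an embedder would really use (i::StrLength is the test helper seen above):

#include <vector>

const char* src = "function foo(a) { return a + 1; }";
v8::ScriptData* sd = v8::ScriptData::PreCompile(src, i::StrLength(src));
// Persist the opaque bytes; here they are just copied into a vector.
std::vector<char> cache(sd->Data(), sd->Data() + sd->Length());
delete sd;
// Later, possibly in another process, rebuild the ScriptData so that
// compilation can skip the preparsing step.
v8::ScriptData* restored =
    v8::ScriptData::New(&cache[0], static_cast<int>(cache.size()));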
// This tests that we do not allow dictionary load/call inline caches
// to use functions that have not yet been compiled. The potential
// problem of loading a function that has not yet been compiled can

46
deps/v8/test/cctest/test-circular-queue.cc

@ -1,6 +1,6 @@
// Copyright 2010 the V8 project authors. All rights reserved.
//
// Tests of circular queues.
// Tests of the circular queue.
#include "v8.h"
#include "circular-queue-inl.h"
@ -8,53 +8,9 @@
namespace i = v8::internal;
using i::CircularQueue;
using i::SamplingCircularQueue;
TEST(SingleRecordCircularQueue) {
typedef int Record;
CircularQueue<Record> cq(sizeof(Record) * 2);
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
Record rec = 0;
cq.Dequeue(&rec);
CHECK_EQ(1, rec);
CHECK(cq.IsEmpty());
}
TEST(MultipleRecordsCircularQueue) {
typedef int Record;
const int kQueueSize = 10;
CircularQueue<Record> cq(sizeof(Record) * (kQueueSize + 1));
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
for (int i = 2; i <= 5; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
Record rec = 0;
for (int i = 1; i <= 4; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
for (int i = 6; i <= 12; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
for (int i = 5; i <= 12; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
CHECK(cq.IsEmpty());
}
TEST(SamplingCircularQueue) {
typedef SamplingCircularQueue::Cell Record;
const int kRecordsPerChunk = 4;

8
deps/v8/test/cctest/test-log-stack-tracer.cc

@ -273,12 +273,10 @@ static void CreateTraceCallerFunction(const char* func_name,
// StackTracer uses Top::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
// TODO(711) The hack of replacing the inline runtime function
// RandomHeapNumber with GetFrameNumber does not work with the way the full
// compiler generates inline runtime calls.
i::FLAG_force_full_compiler = false;
#endif
i::FLAG_always_full_compiler = false;
TickSample sample;
InitTraceEnv(&sample);
@ -315,12 +313,10 @@ TEST(CFromJSStackTrace) {
// Top::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
// TODO(711) The hack of replacing the inline runtime function
// RandomHeapNumber with GetFrameNumber does not work with the way the full
// compiler generates inline runtime calls.
i::FLAG_force_full_compiler = false;
#endif
i::FLAG_always_full_compiler = false;
TickSample sample;
InitTraceEnv(&sample);

357
deps/v8/test/cctest/test-macro-assembler-x64.cc

@ -61,6 +61,7 @@ using v8::internal::r12; // Remember: r12..r15 are callee save!
using v8::internal::r13;
using v8::internal::r14;
using v8::internal::r15;
using v8::internal::times_pointer_size;
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
using v8::internal::less_equal;
@ -75,6 +76,8 @@ using v8::internal::positive;
using v8::internal::Smi;
using v8::internal::kSmiTagMask;
using v8::internal::kSmiValueSize;
using v8::internal::kPointerSize;
using v8::internal::kIntSize;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@ -2053,4 +2056,358 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
}
TEST(OperandOffset) {
int data[256];
for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
&actual_size,
true));
CHECK(buffer);
HandleScope handles;
MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
Label exit;
__ push(r12);
__ push(r13);
__ push(rbx);
__ push(rbp);
__ push(Immediate(0x100)); // <-- rbp
__ movq(rbp, rsp);
__ push(Immediate(0x101));
__ push(Immediate(0x102));
__ push(Immediate(0x103));
__ push(Immediate(0x104));
__ push(Immediate(0x105)); // <-- rbx
__ push(Immediate(0x106));
__ push(Immediate(0x107));
__ push(Immediate(0x108));
__ push(Immediate(0x109)); // <-- rsp
// rbp = rsp[9]
// r12 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
__ lea(r12, Operand(rsp, 3 * kPointerSize));
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
__ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);
// Test 1.
__ movl(rdx, sp0); // Sanity check.
__ cmpl(rdx, Immediate(0x109));
__ j(not_equal, &exit);
__ incq(rax);
// Test 2.
// Zero to non-zero displacement.
__ movl(rdx, Operand(sp0, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
Operand sp2 = Operand(rsp, 2 * kPointerSize);
// Test 3.
__ movl(rdx, sp2); // Sanity check.
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(sp2, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
__ movl(rdx, Operand(sp2, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x109));
__ j(not_equal, &exit);
__ incq(rax);
Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kPointerSize);
// Test 6.
__ movl(rdx, sp2c2); // Sanity check.
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(sp2c2, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x103));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
__ movl(rdx, Operand(sp2c2, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
Operand bp0 = Operand(rbp, 0);
// Test 9.
__ movl(rdx, bp0); // Sanity check.
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
// Zero to non-zero displacement.
__ movl(rdx, Operand(bp0, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x102));
__ j(not_equal, &exit);
__ incq(rax);
Operand bp2 = Operand(rbp, -2 * kPointerSize);
// Test 11.
__ movl(rdx, bp2); // Sanity check.
__ cmpl(rdx, Immediate(0x102));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
__ movl(rdx, Operand(bp2, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bp2, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x104));
__ j(not_equal, &exit);
__ incq(rax);
Operand bp2c4 = Operand(rbp, rcx, times_pointer_size, -4 * kPointerSize);
// Test 14:
__ movl(rdx, bp2c4); // Sanity check.
__ cmpl(rdx, Immediate(0x102));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bp2c4, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bp2c4, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x104));
__ j(not_equal, &exit);
__ incq(rax);
Operand bx0 = Operand(rbx, 0);
// Test 17.
__ movl(rdx, bx0); // Sanity check.
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bx0, 5 * kPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bx0, -4 * kPointerSize));
__ cmpl(rdx, Immediate(0x109));
__ j(not_equal, &exit);
__ incq(rax);
Operand bx2 = Operand(rbx, 2 * kPointerSize);
// Test 20.
__ movl(rdx, bx2); // Sanity check.
__ cmpl(rdx, Immediate(0x103));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bx2, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x101));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
__ movl(rdx, Operand(bx2, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
Operand bx2c2 = Operand(rbx, rcx, times_pointer_size, -2 * kPointerSize);
// Test 23.
__ movl(rdx, bx2c2); // Sanity check.
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bx2c2, 2 * kPointerSize));
__ cmpl(rdx, Immediate(0x103));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(bx2c2, -2 * kPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
Operand r80 = Operand(r8, 0);
// Test 26.
__ movl(rdx, r80); // Sanity check.
__ cmpl(rdx, Immediate(0x80808080));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, -8 * kIntSize));
__ cmpl(rdx, Immediate(0x78787878));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, 8 * kIntSize));
__ cmpl(rdx, Immediate(0x88888888));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, -64 * kIntSize));
__ cmpl(rdx, Immediate(0x40404040));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, 64 * kIntSize));
__ cmpl(rdx, Immediate(0xC0C0C0C0));
__ j(not_equal, &exit);
__ incq(rax);
Operand r88 = Operand(r8, 8 * kIntSize);
// Test 31.
__ movl(rdx, r88); // Sanity check.
__ cmpl(rdx, Immediate(0x88888888));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r88, -8 * kIntSize));
__ cmpl(rdx, Immediate(0x80808080));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r88, 8 * kIntSize));
__ cmpl(rdx, Immediate(0x90909090));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r88, -64 * kIntSize));
__ cmpl(rdx, Immediate(0x48484848));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r88, 64 * kIntSize));
__ cmpl(rdx, Immediate(0xC8C8C8C8));
__ j(not_equal, &exit);
__ incq(rax);
Operand r864 = Operand(r8, 64 * kIntSize);
// Test 36.
__ movl(rdx, r864); // Sanity check.
__ cmpl(rdx, Immediate(0xC0C0C0C0));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r864, -8 * kIntSize));
__ cmpl(rdx, Immediate(0xB8B8B8B8));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r864, 8 * kIntSize));
__ cmpl(rdx, Immediate(0xC8C8C8C8));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r864, -64 * kIntSize));
__ cmpl(rdx, Immediate(0x80808080));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r864, 32 * kIntSize));
__ cmpl(rdx, Immediate(0xE0E0E0E0));
__ j(not_equal, &exit);
__ incq(rax);
// 32-bit offset to 8-bit offset.
__ movl(rdx, Operand(r864, -60 * kIntSize));
__ cmpl(rdx, Immediate(0x84848484));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r864, 60 * kIntSize));
__ cmpl(rdx, Immediate(0xFCFCFCFC));
__ j(not_equal, &exit);
__ incq(rax);
// Test unaligned offsets.
// Test 43.
__ movl(rdx, Operand(r80, 2));
__ cmpl(rdx, Immediate(0x81818080));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, -2));
__ cmpl(rdx, Immediate(0x80807F7F));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, 126));
__ cmpl(rdx, Immediate(0xA0A09F9F));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, -126));
__ cmpl(rdx, Immediate(0x61616060));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, 254));
__ cmpl(rdx, Immediate(0xC0C0BFBF));
__ j(not_equal, &exit);
__ incq(rax);
__ movl(rdx, Operand(r80, -254));
__ cmpl(rdx, Immediate(0x41414040));
__ j(not_equal, &exit);
__ incq(rax);
// Success.
__ movl(rax, Immediate(0));
__ bind(&exit);
__ lea(rsp, Operand(rbp, kPointerSize));
__ pop(rbp);
__ pop(rbx);
__ pop(r13);
__ pop(r12);
__ ret(0);
CodeDesc desc;
masm->GetCode(&desc);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
}
#undef __
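
The magic constants in the unaligned-offset checks follow from the data initialization: data[i] = i * 0x01010101 replicates the low byte of i into all four bytes of each int, so memory around &data[128] reads, byte by byte, ... 7f 7f 7f 7f 80 80 80 80 81 81 81 81 ... A standalone check of the first unaligned case (little-endian, as on x64):

#include <cstdio>
#include <cstring>
#include <stdint.h>

int main() {
  int data[256];
  for (int i = 0; i < 256; i++) data[i] = i * 0x01010101;
  // A 4-byte load at &data[128] + 2 picks up bytes 80 80 81 81 in memory
  // order, which is the little-endian value 0x81818080 (see Test 43).
  uint32_t v;
  memcpy(&v, reinterpret_cast<char*>(&data[128]) + 2, sizeof(v));
  printf("0x%x\n", v);  // Prints 0x81818080.
  return 0;
}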

45
deps/v8/test/cctest/test-thread-termination.cc

@ -308,3 +308,48 @@ TEST(TerminateLoadICException) {
v8::Script::Compile(source)->Run();
context.Dispose();
}
v8::Handle<v8::Value> ReenterAfterTermination(const v8::Arguments& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating());
v8::Script::Compile(v8::String::New("function f() {"
" var term = true;"
" try {"
" while(true) {"
" if (term) terminate();"
" term = false;"
" }"
" fail();"
" } catch(e) {"
" fail();"
" }"
"}"
"f()"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
CHECK(v8::V8::IsExecutionTerminating());
v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
return v8::Undefined();
}
// Test that reentry into V8 while the termination exception is still pending
// (has not yet unwound the 0-level JS frame) does not crash.
TEST(TerminateAndReenterFromThreadItself) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
CHECK(!v8::V8::IsExecutionTerminating());
// Check we can run JS again after termination.
CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
"f()"))->Run()->IsTrue());
context.Dispose();
}

54
deps/v8/test/cctest/test-unbound-queue.cc

@ -0,0 +1,54 @@
// Copyright 2010 the V8 project authors. All rights reserved.
//
// Tests of the unbound queue.
#include "v8.h"
#include "unbound-queue-inl.h"
#include "cctest.h"
namespace i = v8::internal;
using i::UnboundQueue;
TEST(SingleRecord) {
typedef int Record;
UnboundQueue<Record> cq;
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
Record rec = 0;
cq.Dequeue(&rec);
CHECK_EQ(1, rec);
CHECK(cq.IsEmpty());
}
TEST(MultipleRecords) {
typedef int Record;
UnboundQueue<Record> cq;
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
for (int i = 2; i <= 5; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
Record rec = 0;
for (int i = 1; i <= 4; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
for (int i = 6; i <= 12; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
for (int i = 5; i <= 12; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
CHECK(cq.IsEmpty());
}

251
deps/v8/test/mjsunit/object-define-property.js

@ -53,36 +53,46 @@ try {
assertTrue(/called on non-object/.test(e));
}
// Object
// Object.
var obj1 = {};
// Values
// Values.
var val1 = 0;
var val2 = 0;
var val3 = 0;
// Descriptors
function setter1() {val1++; }
function getter1() {return val1; }
function setter2() {val2++; }
function getter2() {return val2; }
function setter3() {val3++; }
function getter3() {return val3; }
// Descriptors.
var emptyDesc = {};
var accessorConfigurable = {
set: function() { val1++; },
get: function() { return val1; },
set: setter1,
get: getter1,
configurable: true
};
var accessorNoConfigurable = {
set: function() { val2++; },
get: function() { return val2; },
set: setter2,
get: getter2,
configurable: false
};
var accessorOnlySet = {
set: function() { val3++; },
set: setter3,
configurable: true
};
var accessorOnlyGet = {
get: function() { return val3; },
get: getter3,
configurable: true
};
@ -200,7 +210,7 @@ assertEquals(2, val1);
assertEquals(4, val2);
assertEquals(4, obj1.bar);
// Define an accessor that has only a setter
// Define an accessor that has only a setter.
Object.defineProperty(obj1, "setOnly", accessorOnlySet);
desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
assertTrue(desc.configurable);
@ -212,7 +222,7 @@ assertEquals(desc.get, undefined);
assertEquals(1, obj1.setOnly = 1);
assertEquals(1, val3);
// Add a getter - should not touch the setter
// Add a getter - should not touch the setter.
Object.defineProperty(obj1, "setOnly", accessorOnlyGet);
desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
assertTrue(desc.configurable);
@ -256,7 +266,7 @@ obj1.foobar = 1001;
assertEquals(obj1.foobar, 1000);
// Redefine to writable descriptor - now writing to foobar should be allowed
// Redefine to writable descriptor - now writing to foobar should be allowed.
Object.defineProperty(obj1, "foobar", dataWritable);
desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
assertEquals(obj1.foobar, 3000);
@ -279,7 +289,7 @@ desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
assertEquals(obj1.foobar, 2000);
assertEquals(desc.value, 2000);
assertFalse(desc.configurable);
assertFalse(desc.writable);
assertTrue(desc.writable);
assertFalse(desc.enumerable);
assertEquals(desc.get, undefined);
assertEquals(desc.set, undefined);
@ -307,7 +317,7 @@ desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
assertEquals(obj1.foobar, 2000);
assertEquals(desc.value, 2000);
assertFalse(desc.configurable);
assertFalse(desc.writable);
assertTrue(desc.writable);
assertFalse(desc.enumerable);
assertEquals(desc.get, undefined);
assertEquals(desc.set, undefined);
@ -375,7 +385,7 @@ assertEquals(desc.set, undefined);
// Redefinition of an accessor defined using __defineGetter__ and
// __defineSetter__
// __defineSetter__.
function get(){return this.x}
function set(x){this.x=x};
@ -442,7 +452,7 @@ assertEquals(1, obj4.bar = 1);
assertEquals(5, val1);
assertEquals(5, obj4.bar);
// Make sure an error is thrown when trying to access to redefined function
// Make sure an error is thrown when trying to access to redefined function.
try {
obj4.bar();
assertTrue(false);
@ -453,7 +463,7 @@ try {
// Test runtime calls to DefineOrRedefineDataProperty and
// DefineOrRedefineAccessorProperty - make sure we don't
// crash
// crash.
try {
%DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
} catch (e) {
@ -497,3 +507,210 @@ try {
} catch (e) {
assertTrue(/illegal access/.test(e));
}
// Exercise every possible difference in step 6 of DefineOwnProperty: when
// the existing property is non-configurable, any difference between the
// given property descriptor and the existing property must not succeed
// silently but must throw an error instead.
var obj5 = {};
// Enumerable will default to false.
Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
// First, test that we are actually allowed to set the accessor if all
// values of the descriptor are the same as the existing one's.
Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
// Different setter.
var descDifferent = {
configurable:false,
enumerable:false,
set: setter1,
get: getter2
};
try {
Object.defineProperty(obj5, 'foo', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different getter.
descDifferent = {
configurable:false,
enumerable:false,
set: setter2,
get: getter1
};
try {
Object.defineProperty(obj5, 'foo', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different enumerable.
descDifferent = {
configurable:false,
enumerable:true,
set: setter2,
get: getter2
};
try {
Object.defineProperty(obj5, 'foo', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different configurable.
descDifferent = {
configurable:false,
enumerable:true,
set: setter2,
get: getter2
};
try {
Object.defineProperty(obj5, 'foo', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// No difference.
descDifferent = {
configurable:false,
enumerable:false,
set: setter2,
get: getter2
};
// Make sure we can still redefine if all properties are the same.
Object.defineProperty(obj5, 'foo', descDifferent);
// Make sure that obj5 still holds the original values.
desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
assertEquals(desc.get, getter2);
assertEquals(desc.set, setter2);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
// Also exercise step 6 on a data property; writable and enumerable
// default to false.
Object.defineProperty(obj5, 'bar', dataNoConfigurable);
// Test that redefinition with the same property descriptor is possible.
Object.defineProperty(obj5, 'bar', dataNoConfigurable);
// Different value.
descDifferent = {
configurable:false,
enumerable:false,
writable: false,
value: 1999
};
try {
Object.defineProperty(obj5, 'bar', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different writable.
descDifferent = {
configurable:false,
enumerable:false,
writable: true,
value: 2000
};
try {
Object.defineProperty(obj5, 'bar', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different enumerable.
descDifferent = {
configurable:false,
enumerable:true,
writable:false,
value: 2000
};
try {
Object.defineProperty(obj5, 'bar', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// Different configurable.
descDifferent = {
configurable:true,
enumerable:false,
writable:false,
value: 2000
};
try {
Object.defineProperty(obj5, 'bar', descDifferent);
assertTrue(false);
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
// No difference.
descDifferent = {
configurable:false,
enumerable:false,
writable:false,
value:2000
};
// Make sure we can still redefine if all properties are the same.
Object.defineProperty(obj5, 'bar', descDifferent);
// Make sure that obj5 still holds the original values.
desc = Object.getOwnPropertyDescriptor(obj5, 'bar');
assertEquals(desc.value, 2000);
assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
// Make sure that we can't overwrite +0 with -0 or vice versa: the
// descriptor comparison uses the SameValue algorithm, which distinguishes
// the two zeros.
var descMinusZero = {value: -0, configurable: false};
var descPlusZero = {value: +0, configurable: false};
Object.defineProperty(obj5, 'minuszero', descMinusZero);
// Make sure we can redefine with -0.
Object.defineProperty(obj5, 'minuszero', descMinusZero);
try {
Object.defineProperty(obj5, 'minuszero', descPlusZero);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
Object.defineProperty(obj5, 'pluszero', descPlusZero);
// Make sure we can redefine with +0.
Object.defineProperty(obj5, 'pluszero', descPlusZero);
try {
Object.defineProperty(obj5, 'pluszero', descMinusZero);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}

38
deps/v8/test/mjsunit/regress/regress-712.js

@ -0,0 +1,38 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This regression test ensures that calling Object.defineProperty with an
// empty property descriptor on an existing non-configurable property does
// not override that property.
// See: http://code.google.com/p/v8/issues/detail?id=712
var obj = {};
Object.defineProperty(obj, "x", { get: function() { return "42"; },
configurable: false });
assertEquals(obj.x, "42");
Object.defineProperty(obj, 'x', {});
assertEquals(obj.x, "42");

36
deps/v8/test/mjsunit/regress/regress-720.js

@ -0,0 +1,36 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This regression test ensures that Object.defineProperty keeps the
// existing value of the writable flag when the provided descriptor does
// not specify one.
// See: http://code.google.com/p/v8/issues/detail?id=720
var o = {x: 10};
Object.defineProperty(o, "x", {value: 5});
var desc = Object.getOwnPropertyDescriptor(o, "x");
assertTrue(desc["writable"]);

102
deps/v8/test/mjsunit/samevalue.js

@ -0,0 +1,102 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-natives_as natives
// Test the SameValue internal method.
var obj1 = {x: 10, y: 11, z: "test"};
var obj2 = {x: 10, y: 11, z: "test"};
assertTrue(natives.SameValue(0, 0));
assertTrue(natives.SameValue(+0, +0));
assertTrue(natives.SameValue(-0, -0));
assertTrue(natives.SameValue(1, 1));
assertTrue(natives.SameValue(2, 2));
assertTrue(natives.SameValue(-1, -1));
assertTrue(natives.SameValue(0.5, 0.5));
assertTrue(natives.SameValue(true, true));
assertTrue(natives.SameValue(false, false));
assertTrue(natives.SameValue(NaN, NaN));
assertTrue(natives.SameValue(null, null));
assertTrue(natives.SameValue("foo", "foo"));
assertTrue(natives.SameValue(obj1, obj1));
// Undefined values.
assertTrue(natives.SameValue());
assertTrue(natives.SameValue(undefined, undefined));
assertFalse(natives.SameValue(0,1));
assertFalse(natives.SameValue("foo", "bar"));
assertFalse(natives.SameValue(obj1, obj2));
assertFalse(natives.SameValue(true, false));
assertFalse(natives.SameValue(obj1, true));
assertFalse(natives.SameValue(obj1, "foo"));
assertFalse(natives.SameValue(obj1, 1));
assertFalse(natives.SameValue(obj1, undefined));
assertFalse(natives.SameValue(obj1, NaN));
assertFalse(natives.SameValue(undefined, true));
assertFalse(natives.SameValue(undefined, "foo"));
assertFalse(natives.SameValue(undefined, 1));
assertFalse(natives.SameValue(undefined, obj1));
assertFalse(natives.SameValue(undefined, NaN));
assertFalse(natives.SameValue(NaN, true));
assertFalse(natives.SameValue(NaN, "foo"));
assertFalse(natives.SameValue(NaN, 1));
assertFalse(natives.SameValue(NaN, obj1));
assertFalse(natives.SameValue(NaN, undefined));
assertFalse(natives.SameValue("foo", true));
assertFalse(natives.SameValue("foo", 1));
assertFalse(natives.SameValue("foo", obj1));
assertFalse(natives.SameValue("foo", undefined));
assertFalse(natives.SameValue("foo", NaN));
assertFalse(natives.SameValue(true, 1));
assertFalse(natives.SameValue(true, obj1));
assertFalse(natives.SameValue(true, undefined));
assertFalse(natives.SameValue(true, NaN));
assertFalse(natives.SameValue(true, "foo"));
assertFalse(natives.SameValue(1, true));
assertFalse(natives.SameValue(1, obj1));
assertFalse(natives.SameValue(1, undefined));
assertFalse(natives.SameValue(1, NaN));
assertFalse(natives.SameValue(1, "foo"));
// Special string cases.
assertFalse(natives.SameValue("1", 1));
assertFalse(natives.SameValue("true", true));
assertFalse(natives.SameValue("false", false));
assertFalse(natives.SameValue("undefined", undefined));
assertFalse(natives.SameValue("NaN", NaN));
// -0 and +0 should be different.
assertFalse(natives.SameValue(+0, -0));
assertFalse(natives.SameValue(-0, +0));
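
These assertions encode the ES5 SameValue algorithm (section 9.12): it behaves like strict equality except that NaN is SameValue to NaN and +0 is not SameValue to -0. A reference sketch of the numeric cases in C++ (not V8's implementation), using the 1/x sign trick to tell the zeros apart:

// SameValue restricted to doubles; everything else falls back to ==.
bool SameValueNumber(double x, double y) {
  if (x != x && y != y) return true;   // Both NaN: SameValue holds.
  if (x == 0.0 && y == 0.0) {
    // +0 == -0 under ==, but 1/+0 and 1/-0 are infinities of opposite sign.
    return (1.0 / x) == (1.0 / y);
  }
  return x == y;
}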

2
deps/v8/tools/gyp/v8.gyp

@ -412,6 +412,8 @@
'../../src/top.h',
'../../src/type-info.cc',
'../../src/type-info.h',
'../../src/unbound-queue-inl.h',
'../../src/unbound-queue.h',
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',

4
deps/v8/tools/v8.xcodeproj/project.pbxproj

@ -627,6 +627,8 @@
9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-arm.cc"; path = "arm/fast-codegen-arm.cc"; sourceTree = "<group>"; };
9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
9FF7A28311A642EA0051B8F2 /* unbound-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@ -970,6 +972,8 @@
897FF1910E719B8F00D62E90 /* top.h */,
9FA38BAE1175B2D200C4CD55 /* type-info.cc */,
9FA38BAF1175B2D200C4CD55 /* type-info.h */,
9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */,
9FF7A28311A642EA0051B8F2 /* unbound-queue.h */,
897FF1920E719B8F00D62E90 /* unicode-inl.h */,
897FF1930E719B8F00D62E90 /* unicode.cc */,
897FF1940E719B8F00D62E90 /* unicode.h */,

32
deps/v8/tools/visual_studio/v8_base.vcproj

@ -144,6 +144,30 @@
/>
</FileConfiguration>
</File>
<File
RelativePath="..\..\src\dtoa.cc"
>
</File>
<File
RelativePath="..\..\src\dtoa.h"
>
</File>
<File
RelativePath="..\..\src\fast-dtoa.cc"
>
</File>
<File
RelativePath="..\..\src\fast-dtoa.h"
>
</File>
<File
RelativePath="..\..\src\fixed-dtoa.cc"
>
</File>
<File
RelativePath="..\..\src\fixed-dtoa.h"
>
</File>
</Filter>
<Filter
Name="src"
@ -960,6 +984,14 @@
RelativePath="..\..\src\type-info.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue.h"
>
</File>
<File
RelativePath="..\..\src\unicode-inl.h"
>

8
deps/v8/tools/visual_studio/v8_base_arm.vcproj

@ -952,6 +952,14 @@
RelativePath="..\..\src\type-info.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue.h"
>
</File>
<File
RelativePath="..\..\src\unicode-inl.h"
>

8
deps/v8/tools/visual_studio/v8_base_x64.vcproj

@ -937,6 +937,14 @@
RelativePath="..\..\src\type-info.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\unbound-queue.h"
>
</File>
<File
RelativePath="..\..\src\unicode-inl.h"
>

4
deps/v8/tools/visual_studio/v8_cctest.vcproj

@ -247,6 +247,10 @@
RelativePath="..\..\test\cctest\test-strings.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-unbound-queue.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-utils.cc"
>
