
Upgrade v8 to 1.3.17

v0.7.4-release
Ryan Dahl, 15 years ago
commit 50f45d14b4
100 changed files (lines changed in parentheses):

1. deps/v8/ChangeLog (21)
2. deps/v8/include/v8.h (63)
3. deps/v8/src/api.cc (60)
4. deps/v8/src/api.h (9)
5. deps/v8/src/arm/assembler-arm-inl.h (6)
6. deps/v8/src/arm/assembler-arm.h (8)
7. deps/v8/src/arm/builtins-arm.cc (4)
8. deps/v8/src/arm/codegen-arm.cc (56)
9. deps/v8/src/arm/codegen-arm.h (9)
10. deps/v8/src/arm/fast-codegen-arm.cc (584)
11. deps/v8/src/arm/frames-arm.cc (15)
12. deps/v8/src/arm/frames-arm.h (2)
13. deps/v8/src/arm/ic-arm.cc (14)
14. deps/v8/src/arm/macro-assembler-arm.cc (19)
15. deps/v8/src/arm/macro-assembler-arm.h (8)
16. deps/v8/src/arm/regexp-macro-assembler-arm.cc (15)
17. deps/v8/src/arm/regexp-macro-assembler-arm.h (15)
18. deps/v8/src/arm/virtual-frame-arm.cc (34)
19. deps/v8/src/assembler.cc (25)
20. deps/v8/src/assembler.h (8)
21. deps/v8/src/ast.cc (8)
22. deps/v8/src/ast.h (2)
23. deps/v8/src/bootstrapper.cc (3)
24. deps/v8/src/builtins.cc (76)
25. deps/v8/src/builtins.h (90)
26. deps/v8/src/code-stubs.cc (91)
27. deps/v8/src/code-stubs.h (67)
28. deps/v8/src/codegen.cc (18)
29. deps/v8/src/codegen.h (29)
30. deps/v8/src/compiler.cc (241)
31. deps/v8/src/conversions-inl.h (2)
32. deps/v8/src/conversions.cc (2)
33. deps/v8/src/conversions.h (1)
34. deps/v8/src/debug-delay.js (10)
35. deps/v8/src/factory.cc (12)
36. deps/v8/src/factory.h (9)
37. deps/v8/src/fast-codegen.cc (197)
38. deps/v8/src/fast-codegen.h (27)
39. deps/v8/src/flag-definitions.h (6)
40. deps/v8/src/frames.cc (18)
41. deps/v8/src/frames.h (25)
42. deps/v8/src/global-handles.cc (101)
43. deps/v8/src/global-handles.h (18)
44. deps/v8/src/globals.h (18)
45. deps/v8/src/handles.cc (17)
46. deps/v8/src/handles.h (10)
47. deps/v8/src/heap-profiler.cc (44)
48. deps/v8/src/heap-profiler.h (4)
49. deps/v8/src/heap.cc (308)
50. deps/v8/src/heap.h (84)
51. deps/v8/src/ia32/assembler-ia32.cc (18)
52. deps/v8/src/ia32/assembler-ia32.h (10)
53. deps/v8/src/ia32/builtins-ia32.cc (86)
54. deps/v8/src/ia32/codegen-ia32.cc (303)
55. deps/v8/src/ia32/codegen-ia32.h (27)
56. deps/v8/src/ia32/disasm-ia32.cc (272)
57. deps/v8/src/ia32/fast-codegen-ia32.cc (584)
58. deps/v8/src/ia32/frames-ia32.cc (13)
59. deps/v8/src/ia32/frames-ia32.h (2)
60. deps/v8/src/ia32/ic-ia32.cc (367)
61. deps/v8/src/ia32/macro-assembler-ia32.cc (130)
62. deps/v8/src/ia32/macro-assembler-ia32.h (25)
63. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (20)
64. deps/v8/src/ia32/stub-cache-ia32.cc (39)
65. deps/v8/src/ic.cc (69)
66. deps/v8/src/ic.h (16)
67. deps/v8/src/list.h (1)
68. deps/v8/src/location.h (7)
69. deps/v8/src/log.cc (32)
70. deps/v8/src/objects-debug.cc (176)
71. deps/v8/src/objects-inl.h (286)
72. deps/v8/src/objects.cc (374)
73. deps/v8/src/objects.h (273)
74. deps/v8/src/platform-nullos.cc (7)
75. deps/v8/src/platform-posix.cc (6)
76. deps/v8/src/platform-win32.cc (50)
77. deps/v8/src/platform.h (1)
78. deps/v8/src/regexp-macro-assembler.h (16)
79. deps/v8/src/runtime.cc (178)
80. deps/v8/src/runtime.h (4)
81. deps/v8/src/runtime.js (5)
82. deps/v8/src/serialize.cc (508)
83. deps/v8/src/serialize.h (230)
84. deps/v8/src/snapshot-common.cc (56)
85. deps/v8/src/snapshot.h (3)
86. deps/v8/src/spaces-inl.h (7)
87. deps/v8/src/spaces.cc (42)
88. deps/v8/src/spaces.h (14)
89. deps/v8/src/string-stream.cc (2)
90. deps/v8/src/string.js (9)
91. deps/v8/src/stub-cache.cc (7)
92. deps/v8/src/third_party/valgrind/valgrind.h (57)
93. deps/v8/src/top.h (4)
94. deps/v8/src/v8-counters.h (1)
95. deps/v8/src/v8.cc (2)
96. deps/v8/src/v8.h (2)
97. deps/v8/src/version.cc (2)
98. deps/v8/src/x64/assembler-x64.cc (86)
99. deps/v8/src/x64/assembler-x64.h (27)
100. deps/v8/src/x64/builtins-x64.cc (78)

deps/v8/ChangeLog (21)

@@ -1,3 +1,22 @@
+2009-10-28: Version 1.3.17
+
+        Added API method to get simple heap statistics.
+
+        Improved heap profiler support.
+
+        Fixed the implementation of the resource constraint API so it
+        works when using snapshots.
+
+        Fixed a number of issues in the Windows 64-bit version.
+
+        Optimized calls to API getters.
+
+        Added valgrind notification on code modification to the 64-bit version.
+
+        Fixed issue where we logged shared library addresses on Windows at
+        startup and never used them.
+
+
 2009-10-16: Version 1.3.16

         X64: Convert smis to holding 32 bits of payload.

@@ -41,7 +60,7 @@
         Ensure V8 is initialized before locking and unlocking threads.

         Implemented a new JavaScript minifier for compressing the source of
-        the built-in JavaScript. This Remove non-Open Source code from Douglas
+        the built-in JavaScript. This removes non-Open Source code from Douglas
         Crockford from the project.

         Added a missing optimization in StringCharAt.

deps/v8/include/v8.h (63)

@@ -452,8 +452,8 @@ class V8EXPORT HandleScope {
   void* operator new(size_t size);
   void operator delete(void*, size_t);

-  // This Data class is accessible internally through a typedef in the
-  // ImplementationUtilities class.
+  // This Data class is accessible internally as HandleScopeData through a
+  // typedef in the ImplementationUtilities class.
   class V8EXPORT Data {
    public:
     int extensions;

@@ -1069,7 +1069,7 @@ class V8EXPORT Number : public Primitive {
 class V8EXPORT Integer : public Number {
  public:
   static Local<Integer> New(int32_t value);
-  static inline Local<Integer> NewFromUnsigned(uint32_t value);
+  static Local<Integer> NewFromUnsigned(uint32_t value);
   int64_t Value() const;
   static inline Integer* Cast(v8::Value* obj);
  private:

@@ -1126,6 +1126,16 @@ enum PropertyAttribute {
   DontDelete = 1 << 2
 };

+enum ExternalArrayType {
+  kExternalByteArray = 1,
+  kExternalUnsignedByteArray,
+  kExternalShortArray,
+  kExternalUnsignedShortArray,
+  kExternalIntArray,
+  kExternalUnsignedIntArray,
+  kExternalFloatArray
+};
+
 /**
  * A JavaScript object (ECMA-262, 4.3.3)
  */

@@ -1278,6 +1288,17 @@ class V8EXPORT Object : public Value {
   */
  void SetIndexedPropertiesToPixelData(uint8_t* data, int length);

+ /**
+  * Set the backing store of the indexed properties to be managed by the
+  * embedding layer. Access to the indexed properties will follow the rules
+  * spelled out for the CanvasArray subtypes in the WebGL specification.
+  * Note: The embedding program still owns the data and needs to ensure that
+  * the backing store is preserved while V8 has a reference.
+  */
+ void SetIndexedPropertiesToExternalArrayData(void* data,
+                                              ExternalArrayType array_type,
+                                              int number_of_elements);
+
  static Local<Object> New();
  static inline Object* Cast(Value* obj);
 private:

@@ -2102,6 +2123,29 @@ enum ProfilerModules {
 };

+/**
+ * Collection of V8 heap information.
+ *
+ * Instances of this class can be passed to v8::V8::HeapStatistics to
+ * get heap statistics from V8.
+ */
+class V8EXPORT HeapStatistics {
+ public:
+  HeapStatistics();
+  size_t total_heap_size() { return total_heap_size_; }
+  size_t used_heap_size() { return used_heap_size_; }
+
+ private:
+  void set_total_heap_size(size_t size) { total_heap_size_ = size; }
+  void set_used_heap_size(size_t size) { used_heap_size_ = size; }
+
+  size_t total_heap_size_;
+  size_t used_heap_size_;
+
+  friend class V8;
+};
+
 /**
  * Container class for static utility functions.
  */

@@ -2352,6 +2396,10 @@ class V8EXPORT V8 {
   */
  static bool Dispose();

+ /**
+  * Get statistics about the heap memory usage.
+  */
+ static void GetHeapStatistics(HeapStatistics* heap_statistics);

 /**
  * Optional notification that the embedder is idle.

@@ -3069,15 +3117,6 @@ Number* Number::Cast(v8::Value* value) {
 }

-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
-  bool fits_into_int32_t = (value & (1 << 31)) == 0;
-  if (fits_into_int32_t) {
-    return Integer::New(static_cast<int32_t>(value));
-  }
-  return Local<Integer>::Cast(Number::New(value));
-}
-
 Integer* Integer::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
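
For orientation, a minimal embedder sketch of the heap statistics API added above (not part of the commit; it assumes the pre-isolate embedding API of this V8 generation, and main() is the only invented name):

    #include <stdio.h>
    #include <v8.h>

    int main() {
      // Set up a context using the classic (pre-isolate) embedding API.
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);

      // Query the new heap statistics API added in this upgrade.
      v8::HeapStatistics stats;
      v8::V8::GetHeapStatistics(&stats);
      printf("heap: %lu bytes committed, %lu bytes used\n",
             static_cast<unsigned long>(stats.total_heap_size()),
             static_cast<unsigned long>(stats.used_heap_size()));

      context.Dispose();
      return 0;
    }

Per the api.cc change below, total_heap_size() reports i::Heap::CommittedMemory() and used_heap_size() reports i::Heap::SizeOfObjects().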

deps/v8/src/api.cc (60)

@@ -342,10 +342,10 @@ ResourceConstraints::ResourceConstraints()
 bool SetResourceConstraints(ResourceConstraints* constraints) {
-  int semispace_size = constraints->max_young_space_size();
+  int young_space_size = constraints->max_young_space_size();
   int old_gen_size = constraints->max_old_space_size();
-  if (semispace_size != 0 || old_gen_size != 0) {
-    bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+  if (young_space_size != 0 || old_gen_size != 0) {
+    bool result = i::Heap::ConfigureHeap(young_space_size / 2, old_gen_size);
     if (!result) return false;
   }
   if (constraints->stack_limit() != NULL) {

@@ -2306,6 +2306,30 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
 }

+void v8::Object::SetIndexedPropertiesToExternalArrayData(
+    void* data,
+    ExternalArrayType array_type,
+    int length) {
+  ON_BAILOUT("v8::SetIndexedPropertiesToExternalArrayData()", return);
+  ENTER_V8;
+  HandleScope scope;
+  if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "length exceeds max acceptable value")) {
+    return;
+  }
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  if (!ApiCheck(!self->IsJSArray(),
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "JSArray is not supported")) {
+    return;
+  }
+  i::Handle<i::ExternalArray> array =
+      i::Factory::NewExternalArray(length, array_type, data);
+  self->set_elements(*array);
+}
+
 Local<v8::Object> Function::NewInstance() const {
   return NewInstance(0, NULL);
 }

@@ -2611,6 +2635,15 @@ bool v8::V8::Dispose() {
 }

+HeapStatistics::HeapStatistics(): total_heap_size_(0), used_heap_size_(0) { }
+
+void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+  heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
+}
+
 bool v8::V8::IdleNotification() {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.

@@ -2620,10 +2653,8 @@ bool v8::V8::IdleNotification() {
 void v8::V8::LowMemoryNotification() {
-#if defined(ANDROID)
   if (!i::V8::IsRunning()) return;
   i::Heap::CollectAllGarbage(true);
-#endif
 }

@@ -3152,6 +3183,10 @@ Local<v8::Object> v8::Object::New() {
 Local<v8::Value> v8::Date::New(double time) {
   EnsureInitialized("v8::Date::New()");
   LOG_API("Date::New");
+  if (isnan(time)) {
+    // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+    time = i::OS::nan_value();
+  }
   ENTER_V8;
   EXCEPTION_PREAMBLE();
   i::Handle<i::Object> obj =

@@ -3224,6 +3259,10 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
 Local<Number> v8::Number::New(double value) {
   EnsureInitialized("v8::Number::New()");
+  if (isnan(value)) {
+    // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+    value = i::OS::nan_value();
+  }
   ENTER_V8;
   i::Handle<i::Object> result = i::Factory::NewNumber(value);
   return Utils::NumberToLocal(result);

@@ -3241,6 +3280,17 @@ Local<Integer> v8::Integer::New(int32_t value) {
 }

+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+  bool fits_into_int32_t = (value & (1 << 31)) == 0;
+  if (fits_into_int32_t) {
+    return Integer::New(static_cast<int32_t>(value));
+  }
+  ENTER_V8;
+  i::Handle<i::Object> result = i::Factory::NewNumber(value);
+  return Utils::IntegerToLocal(result);
+}
+
 void V8::IgnoreOutOfMemoryException() {
   thread_local.set_ignore_out_of_memory(true);
 }
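
A similar sketch for the external-array binding added above (not part of the commit; the buffer and helper names are hypothetical). The embedder keeps ownership of the backing store, and api.cc rejects JSArray targets:

    #include <v8.h>

    // Embedder-owned backing store; per the v8.h comment above, it must
    // stay alive for as long as V8 holds a reference to the object.
    static float buffer[256];

    void AttachBuffer(v8::Handle<v8::Object> target) {
      // Indexed properties on 'target' now read and write 'buffer'
      // directly, following the WebGL CanvasArray access rules.
      target->SetIndexedPropertiesToExternalArrayData(
          buffer, v8::kExternalFloatArray, 256);
    }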

deps/v8/src/api.h (9)

@@ -125,6 +125,15 @@ static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
 }

+class ApiFunction {
+ public:
+  explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
+  v8::internal::Address address() { return addr_; }
+ private:
+  v8::internal::Address addr_;
+};
+
 v8::Arguments::Arguments(v8::Local<v8::Value> data,
                          v8::Local<v8::Object> holder,
                          v8::Local<v8::Function> callee,

deps/v8/src/arm/assembler-arm-inl.h (6)

@@ -245,6 +245,12 @@ Address Assembler::target_address_at(Address pc) {
 }

+void Assembler::set_target_at(Address constant_pool_entry,
+                              Address target) {
+  Memory::Address_at(constant_pool_entry) = target;
+}
+
 void Assembler::set_target_address_at(Address pc, Address target) {
   Memory::Address_at(target_address_address_at(pc)) = target;
   // Intuitively, we would think it is necessary to flush the instruction cache

deps/v8/src/arm/assembler-arm.h (8)

@@ -437,6 +437,14 @@ class Assembler : public Malloced {
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));

+  // Modify the code target address in a constant pool entry.
+  inline static void set_target_at(Address constant_pool_entry, Address target);
+
+  // Here we are patching the address in the constant pool, not the actual call
+  // instruction. The address in the constant pool is the same size as a
+  // pointer.
+  static const int kCallTargetSize = kPointerSize;
+
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);

deps/v8/src/arm/builtins-arm.cc (4)

@@ -949,6 +949,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   const int kGlobalIndex =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+  __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));

   __ bind(&patch_receiver);

@@ -1107,6 +1109,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   const int kGlobalOffset =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
   __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));

   // Push the receiver.

deps/v8/src/arm/codegen-arm.cc (56)

@@ -1122,22 +1122,20 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
 void CodeGenerator::CheckStack() {
   VirtualFrame::SpilledScope spilled_scope;
-  if (FLAG_check_stack) {
-    Comment cmnt(masm_, "[ check stack");
-    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    // Put the lr setup instruction in the delay slot. kInstrSize is added to
-    // the implicit 8 byte offset that always applies to operations with pc and
-    // gives a return address 12 bytes down.
-    masm_->add(lr, pc, Operand(Assembler::kInstrSize));
-    masm_->cmp(sp, Operand(ip));
-    StackCheckStub stub;
-    // Call the stub if lower.
-    masm_->mov(pc,
-               Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                       RelocInfo::CODE_TARGET),
-               LeaveCC,
-               lo);
-  }
+  Comment cmnt(masm_, "[ check stack");
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  // Put the lr setup instruction in the delay slot. kInstrSize is added to
+  // the implicit 8 byte offset that always applies to operations with pc and
+  // gives a return address 12 bytes down.
+  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+  masm_->cmp(sp, Operand(ip));
+  StackCheckStub stub;
+  // Call the stub if lower.
+  masm_->mov(pc,
+             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                     RelocInfo::CODE_TARGET),
+             LeaveCC,
+             lo);
 }

@@ -1172,9 +1170,9 @@ void CodeGenerator::VisitBlock(Block* node) {
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   VirtualFrame::SpilledScope spilled_scope;
-  frame_->EmitPush(cp);
   __ mov(r0, Operand(pairs));
   frame_->EmitPush(r0);
+  frame_->EmitPush(cp);
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kDeclareGlobals, 3);

@@ -2255,12 +2253,10 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(boilerplate->IsBoilerplate());

-  // Push the boilerplate on the stack.
-  __ mov(r0, Operand(boilerplate));
-  frame_->EmitPush(r0);
-
   // Create a new closure.
   frame_->EmitPush(cp);
+  __ mov(r0, Operand(boilerplate));
+  frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->EmitPush(r0);
 }

@@ -5799,7 +5795,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate) {
   // r0: result parameter for PerformGC, if any

@@ -5859,7 +5855,7 @@
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame(frame_type);
+  __ LeaveExitFrame(mode);

   // check if we should retry or throw exception
   Label retry;

@@ -5905,12 +5901,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
   // this by performing a garbage collection and retrying the
   // builtin once.

-  StackFrame::Type frame_type = is_debug_break
-      ? StackFrame::EXIT_DEBUG
-      : StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break
+      ? ExitFrame::MODE_DEBUG
+      : ExitFrame::MODE_NORMAL;

   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type);
+  __ EnterExitFrame(mode);

   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)

@@ -5925,7 +5921,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);

@@ -5934,7 +5930,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);

@@ -5945,7 +5941,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);

deps/v8/src/arm/codegen-arm.h (9)

@@ -242,7 +242,7 @@ class CodeGenerator: public AstVisitor {
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);

-  MemOperand ContextOperand(Register context, int index) const {
+  static MemOperand ContextOperand(Register context, int index) {
     return MemOperand(context, Context::SlotOffset(index));
   }

@@ -254,7 +254,7 @@ class CodeGenerator: public AstVisitor {
                                JumpTarget* slow);

   // Expressions
-  MemOperand GlobalObject() const {
+  static MemOperand GlobalObject() {
     return ContextOperand(cp, Context::GLOBAL_INDEX);
   }

@@ -330,10 +330,11 @@ class CodeGenerator: public AstVisitor {
                                const InlineRuntimeLUT& new_entry,
                                InlineRuntimeLUT* old_entry);

+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);

-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);

   // Declare global variables and functions in the given array of
   // name/value pairs.

@@ -425,6 +426,8 @@ class CodeGenerator: public AstVisitor {
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;

   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };

deps/v8/src/arm/fast-codegen-arm.cc (584)

@@ -29,6 +29,7 @@
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "parser.h"

 namespace v8 {
 namespace internal {

@@ -62,27 +63,32 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
     if (locals_count > 0) {
       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     }
-    if (FLAG_check_stack) {
-      __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    }
+    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
     for (int i = 0; i < locals_count; i++) {
       __ push(ip);
     }
   }

-  if (FLAG_check_stack) {
+  // Check the stack for overflow or break request.
   // Put the lr setup instruction in the delay slot. The kInstrSize is
   // added to the implicit 8 byte offset that always applies to operations
   // with pc and gives a return address 12 bytes down.
   Comment cmnt(masm_, "[ Stack check");
   __ add(lr, pc, Operand(Assembler::kInstrSize));
   __ cmp(sp, Operand(r2));
   StackCheckStub stub;
   __ mov(pc,
          Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                  RelocInfo::CODE_TARGET),
          LeaveCC,
          lo);
-  }
+
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }

   { Comment cmnt(masm_, "[ Body");

@@ -94,6 +100,13 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
     // body.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
     SetReturnPosition(fun);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r0.
+      __ push(r0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
     __ RecordJSReturn();
     __ mov(sp, fp);
     __ ldm(ia_w, sp, fp.bit() | lr.bit());

@@ -104,52 +117,311 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
 }

-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
-}
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ ldr(ip, MemOperand(fp, SlotOffset(source)));
+      __ push(ip);
+      break;
+  }
+}
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ mov(ip, Operand(expr->handle()));
+      __ push(ip);
+      break;
+  }
+}
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      __ pop(ip);
+      __ str(ip, MemOperand(fp, SlotOffset(destination)));
+      break;
+  }
+}
+
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      __ pop();
+      break;
+    case Location::TEMP:
+      __ str(source, MemOperand(sp));
+      break;
+  }
+}
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ mov(r1, Operand(pairs));
+  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}

 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
-  Visit(stmt->expression());
-  __ pop(r0);
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ mov(r0, Operand(expr->AsLiteral()->handle()));
+  } else {
+    Visit(expr);
+    Move(r0, expr->location());
+  }
+  if (FLAG_trace) {
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
   __ RecordJSReturn();
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
   int num_parameters = function_->scope()->num_parameters();
   __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
   __ Jump(lr);
 }

+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ mov(r0, Operand(boilerplate));
+  __ stm(db_w, sp, cp.bit() | r0.bit());
+  __ CallRuntime(Runtime::kNewClosure, 2);
+  Move(expr->location(), r0);
+}

 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
-  ASSERT(rewrite != NULL);
-
-  Slot* slot = rewrite->AsSlot();
-  ASSERT(slot != NULL);
-  { Comment cmnt(masm_, "[ Slot");
-    if (expr->location().is_temporary()) {
-      __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
-      __ push(ip);
-    } else {
-      ASSERT(expr->location().is_nowhere());
-    }
-  }
-}
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in r2 and the global
+    // object on the stack.
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    __ mov(r2, Operand(expr->name()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    DropAndMove(expr->location(), r0);
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Move(expr->location(), rewrite->AsSlot());
+  }
+}
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label boilerplate_exists;
+  __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  // r2 = literal array (0).
+  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r2, literal_offset));
+  // Check whether we need to materialize the object literal boilerplate.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, Operand(ip));
+  __ b(ne, &boilerplate_exists);
+  // Create boilerplate if it does not exist.
+  // r1 = literal index (1).
+  __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+  // r0 = constant properties (2).
+  __ mov(r0, Operand(expr->constant_properties()));
+  __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&boilerplate_exists);
+  // r0 contains boilerplate.
+  // Clone boilerplate.
+  __ push(r0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  // If result_saved == true: the result is saved on top of the stack.
+  // If result_saved == false: the result is in eax.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(r0);  // Save result on stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:  // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      case ObjectLiteral::Property::COMPUTED:  // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(r0);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ ldr(r0, MemOperand(sp));  // Restore result into r0
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(r0);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(r1);
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ ldr(r0, MemOperand(sp));  // Restore result into r0
+        break;
+      default: UNREACHABLE();
+    }
+  }
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ pop();
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(r0);
+      break;
+  }
+}

-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  if (expr->location().is_temporary()) {
-    __ mov(ip, Operand(expr->handle()));
-    __ push(ip);
-  } else {
-    ASSERT(expr->location().is_nowhere());
-  }
-}
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // r4 = JS function, literals array
+  // r3 = literal index
+  // r2 = RegExp pattern
+  // r1 = RegExp flags
+  // r0 = temp + return value (RegExp literal)
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r4, literal_offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(ne, &done);
+  __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r2, Operand(expr->pattern()));
+  __ mov(r1, Operand(expr->flags()));
+  __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ bind(&done);
+  Move(expr->location(), r0);
+}
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ ldr(r0, FieldMemOperand(r3, offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(&make_clone, ne);
+
+  // Instantiate the boilerplate.
+  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r1, Operand(expr->literals()));
+  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(r0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(r0);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(r0);  // Subexpression value.
+    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ str(r0, FieldMemOperand(r1, offset));
+
+    // Update the write barrier for the array store with r0 as the scratch
+    // register.
+    __ mov(r2, Operand(offset));
+    __ RecordWrite(r1, r2, r0);
+  }
+
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ pop();
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(r0);
+      break;
+  }
+}

@@ -158,19 +430,239 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
   ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);

-  Visit(expr->value());
-
+  // Left-hand side can only be a global or a (parameter or local) slot.
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && var->slot() != NULL);
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);

-  if (expr->location().is_temporary()) {
-    __ ldr(ip, MemOperand(sp));
-  } else {
-    ASSERT(expr->location().is_nowhere());
-    __ pop(ip);
-  }
-  __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+  Expression* rhs = expr->value();
+  Location destination = expr->location();
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching. Right-hand-side
+    // value is passed in r0, variable name in r2, and the global object on
+    // the stack.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      __ mov(r0, Operand(rhs->AsLiteral()->handle()));
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      __ pop(r0);
+    }
+    __ mov(r2, Operand(var->name()));
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    DropAndMove(expr->location(), r0);
+  } else {
+    // Local or parameter assignment.
+
+    // Code for the right-hand side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result. Always perform the assignment.
+      __ mov(ip, Operand(rhs->AsLiteral()->handle()));
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+      Move(expr->location(), ip);
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      // Load right-hand side into ip.
+      switch (expr->location().type()) {
+        case Location::NOWHERE:
+          // Case 'var = temp'. Discard right-hand-side temporary.
+          __ pop(ip);
+          break;
+        case Location::TEMP:
+          // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
+          // temporary on the stack.
+          __ ldr(ip, MemOperand(sp));
+          break;
+      }
+      // Do the slot assignment.
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+    }
+  }
 }

+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in ecx and the receiver on the stack.
+    __ mov(r2, Operand(key->AsLiteral()->handle()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call it is treated specially by the LoadIC code.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call it is treated specially by the LoadIC code.
+    __ nop();
+    // Drop key and receiver left on the stack by IC.
+    __ pop();
+  }
+  switch (expr->location().type()) {
+    case Location::TEMP:
+      __ str(r0, MemOperand(sp));
+      break;
+    case Location::NOWHERE:
+      __ pop();
+  }
+}
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ mov(r1, Operand(var->name()));
+  // Push global object as receiver.
+  __ ldr(r0, CodeGenerator::GlobalObject());
+  __ stm(db_w, sp, r1.bit() | r0.bit());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+  // Record source position for debugger
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  DropAndMove(expr->location(), r0);
+}
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(node->expression());
+  ASSERT(node->expression()->location().is_temporary());
+
+  // Push global object (receiver).
+  __ ldr(r0, CodeGenerator::GlobalObject());
+  __ push(r0);
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+    // If location is temporary, it is already on the stack,
+    // so nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(node->position());
+
+  // Load function, arg_count into r1 and r0.
+  __ mov(r0, Operand(arg_count));
+  // Function is in esp[arg_count + 1].
+  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in r0, or pop it.
+  DropAndMove(node->location(), r0);
+}
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+
+  __ CallRuntime(function, arg_count);
+  Move(expr->location(), r0);
+}
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean or operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label done;
+  Location destination = expr->location();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  // Call the runtime to find the boolean value of the left-hand
+  // subexpression. Duplicate the value if it may be needed as the final
+  // result.
+  if (left->AsLiteral() != NULL) {
+    __ mov(r0, Operand(left->AsLiteral()->handle()));
+    __ push(r0);
+    if (destination.is_temporary()) __ push(r0);
+  } else {
+    Visit(left);
+    ASSERT(left->location().is_temporary());
+    if (destination.is_temporary()) {
+      __ ldr(r0, MemOperand(sp));
+      __ push(r0);
+    }
+  }
+  // The left-hand value is in on top of the stack. It is duplicated on the
+  // stack iff the destination location is temporary.
+  __ CallRuntime(Runtime::kToBool, 1);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &done);
+
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) __ pop();
+  // Save or discard the right-hand value as needed.
+  if (right->AsLiteral() != NULL) {
+    Move(destination, right->AsLiteral());
+  } else {
+    Visit(right);
+    Move(destination, right->location());
+  }
+
+  __ bind(&done);
+}

 } }  // namespace v8::internal

deps/v8/src/arm/frames-arm.cc (15)

@@ -54,23 +54,24 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
   if (fp == 0) return NONE;
   // Compute frame type and stack pointer.
   Address sp = fp + ExitFrameConstants::kSPDisplacement;
-  Type type;
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    type = EXIT_DEBUG;
+  const int offset = ExitFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp + offset);
+  bool is_debug_exit = code->IsSmi();
+  if (is_debug_exit) {
     sp -= kNumJSCallerSaved * kPointerSize;
-  } else {
-    type = EXIT;
   }
   // Fill in the state.
   state->sp = sp;
   state->fp = fp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  return type;
+  return EXIT;
 }

 void ExitFrame::Iterate(ObjectVisitor* v) const {
-  // Do nothing
+  v->VisitPointer(&code_slot());
   // The arguments are traversed as part of the expression stack of
   // the calling frame.
 }

deps/v8/src/arm/frames-arm.h (2)

@@ -100,7 +100,7 @@ class ExitFrameConstants : public AllStatic {
   static const int kSPDisplacement = -1 * kPointerSize;

   // The debug marker is just above the frame pointer.
-  static const int kDebugMarkOffset = -1 * kPointerSize;
+  static const int kCodeOffset = -1 * kPointerSize;

   static const int kSavedRegistersOffset = 0 * kPointerSize;

deps/v8/src/arm/ic-arm.cc (14)

@@ -615,6 +615,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
 }

+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
 void KeyedStoreIC::Generate(MacroAssembler* masm,
                             const ExternalReference& f) {
   // ---------- S t a t e --------------

@@ -748,6 +755,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 }

+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
 void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0    : value

deps/v8/src/arm/macro-assembler-arm.cc (19)

@@ -274,9 +274,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
 }

-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
   // Compute the argv pointer and keep it in a callee-saved register.
   // r0 is argc.
   add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));

@@ -298,8 +296,11 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
   mov(fp, Operand(sp));  // setup new frame pointer

-  // Push debug marker.
-  mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    mov(ip, Operand(Smi::FromInt(0)));
+  } else {
+    mov(ip, Operand(CodeObject()));
+  }
   push(ip);

   // Save the frame pointer and the context in top.

@@ -316,7 +317,7 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // Use sp as base to push.
     CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
   }

@@ -348,14 +349,14 @@ void MacroAssembler::AlignStack(int offset) {
 }

-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // This code intentionally clobbers r2 and r3.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     add(r3, fp, Operand(kOffset));
     CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
   }

deps/v8/src/arm/macro-assembler-arm.h (8)

@@ -87,14 +87,14 @@ class MacroAssembler: public Assembler {
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register r0 and
+  // Enter specific kind of exit frame; either normal or debug mode.
+  // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and and the builtin function to call in r5.
-  void EnterExitFrame(StackFrame::Type type);
+  void EnterExitFrame(ExitFrame::Mode mode);

   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(StackFrame::Type type);
+  void LeaveExitFrame(ExitFrame::Mode mode);

   // Align the stack by optionally pushing a Smi zero.
   void AlignStack(int offset);

deps/v8/src/arm/regexp-macro-assembler-arm.cc (15)

@@ -29,6 +29,7 @@
 #include "unicode.h"
 #include "log.h"
 #include "ast.h"
+#include "code-stubs.h"
 #include "regexp-stack.h"
 #include "macro-assembler.h"
 #include "regexp-macro-assembler.h"

@@ -1099,14 +1100,12 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
 void RegExpMacroAssemblerARM::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ mov(r0, Operand(stack_limit));
-    __ ldr(r0, MemOperand(r0));
-    __ cmp(backtrack_stackpointer(), Operand(r0));
-    SafeCall(&stack_overflow_label_, ls);
-  }
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ mov(r0, Operand(stack_limit));
+  __ ldr(r0, MemOperand(r0));
+  __ cmp(backtrack_stackpointer(), Operand(r0));
+  SafeCall(&stack_overflow_label_, ls);
 }

deps/v8/src/arm/regexp-macro-assembler-arm.h (15)

@@ -260,6 +260,21 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
 };

+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+  RegExpCEntryStub() {}
+  virtual ~RegExpCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return RegExpCEntry; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "RegExpCEntryStub"; }
+};
+
 #endif  // V8_NATIVE_REGEXP

deps/v8/src/arm/virtual-frame-arm.cc (34)

@@ -146,29 +146,27 @@ void VirtualFrame::AllocateStackSlots() {
     // Initialize stack slots with 'undefined' value.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   }
-  if (FLAG_check_stack) {
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-  }
+  __ LoadRoot(r2, Heap::kStackLimitRootIndex);
   for (int i = 0; i < count; i++) {
     __ push(ip);
   }
-  if (FLAG_check_stack) {
-    // Put the lr setup instruction in the delay slot. The kInstrSize is added
-    // to the implicit 8 byte offset that always applies to operations with pc
-    // and gives a return address 12 bytes down.
-    masm()->add(lr, pc, Operand(Assembler::kInstrSize));
-    masm()->cmp(sp, Operand(r2));
-    StackCheckStub stub;
-    // Call the stub if lower.
-    masm()->mov(pc,
-                Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                        RelocInfo::CODE_TARGET),
-                LeaveCC,
-                lo);
-  }
+  // Check the stack for overflow or a break request.
+  // Put the lr setup instruction in the delay slot. The kInstrSize is added
+  // to the implicit 8 byte offset that always applies to operations with pc
+  // and gives a return address 12 bytes down.
+  masm()->add(lr, pc, Operand(Assembler::kInstrSize));
+  masm()->cmp(sp, Operand(r2));
+  StackCheckStub stub;
+  // Call the stub if lower.
+  masm()->mov(pc,
+              Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                      RelocInfo::CODE_TARGET),
+              LeaveCC,
+              lo);
 }

 void VirtualFrame::SaveContextRegister() {
   UNIMPLEMENTED();
 }

@@ -255,7 +253,7 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
 void VirtualFrame::RawCallCodeObject(Handle<Code> code,
                                      RelocInfo::Mode rmode) {
   ASSERT(cgen()->HasValidEntryRegisters());
   __ Call(code, rmode);
 }

deps/v8/src/assembler.cc (25)

@@ -522,6 +522,10 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id)
   : address_(Redirect(Builtins::c_function_address(id))) {}

+ExternalReference::ExternalReference(ApiFunction* fun)
+  : address_(Redirect(fun->address())) {}
+
 ExternalReference::ExternalReference(Builtins::Name name)
   : address_(Builtins::builtin_address(name)) {}

@@ -608,6 +612,27 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() {
   return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
 }

+ExternalReference ExternalReference::handle_scope_extensions_address() {
+  return ExternalReference(HandleScope::current_extensions_address());
+}
+
+ExternalReference ExternalReference::handle_scope_next_address() {
+  return ExternalReference(HandleScope::current_next_address());
+}
+
+ExternalReference ExternalReference::handle_scope_limit_address() {
+  return ExternalReference(HandleScope::current_limit_address());
+}
+
+ExternalReference ExternalReference::scheduled_exception_address() {
+  return ExternalReference(Top::scheduled_exception_address());
+}
+
 #ifdef V8_NATIVE_REGEXP

 ExternalReference ExternalReference::re_check_stack_guard_state() {

8
deps/v8/src/assembler.h

@@ -373,6 +373,8 @@ class ExternalReference BASE_EMBEDDED {
  public:
   explicit ExternalReference(Builtins::CFunctionId id);

+  explicit ExternalReference(ApiFunction* ptr);
+
   explicit ExternalReference(Builtins::Name name);

   explicit ExternalReference(Runtime::FunctionId id);

@@ -422,6 +424,12 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference double_fp_operation(Token::Value operation);
   static ExternalReference compare_doubles();

+  static ExternalReference handle_scope_extensions_address();
+  static ExternalReference handle_scope_next_address();
+  static ExternalReference handle_scope_limit_address();
+
+  static ExternalReference scheduled_exception_address();
+
   Address address() const { return reinterpret_cast<Address>(address_); }

 #ifdef ENABLE_DEBUGGER_SUPPORT

8
deps/v8/src/ast.cc

@@ -28,6 +28,7 @@

 #include "v8.h"
 #include "ast.h"
+#include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"

@@ -138,6 +139,13 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
 }

+bool ObjectLiteral::Property::IsCompileTimeValue() {
+  return kind_ == CONSTANT ||
+      (kind_ == MATERIALIZED_LITERAL &&
+       CompileTimeValue::IsCompileTimeValue(value_));
+}
+
 bool ObjectLiteral::IsValidJSON() {
   int length = properties()->length();
   for (int i = 0; i < length; i++) {

2
deps/v8/src/ast.h

@@ -747,6 +747,8 @@ class ObjectLiteral: public MaterializedLiteral {
     Expression* value() { return value_; }
     Kind kind() { return kind_; }

+    bool IsCompileTimeValue();
+
    private:
     Literal* key_;
     Expression* value_;

3
deps/v8/src/bootstrapper.cc

@@ -316,8 +316,11 @@ Genesis* Genesis::current_ = NULL;

 void Bootstrapper::Iterate(ObjectVisitor* v) {
   natives_cache.Iterate(v);
+  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
+  v->Synchronize("Extensions");
   PendingFixups::Iterate(v);
+  v->Synchronize("PendingFixups");
 }

76
deps/v8/src/builtins.cc

@@ -538,6 +538,44 @@ static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
 }

+static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }

@@ -567,6 +605,44 @@ static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
 }

+static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
 static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
   KeyedStoreIC::GenerateExtendStorage(masm);
 }

90
deps/v8/src/builtins.h

@@ -48,44 +48,58 @@ namespace internal {

 // Define list of builtins implemented in assembly.
 #define BUILTIN_LIST_A(V)                                                \
   V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)                  \
   V(JSConstructCall, BUILTIN, UNINITIALIZED)                             \
   V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED)                      \
   V(JSEntryTrampoline, BUILTIN, UNINITIALIZED)                           \
   V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)                  \
                                                                          \
   V(LoadIC_Miss, BUILTIN, UNINITIALIZED)                                 \
   V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED)                            \
   V(StoreIC_Miss, BUILTIN, UNINITIALIZED)                                \
   V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED)                           \
                                                                          \
   V(StoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)                       \
   V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)                  \
                                                                          \
   V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED)                           \
   V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC)                      \
   V(LoadIC_Normal, LOAD_IC, MONOMORPHIC)                                 \
   V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC)                            \
   V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC)                           \
   V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC)                      \
   V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC)                            \
                                                                          \
   V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED)                \
   V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC)           \
   V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC)                     \
+  V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC)           \
+  V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC)          \
+  V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC)  \
+  V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC)            \
+  V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC)    \
+  V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC)          \
                                                                          \
   V(StoreIC_Initialize, STORE_IC, UNINITIALIZED)                         \
   V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC)                          \
                                                                          \
   V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED)              \
   V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC)                   \
+  V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC)         \
+  V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC)        \
+  V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC)\
+  V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC)          \
+  V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC)  \
+  V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC)        \
                                                                          \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */              \
   V(FunctionCall, BUILTIN, UNINITIALIZED)                                \
   V(FunctionApply, BUILTIN, UNINITIALIZED)                               \
                                                                          \
   V(ArrayCode, BUILTIN, UNINITIALIZED)                                   \
   V(ArrayConstructCode, BUILTIN, UNINITIALIZED)

 #ifdef ENABLE_DEBUGGER_SUPPORT

91
deps/v8/src/code-stubs.cc

@@ -36,10 +36,27 @@ namespace v8 {
 namespace internal {

 Handle<Code> CodeStub::GetCode() {
-  uint32_t key = GetKey();
-  int index = Heap::code_stubs()->FindEntry(key);
-  if (index == NumberDictionary::kNotFound) {
-    HandleScope scope;
+  bool custom_cache = has_custom_cache();
+
+  int index = 0;
+  uint32_t key = 0;
+  if (custom_cache) {
+    Code* cached;
+    if (GetCustomCache(&cached)) {
+      return Handle<Code>(cached);
+    } else {
+      index = NumberDictionary::kNotFound;
+    }
+  } else {
+    key = GetKey();
+    index = Heap::code_stubs()->FindEntry(key);
+    if (index != NumberDictionary::kNotFound)
+      return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+  }
+
+  Code* result;
+  {
+    v8::HandleScope scope;

     // Update the static counter each time a new code stub is generated.
     Counters::code_stubs.Increment();

@@ -79,63 +96,29 @@ Handle<Code> CodeStub::GetCode() {
     }
 #endif

-    // Update the dictionary and the root in Heap.
-    Handle<NumberDictionary> dict =
-        Factory::DictionaryAtNumberPut(
-            Handle<NumberDictionary>(Heap::code_stubs()),
-            key,
-            code);
-    Heap::public_set_code_stubs(*dict);
-    index = Heap::code_stubs()->FindEntry(key);
+    if (custom_cache) {
+      SetCustomCache(*code);
+    } else {
+      // Update the dictionary and the root in Heap.
+      Handle<NumberDictionary> dict =
+          Factory::DictionaryAtNumberPut(
+              Handle<NumberDictionary>(Heap::code_stubs()),
+              key,
+              code);
+      Heap::public_set_code_stubs(*dict);
+    }
+    result = *code;
   }

-  ASSERT(index != NumberDictionary::kNotFound);
-  return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+  return Handle<Code>(result);
 }


 const char* CodeStub::MajorName(CodeStub::Major major_key) {
   switch (major_key) {
-    case CallFunction:
-      return "CallFunction";
-    case GenericBinaryOp:
-      return "GenericBinaryOp";
-    case SmiOp:
-      return "SmiOp";
-    case Compare:
-      return "Compare";
-    case RecordWrite:
-      return "RecordWrite";
-    case StackCheck:
-      return "StackCheck";
-    case UnarySub:
-      return "UnarySub";
-    case RevertToNumber:
-      return "RevertToNumber";
-    case ToBoolean:
-      return "ToBoolean";
-    case Instanceof:
-      return "Instanceof";
-    case CounterOp:
-      return "CounterOp";
-    case ArgumentsAccess:
-      return "ArgumentsAccess";
-    case Runtime:
-      return "Runtime";
-    case CEntry:
-      return "CEntry";
-    case JSEntry:
-      return "JSEntry";
-    case GetProperty:
-      return "GetProperty";
-    case SetProperty:
-      return "SetProperty";
-    case InvokeBuiltin:
-      return "InvokeBuiltin";
-    case ConvertToDouble:
-      return "ConvertToDouble";
-    case WriteInt32ToHeapNumber:
-      return "WriteInt32ToHeapNumber";
+#define DEF_CASE(name) case name: return #name;
+    CODE_STUB_LIST_ALL(DEF_CASE)
+#undef DEF_CASE
     default:
       UNREACHABLE();
       return NULL;
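MajorName() now derives its cases from the stub-list macro instead of a hand-maintained switch. A standalone sketch of the X-macro pattern involved (hypothetical DEMO_STUB_LIST, not V8's real list), showing how one list expands into both an enum and a name table so the two can never drift apart:

#include <cstdio>

#define DEMO_STUB_LIST(V) \
  V(CallFunction)         \
  V(RecordWrite)          \
  V(JSEntry)

class DemoStub {
 public:
  enum Major {
#define DEF_ENUM(name) name,
    DEMO_STUB_LIST(DEF_ENUM)   // expands to: CallFunction, RecordWrite, JSEntry,
#undef DEF_ENUM
    NUMBER_OF_IDS
  };

  static const char* MajorName(Major major_key) {
    switch (major_key) {
#define DEF_CASE(name) case name: return #name;
      DEMO_STUB_LIST(DEF_CASE)  // expands to: case CallFunction: return "CallFunction"; ...
#undef DEF_CASE
      default: return NULL;
    }
  }
};

int main() {
  std::printf("%s\n", DemoStub::MajorName(DemoStub::RecordWrite));  // prints "RecordWrite"
  return 0;
}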

67
deps/v8/src/code-stubs.h

@@ -31,32 +31,51 @@
 namespace v8 {
 namespace internal {

+// List of code stubs used on all platforms. The order in this list is
+// important as only the stubs up to and including RecordWrite allow nested
+// stub calls.
+#define CODE_STUB_LIST_ALL(V) \
+  V(CallFunction)             \
+  V(GenericBinaryOp)          \
+  V(SmiOp)                    \
+  V(Compare)                  \
+  V(RecordWrite)              \
+  V(ConvertToDouble)          \
+  V(WriteInt32ToHeapNumber)   \
+  V(StackCheck)               \
+  V(UnarySub)                 \
+  V(RevertToNumber)           \
+  V(ToBoolean)                \
+  V(Instanceof)               \
+  V(CounterOp)                \
+  V(ArgumentsAccess)          \
+  V(Runtime)                  \
+  V(CEntry)                   \
+  V(JSEntry)
+
+// List of code stubs only used on ARM platforms.
+#ifdef V8_TARGET_ARCH_ARM
+#define CODE_STUB_LIST_ARM(V) \
+  V(GetProperty)              \
+  V(SetProperty)              \
+  V(InvokeBuiltin)            \
+  V(RegExpCEntry)
+#else
+#define CODE_STUB_LIST_ARM(V)
+#endif
+
+// Combined list of code stubs.
+#define CODE_STUB_LIST(V) \
+  CODE_STUB_LIST_ALL(V)   \
+  CODE_STUB_LIST_ARM(V)

-// CodeStub is the base class of all stubs.
+// CodeStub is the base class of all stubs.
 class CodeStub BASE_EMBEDDED {
  public:
   enum Major {
-    CallFunction,
-    GenericBinaryOp,
-    SmiOp,
-    Compare,
-    RecordWrite,  // Last stub that allows stub calls inside.
-    ConvertToDouble,
-    WriteInt32ToHeapNumber,
-    StackCheck,
-    UnarySub,
-    RevertToNumber,
-    ToBoolean,
-    Instanceof,
-    CounterOp,
-    ArgumentsAccess,
-    Runtime,
-    CEntry,
-    JSEntry,
-    GetProperty,    // ARM only
-    SetProperty,    // ARM only
-    InvokeBuiltin,  // ARM only
-    RegExpCEntry,   // ARM only
+#define DEF_ENUM(name) name,
+    CODE_STUB_LIST(DEF_ENUM)
+#undef DEF_ENUM
+    NoCache,  // marker for stubs that do custom caching
     NUMBER_OF_IDS
   };

@@ -73,6 +92,12 @@ class CodeStub BASE_EMBEDDED {

   virtual ~CodeStub() {}

+  // Override these methods to provide a custom caching mechanism for
+  // an individual type of code stub.
+  virtual bool GetCustomCache(Code** code_out) { return false; }
+  virtual void SetCustomCache(Code* value) { }
+  virtual bool has_custom_cache() { return false; }
+
  protected:
   static const int kMajorBits = 5;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
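The three virtuals above are the whole custom-caching contract that the new CodeStub::GetCode() honours. A standalone miniature (hypothetical StubBase/CachingStub names and a stand-in Code type, not V8 code) of how a stub such as ApiGetterEntryStub can satisfy it with a single external cache slot:

#include <cassert>
#include <cstddef>

struct Code { /* stand-in for v8::internal::Code */ };

class StubBase {
 public:
  virtual ~StubBase() {}
  virtual bool GetCustomCache(Code** code_out) { return false; }
  virtual void SetCustomCache(Code* value) {}
  virtual bool has_custom_cache() { return false; }
};

// Caches its code in one externally owned slot (in V8 this role is played
// by the AccessorInfo's load stub cache field).
class CachingStub : public StubBase {
 public:
  explicit CachingStub(Code** slot) : slot_(slot) {}
  virtual bool has_custom_cache() { return true; }
  virtual bool GetCustomCache(Code** code_out) {
    if (*slot_ == NULL) return false;  // miss: caller must compile
    *code_out = *slot_;
    return true;                       // hit: reuse cached code
  }
  virtual void SetCustomCache(Code* value) { *slot_ = value; }

 private:
  Code** slot_;
};

int main() {
  Code compiled;
  Code* slot = NULL;
  CachingStub stub(&slot);
  Code* out = NULL;
  assert(!stub.GetCustomCache(&out));  // first lookup misses...
  stub.SetCustomCache(&compiled);      // ...so GetCode() would compile + store
  assert(stub.GetCustomCache(&out) && out == &compiled);  // later lookups hit
  return 0;
}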

18
deps/v8/src/codegen.cc

@@ -274,7 +274,7 @@ void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
 }

-static Handle<Code> ComputeLazyCompile(int argc) {
+Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
   CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
 }

@@ -551,4 +551,20 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
 }

+bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
+  Object* cache = info()->load_stub_cache();
+  if (cache->IsUndefined()) {
+    return false;
+  } else {
+    *code_out = Code::cast(cache);
+    return true;
+  }
+}
+
+void ApiGetterEntryStub::SetCustomCache(Code* value) {
+  info()->set_load_stub_cache(value);
+}
+
 } }  // namespace v8::internal

29
deps/v8/src/codegen.h

@@ -56,6 +56,7 @@
 //   ~CodeGenerator
 //   ProcessDeferred
 //   GenCode
+//   ComputeLazyCompile
 //   BuildBoilerplate
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop

@@ -300,7 +301,7 @@ class CEntryStub : public CodeStub {
                     Label* throw_normal_exception,
                     Label* throw_termination_exception,
                     Label* throw_out_of_memory_exception,
-                    StackFrame::Type frame_type,
+                    ExitFrame::Mode mode,
                     bool do_gc,
                     bool always_allocate_scope);
   void GenerateThrowTOS(MacroAssembler* masm);

@@ -319,6 +320,32 @@ class CEntryStub : public CodeStub {
 };

+class ApiGetterEntryStub : public CodeStub {
+ public:
+  ApiGetterEntryStub(Handle<AccessorInfo> info,
+                     ApiFunction* fun)
+      : info_(info),
+        fun_(fun) { }
+  void Generate(MacroAssembler* masm);
+  virtual bool has_custom_cache() { return true; }
+  virtual bool GetCustomCache(Code** code_out);
+  virtual void SetCustomCache(Code* value);
+
+  static const int kStackSpace = 6;
+  static const int kArgc = 4;
+
+ private:
+  Handle<AccessorInfo> info() { return info_; }
+  ApiFunction* fun() { return fun_; }
+  Major MajorKey() { return NoCache; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "ApiEntryStub"; }
+  // The accessor info associated with the function.
+  Handle<AccessorInfo> info_;
+  // The function to be called.
+  ApiFunction* fun_;
+};
+
 class CEntryDebugBreakStub : public CEntryStub {
  public:
   CEntryDebugBreakStub() : CEntryStub(1) { }

241
deps/v8/src/compiler.cc

@@ -46,13 +46,25 @@ class CodeGenSelector: public AstVisitor {
  public:
   enum CodeGenTag { NORMAL, FAST };

-  CodeGenSelector() : has_supported_syntax_(true) {}
+  CodeGenSelector()
+      : has_supported_syntax_(true),
+        location_(Location::Nowhere()) {
+  }

   CodeGenTag Select(FunctionLiteral* fun);

  private:
+  void VisitDeclarations(ZoneList<Declaration*>* decls);
   void VisitStatements(ZoneList<Statement*>* stmts);

+  // Visit an expression in effect context with a desired location of
+  // nowhere.
+  void VisitAsEffect(Expression* expr);
+
+  // Visit an expression in value context with a desired location of
+  // temporary.
+  void VisitAsValue(Expression* expr);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)

@@ -60,6 +72,9 @@ class CodeGenSelector: public AstVisitor {

   bool has_supported_syntax_;

+  // The desired location of the currently visited expression.
+  Location location_;
+
   DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
 };
@@ -107,7 +122,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
   CodeGenSelector selector;
   CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
   if (code_gen == CodeGenSelector::FAST) {
-    return FastCodeGenerator::MakeCode(literal, script);
+    return FastCodeGenerator::MakeCode(literal, script, is_eval);
   }
   ASSERT(code_gen == CodeGenSelector::NORMAL);
 }
@@ -450,15 +465,17 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,

 CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
   Scope* scope = fun->scope();

-  if (!scope->is_global_scope()) return NORMAL;
+  if (!scope->is_global_scope()) {
+    if (FLAG_trace_bailout) PrintF("Non-global scope\n");
+    return NORMAL;
+  }
   ASSERT(scope->num_heap_slots() == 0);
   ASSERT(scope->arguments() == NULL);

-  if (!scope->declarations()->is_empty()) return NORMAL;
-  if (fun->materialized_literal_count() > 0) return NORMAL;
-  if (fun->body()->is_empty()) return NORMAL;
-
   has_supported_syntax_ = true;
+  VisitDeclarations(fun->scope()->declarations());
+  if (!has_supported_syntax_) return NORMAL;
+
   VisitStatements(fun->body());
   return has_supported_syntax_ ? FAST : NORMAL;
 }
@@ -480,34 +497,66 @@ CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
 } while (false)

+void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); i++) {
+    Visit(decls->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
 void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
   for (int i = 0, len = stmts->length(); i < len; i++) {
-    CHECK_BAILOUT;
     Visit(stmts->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+void CodeGenSelector::VisitAsEffect(Expression* expr) {
+  if (location_.is_nowhere()) {
+    Visit(expr);
+  } else {
+    Location saved = location_;
+    location_ = Location::Nowhere();
+    Visit(expr);
+    location_ = saved;
+  }
+}
+
+void CodeGenSelector::VisitAsValue(Expression* expr) {
+  if (location_.is_temporary()) {
+    Visit(expr);
+  } else {
+    Location saved = location_;
+    location_ = Location::Temporary();
+    Visit(expr);
+    location_ = saved;
   }
 }

 void CodeGenSelector::VisitDeclaration(Declaration* decl) {
-  BAILOUT("Declaration");
+  Variable* var = decl->proxy()->var();
+  if (!var->is_global() || var->mode() == Variable::CONST) {
+    BAILOUT("Non-global declaration");
+  }
 }

 void CodeGenSelector::VisitBlock(Block* stmt) {
-  BAILOUT("Block");
+  VisitStatements(stmt->statements());
 }

 void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Expression* expr = stmt->expression();
-  Visit(expr);
-  CHECK_BAILOUT;
-  expr->set_location(Location::Nowhere());
+  VisitAsEffect(stmt->expression());
 }

 void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
-  BAILOUT("EmptyStatement");
+  // EmptyStatement is supported.
 }

@@ -527,7 +576,7 @@ void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {

 void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
-  Visit(stmt->expression());
+  VisitAsValue(stmt->expression());
 }
@@ -582,7 +631,10 @@ void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {

 void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
-  BAILOUT("FunctionLiteral");
+  if (!expr->AllowsLazyCompilation()) {
+    BAILOUT("FunctionLiteral does not allow lazy compilation");
+  }
+  expr->set_location(location_);
 }
@@ -598,37 +650,88 @@ void CodeGenSelector::VisitConditional(Conditional* expr) {

 void CodeGenSelector::VisitSlot(Slot* expr) {
-  Slot::Type type = expr->type();
-  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
-    BAILOUT("non-parameter/non-local slot reference");
-  }
+  UNREACHABLE();
 }

 void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
   Expression* rewrite = expr->var()->rewrite();
-  if (rewrite == NULL) BAILOUT("global variable reference");
-  Visit(rewrite);
+  // A rewrite of NULL indicates a global variable.
+  if (rewrite != NULL) {
+    // Non-global.
+    Slot* slot = rewrite->AsSlot();
+    if (slot == NULL) {
+      // This is a variable rewritten to an explicit property access
+      // on the arguments object.
+      BAILOUT("non-global/non-slot variable reference");
+    }
+    Slot::Type type = slot->type();
+    if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+      BAILOUT("non-parameter/non-local slot reference");
+    }
+  }
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitLiteral(Literal* expr) {
-  // All literals are supported.
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
-  BAILOUT("RegExpLiteral");
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
-  BAILOUT("ObjectLiteral");
+  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+  for (int i = 0, len = properties->length(); i < len; i++) {
+    ObjectLiteral::Property* property = properties->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+
+      // For (non-compile-time) materialized literals and computed
+      // properties with symbolic keys we will use an IC and therefore not
+      // generate code for the key.
+      case ObjectLiteral::Property::COMPUTED:  // Fall through.
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (property->key()->handle()->IsSymbol()) {
+          break;
+        }
+        // Fall through.
+
+      // In all other cases we need the key's value on the stack
+      // for a runtime call.  (Relies on TEMP meaning STACK.)
+      case ObjectLiteral::Property::GETTER:  // Fall through.
+      case ObjectLiteral::Property::SETTER:  // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+        VisitAsValue(property->key());
+        CHECK_BAILOUT;
+        break;
+    }
+    VisitAsValue(property->value());
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
-  BAILOUT("ArrayLiteral");
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    if (subexpr->AsLiteral() != NULL) continue;
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+    VisitAsValue(subexpr);
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
@@ -640,7 +743,10 @@ void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {

 void CodeGenSelector::VisitAssignment(Assignment* expr) {
   // We support plain non-compound assignments to parameters and
   // non-context (stack-allocated) locals.
-  if (expr->starts_initialization_block()) BAILOUT("initialization block");
+  if (expr->starts_initialization_block() ||
+      expr->ends_initialization_block()) {
+    BAILOUT("initialization block start");
+  }

   Token::Value op = expr->op();
   if (op == Token::INIT_CONST) BAILOUT("initialize constant");

@@ -649,15 +755,18 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
   }

   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  if (var == NULL || var->is_global()) BAILOUT("non-variable assignment");
+  if (var == NULL) BAILOUT("non-variable assignment");

-  ASSERT(var->slot() != NULL);
-  Slot::Type type = var->slot()->type();
-  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
-    BAILOUT("non-parameter/non-local slot assignment");
+  if (!var->is_global()) {
+    ASSERT(var->slot() != NULL);
+    Slot::Type type = var->slot()->type();
+    if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+      BAILOUT("non-parameter/non-local slot assignment");
+    }
   }

-  Visit(expr->value());
+  VisitAsValue(expr->value());
+  expr->set_location(location_);
 }
@@ -667,22 +776,64 @@ void CodeGenSelector::VisitThrow(Throw* expr) {

 void CodeGenSelector::VisitProperty(Property* expr) {
-  BAILOUT("Property");
+  VisitAsValue(expr->obj());
+  CHECK_BAILOUT;
+  VisitAsValue(expr->key());
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitCall(Call* expr) {
-  BAILOUT("Call");
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  // Check for supported calls
+  if (var != NULL && var->is_possibly_eval()) {
+    BAILOUT("Call to a function named 'eval'");
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+  } else {
+    BAILOUT("Call to a non-global function");
+  }
+  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
+  for (int i = 0; i < args->length(); i++) {
+    VisitAsValue(args->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitCallNew(CallNew* expr) {
-  BAILOUT("CallNew");
+  VisitAsValue(expr->expression());
+  CHECK_BAILOUT;
+  ZoneList<Expression*>* args = expr->arguments();
+  // Check all arguments to the call
+  for (int i = 0; i < args->length(); i++) {
+    VisitAsValue(args->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }

 void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
-  BAILOUT("CallRuntime");
+  // In case of JS runtime function bail out.
+  if (expr->function() == NULL) BAILOUT("call JS runtime function");
+  // Check for inline runtime call
+  if (expr->name()->Get(0) == '_' &&
+      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+    BAILOUT("inlined runtime call");
+  }
+  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    VisitAsValue(expr->arguments()->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }

@@ -697,7 +848,19 @@ void CodeGenSelector::VisitCountOperation(CountOperation* expr) {

 void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
-  BAILOUT("BinaryOperation");
+  switch (expr->op()) {
+    case Token::OR:
+      VisitAsValue(expr->left());
+      CHECK_BAILOUT;
+      // The location for the right subexpression is the same as for the
+      // whole expression so we call Visit directly.
+      Visit(expr->right());
+      break;
+
+    default:
+      BAILOUT("Unsupported binary operation");
+  }
+  expr->set_location(location_);
 }
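VisitAsEffect and VisitAsValue implement a classic context-passing visitor: a single mutable desired-location field, saved and restored around each subexpression instead of being threaded through every Visit signature. A standalone miniature of the pattern (hypothetical Expr/Selector types, not V8 code):

#include <cassert>
#include <cstddef>

enum LocationType { NOWHERE, TEMP };

struct Expr {
  LocationType location;
  Expr* child;  // single subexpression, NULL for leaves
};

class Selector {
 public:
  Selector() : location_(NOWHERE) {}

  void VisitAsValue(Expr* e) {
    LocationType saved = location_;
    location_ = TEMP;     // value context: the result must materialize
    Visit(e);
    location_ = saved;    // restore the enclosing context
  }

  void VisitAsEffect(Expr* e) {
    LocationType saved = location_;
    location_ = NOWHERE;  // effect context: the result is dropped
    Visit(e);
    location_ = saved;
  }

 private:
  void Visit(Expr* e) {
    e->location = location_;  // record the context this node was visited in
    if (e->child != NULL) VisitAsValue(e->child);  // operands are values
  }

  LocationType location_;
};

int main() {
  Expr leaf = { NOWHERE, NULL };
  Expr root = { NOWHERE, &leaf };
  Selector s;
  s.VisitAsEffect(&root);            // like an expression statement
  assert(root.location == NOWHERE);  // whole expression: result discarded
  assert(leaf.location == TEMP);     // operand: still needed as a value
  return 0;
}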

2
deps/v8/src/conversions-inl.h

@@ -84,7 +84,7 @@ int32_t DoubleToInt32(double x) {
   static const double two32 = 4294967296.0;
   static const double two31 = 2147483648.0;
   if (!isfinite(x) || x == 0) return 0;
-  if (x < 0 || x >= two32) x = fmod(x, two32);
+  if (x < 0 || x >= two32) x = modulo(x, two32);
   x = (x >= 0) ? floor(x) : ceil(x) + two32;
   return (int32_t) ((x >= two31) ? x - two32 : x);
 }
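The only change here is fmod to modulo, the platform-abstracted remainder this patch introduces (platform-posix.cc, per the file list, can simply forward to fmod; other platforms may substitute their own version). A standalone sketch of the routine with that assumption spelled out, plus two worked values:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Assumption: fmod-compatible behaviour, as on POSIX.
static double modulo(double x, double y) { return std::fmod(x, y); }

static int32_t DoubleToInt32(double x) {
  static const double two32 = 4294967296.0;
  static const double two31 = 2147483648.0;
  if (!std::isfinite(x) || x == 0) return 0;
  if (x < 0 || x >= two32) x = modulo(x, two32);     // reduce into (-2^32, 2^32)
  x = (x >= 0) ? std::floor(x) : std::ceil(x) + two32;  // truncate toward zero
  return (int32_t)((x >= two31) ? x - two32 : x);    // wrap into int32 range
}

int main() {
  std::printf("%d\n", (int)DoubleToInt32(4294967297.5));  // 1: 2^32 + 1.5 reduced, truncated
  std::printf("%d\n", (int)DoubleToInt32(-1.5));          // -1: truncation toward zero
  return 0;
}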

2
deps/v8/src/conversions.cc

@@ -664,7 +664,7 @@ char* DoubleToRadixCString(double value, int radix) {
   int integer_pos = kBufferSize - 2;
   do {
     integer_buffer[integer_pos--] =
-        chars[static_cast<int>(fmod(integer_part, radix))];
+        chars[static_cast<int>(modulo(integer_part, radix))];
     integer_part /= radix;
   } while (integer_part >= 1.0);
   // Sanity check.

1
deps/v8/src/conversions.h

@@ -31,6 +31,7 @@
 namespace v8 {
 namespace internal {

 // The fast double-to-int conversion routine does not guarantee
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded

10
deps/v8/src/debug-delay.js

@@ -1243,6 +1243,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
       this.threadsRequest_(request, response);
     } else if (request.command == 'suspend') {
       this.suspendRequest_(request, response);
+    } else if (request.command == 'version') {
+      this.versionRequest_(request, response);
     } else {
       throw new Error('Unknown command "' + request.command + '" in request');
     }

@@ -1911,11 +1913,17 @@ DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {

 DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
+  // TODO(peter.rybin): probably we need some body field here.
   response.running = false;
 };

+DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
+  response.body = {
+    V8Version: %GetV8Version()
+  }
+};
+
 // Check whether the previously processed command caused the VM to become
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {

12
deps/v8/src/factory.cc

@@ -222,6 +222,18 @@ Handle<PixelArray> Factory::NewPixelArray(int length,
 }

+Handle<ExternalArray> Factory::NewExternalArray(int length,
+                                                ExternalArrayType array_type,
+                                                void* external_pointer,
+                                                PretenureFlag pretenure) {
+  ASSERT(0 <= length);
+  CALL_HEAP_FUNCTION(Heap::AllocateExternalArray(length,
+                                                 array_type,
+                                                 external_pointer,
+                                                 pretenure), ExternalArray);
+}
+
 Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
 }

9
deps/v8/src/factory.h

@@ -155,10 +155,17 @@ class Factory : public AllStatic {
   static Handle<ByteArray> NewByteArray(int length,
                                         PretenureFlag pretenure = NOT_TENURED);

-  static Handle<PixelArray> NewPixelArray(int length,
+  static Handle<PixelArray> NewPixelArray(
+      int length,
       uint8_t* external_pointer,
       PretenureFlag pretenure = NOT_TENURED);

+  static Handle<ExternalArray> NewExternalArray(
+      int length,
+      ExternalArrayType array_type,
+      void* external_pointer,
+      PretenureFlag pretenure = NOT_TENURED);
+
   static Handle<Map> NewMap(InstanceType type, int instance_size);

   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);

197
deps/v8/src/fast-codegen.cc

@@ -29,16 +29,19 @@

 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"

 namespace v8 {
 namespace internal {

 Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script) {
+                                         Handle<Script> script,
+                                         bool is_eval) {
   CodeGenerator::MakeCodePrologue(fun);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  FastCodeGenerator cgen(&masm);
+  FastCodeGenerator cgen(&masm, script, is_eval);
   cgen.Generate(fun);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());

@@ -50,6 +53,7 @@ Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,

 int FastCodeGenerator::SlotOffset(Slot* slot) {
+  ASSERT(slot != NULL);
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -slot->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -66,6 +70,137 @@ int FastCodeGenerator::SlotOffset(Slot* slot) {
   return offset;
 }

+void FastCodeGenerator::Move(Location destination, Location source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      switch (source.type()) {
+        case Location::NOWHERE:
+          UNREACHABLE();
+        case Location::TEMP:
+          break;
+      }
+      break;
+  }
+}
+
+// All platform macro assemblers in {ia32,x64,arm} have a push(Register)
+// function.
+void FastCodeGenerator::Move(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      masm_->push(source);
+      break;
+  }
+}
+
+// All platform macro assemblers in {ia32,x64,arm} have a pop(Register)
+// function.
+void FastCodeGenerator::Move(Register destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      masm_->pop(destination);
+  }
+}
+
+void FastCodeGenerator::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      UNREACHABLE();
+    } else {
+      // Count global variables and functions for later processing
+      globals++;
+    }
+  }
+
+  // Return in case of no declared global functions or variables.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // In case this is const property use the hole.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        Handle<JSFunction> function = BuildBoilerplate(node->fun());
+        // Check for stack-overflow exception.
+        if (HasStackOverflow()) return;
+        array->set(j++, *function);
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+
+Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
+#ifdef DEBUG
+  // We should not try to compile the same function literal more than
+  // once.
+  fun->mark_as_compiled();
+#endif
+
+  // Generate code
+  Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
+  // Check for stack-overflow exception.
+  if (code.is_null()) {
+    SetStackOverflow();
+    return Handle<JSFunction>::null();
+  }
+
+  // Create a boilerplate function.
+  Handle<JSFunction> function =
+      Factory::NewFunctionBoilerplate(fun->name(),
+                                      fun->materialized_literal_count(),
+                                      code);
+  CodeGenerator::SetFunctionInfo(function, fun, false, script_);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger that a new function has been added.
+  Debugger::OnNewFunction(function);
+#endif
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(function,
+                                       fun->expected_property_count());
+  return function;
+}
+
 void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
   if (FLAG_debug_info) {
     CodeGenerator::RecordPositions(masm_, fun->start_position());
@@ -100,12 +235,22 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {

 void FastCodeGenerator::VisitBlock(Block* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ Block");
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+}
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
 }

 void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ EmptyStatement");
+  SetStatementPosition(stmt);
 }
@@ -174,11 +319,6 @@ void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
 }

-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  UNREACHABLE();
-}
-
 void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* expr) {
   UNREACHABLE();
@@ -196,18 +336,8 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
 }

-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNREACHABLE();
-}
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNREACHABLE();
-}
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNREACHABLE();
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  Move(expr->location(), expr);
 }
@@ -221,26 +351,6 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
 }

-void FastCodeGenerator::VisitProperty(Property* expr) {
-  UNREACHABLE();
-}
-
-void FastCodeGenerator::VisitCall(Call* expr) {
-  UNREACHABLE();
-}
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
-  UNREACHABLE();
-}
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  UNREACHABLE();
-}
-
 void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   UNREACHABLE();
 }
@@ -251,11 +361,6 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
 }

-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  UNREACHABLE();
-}
-
 void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   UNREACHABLE();
 }

27
deps/v8/src/fast-codegen.h

@@ -38,17 +38,36 @@ namespace internal {

 class FastCodeGenerator: public AstVisitor {
  public:
-  explicit FastCodeGenerator(MacroAssembler* masm)
-      : masm_(masm), function_(NULL) {
+  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+      : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
   }

-  static Handle<Code> MakeCode(FunctionLiteral* fun, Handle<Script> script);
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);

   void Generate(FunctionLiteral* fun);

  private:
   int SlotOffset(Slot* slot);
+  void Move(Location destination, Location source);
+  void Move(Location destination, Register source);
+  void Move(Location destination, Slot* source);
+  void Move(Location destination, Literal* source);
+  void Move(Register destination, Location source);
+  void Move(Slot* destination, Location source);
+
+  // Drop the TOS, and store source to destination.
+  // If destination is TOS, just overwrite TOS with source.
+  void DropAndMove(Location destination, Register source);
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);

@@ -61,6 +80,8 @@ class FastCodeGenerator: public AstVisitor {

   MacroAssembler* masm_;
   FunctionLiteral* function_;
+  Handle<Script> script_;
+  bool is_eval_;

   DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
 };

6
deps/v8/src/flag-definitions.h

@@ -132,8 +132,6 @@ DEFINE_bool(stack_trace_on_abort, true,
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(trace, false, "trace function calls")
 DEFINE_bool(defer_negation, true, "defer negation operation")
-DEFINE_bool(check_stack, true,
-            "check stack for overflow, interrupt, breakpoint")

 // codegen.cc
 DEFINE_bool(lazy, true, "use lazy compilation")

@@ -163,8 +161,8 @@ DEFINE_int(max_stack_trace_source_length, 300,
            "maximum length of function source code printed in a stack trace.")

 // heap.cc
-DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation")
-DEFINE_int(old_space_size, 0, "size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation")
 DEFINE_bool(gc_global, false, "always perform global GCs")
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,

18
deps/v8/src/frames.cc

@@ -393,8 +393,19 @@ Code* EntryConstructFrame::code() const {
 }

+Object*& ExitFrame::code_slot() const {
+  const int offset = ExitFrameConstants::kCodeOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
 Code* ExitFrame::code() const {
-  return Heap::c_entry_code();
+  Object* code = code_slot();
+  if (code->IsSmi()) {
+    return Heap::c_entry_debug_break_code();
+  } else {
+    return Code::cast(code);
+  }
 }

@@ -412,11 +423,6 @@ Address ExitFrame::GetCallerStackPointer() const {
 }

-Code* ExitDebugFrame::code() const {
-  return Heap::c_entry_debug_break_code();
-}
-
 Address StandardFrame::GetExpressionAddress(int n) const {
   const int offset = StandardFrameConstants::kExpressionsOffset;
   return fp() + offset - n * kPointerSize;

25
deps/v8/src/frames.h

@@ -93,7 +93,6 @@ class StackHandler BASE_EMBEDDED {
   V(ENTRY, EntryFrame) \
   V(ENTRY_CONSTRUCT, EntryConstructFrame) \
   V(EXIT, ExitFrame) \
-  V(EXIT_DEBUG, ExitDebugFrame) \
   V(JAVA_SCRIPT, JavaScriptFrame) \
   V(INTERNAL, InternalFrame) \
   V(CONSTRUCT, ConstructFrame) \

@@ -119,7 +118,6 @@ class StackFrame BASE_EMBEDDED {
   bool is_entry() const { return type() == ENTRY; }
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
   bool is_exit() const { return type() == EXIT; }
-  bool is_exit_debug() const { return type() == EXIT_DEBUG; }
   bool is_java_script() const { return type() == JAVA_SCRIPT; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }

@@ -260,10 +258,13 @@ class EntryConstructFrame: public EntryFrame {
 // Exit frames are used to exit JavaScript execution and go to C.
 class ExitFrame: public StackFrame {
  public:
+  enum Mode { MODE_NORMAL, MODE_DEBUG };
+
   virtual Type type() const { return EXIT; }
   virtual Code* code() const;

+  Object*& code_slot() const;
+
   // Garbage collection support.
   virtual void Iterate(ObjectVisitor* v) const;

@@ -289,26 +290,6 @@ class ExitFrame: public StackFrame {
 };

-class ExitDebugFrame: public ExitFrame {
- public:
-  virtual Type type() const { return EXIT_DEBUG; }
-
-  virtual Code* code() const;
-
-  static ExitDebugFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_exit_debug());
-    return static_cast<ExitDebugFrame*>(frame);
-  }
-
- protected:
-  explicit ExitDebugFrame(StackFrameIterator* iterator)
-      : ExitFrame(iterator) { }
-
- private:
-  friend class StackFrameIterator;
-};
-
 class StandardFrame: public StackFrame {
  public:
   // Testers.

101
deps/v8/src/global-handles.cc

@@ -44,6 +44,10 @@ class GlobalHandles::Node : public Malloced {
     callback_ = NULL;
   }

+  Node() {
+    state_ = DESTROYED;
+  }
+
   explicit Node(Object* object) {
     Initialize(object);
     // Initialize link structure.
@@ -200,20 +204,80 @@ class GlobalHandles::Node : public Malloced {
 };

+class GlobalHandles::Pool BASE_EMBEDDED {
+ public:
+  Pool() {
+    current_ = new Chunk();
+    current_->previous = NULL;
+    next_ = current_->nodes;
+    limit_ = current_->nodes + kNodesPerChunk;
+  }
+
+  Node* Allocate() {
+    if (next_ < limit_) {
+      return next_++;
+    }
+    return SlowAllocate();
+  }
+
+  void Release() {
+    Chunk* current = current_;
+    ASSERT(current != NULL);  // At least a single block must be allocated
+    do {
+      Chunk* previous = current->previous;
+      delete current;
+      current = previous;
+    } while (current != NULL);
+    current_ = NULL;
+    next_ = limit_ = NULL;
+  }
+
+ private:
+  static const int kNodesPerChunk = (1 << 12) - 1;
+  struct Chunk : public Malloced {
+    Chunk* previous;
+    Node nodes[kNodesPerChunk];
+  };
+
+  Node* SlowAllocate() {
+    Chunk* chunk = new Chunk();
+    chunk->previous = current_;
+    current_ = chunk;
+
+    Node* new_nodes = current_->nodes;
+    next_ = new_nodes + 1;
+    limit_ = new_nodes + kNodesPerChunk;
+    return new_nodes;
+  }
+
+  Chunk* current_;
+  Node* next_;
+  Node* limit_;
+};
+
+static GlobalHandles::Pool pool_;
+
 Handle<Object> GlobalHandles::Create(Object* value) {
   Counters::global_handles.Increment();
   Node* result;
-  if (first_free() == NULL) {
-    // Allocate a new node.
-    result = new Node(value);
-    result->set_next(head());
-    set_head(result);
-  } else {
+  if (first_free()) {
     // Take the first node in the free list.
     result = first_free();
     set_first_free(result->next_free());
-    result->Initialize(value);
+  } else if (first_deallocated()) {
+    // Next try deallocated list
+    result = first_deallocated();
+    set_first_deallocated(result->next_free());
+    set_head(result);
+  } else {
+    // Allocate a new node.
+    result = pool_.Allocate();
+    result->set_next(head());
+    set_head(result);
   }
+  result->Initialize(value);
   return result->handle();
 }
@@ -292,7 +356,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
-  // At the same time deallocate all DESTROYED nodes
+  // At the same time deallocate all DESTROYED nodes.
   ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count;
   Node** p = &head_;

@@ -310,12 +374,19 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
       // Delete the link.
       Node* node = *p;
       *p = node->next();  // Update the link.
-      delete node;
+      if (first_deallocated()) {
+        first_deallocated()->set_next(node);
+      }
+      node->set_next_free(first_deallocated());
+      set_first_deallocated(node);
     } else {
       p = (*p)->next_addr();
     }
   }
   set_first_free(NULL);
+  if (first_deallocated()) {
+    first_deallocated()->set_next(head());
+  }
 }
@@ -329,16 +400,11 @@ void GlobalHandles::IterateRoots(ObjectVisitor* v) {
 }

 void GlobalHandles::TearDown() {
-  // Delete all the nodes in the linked list.
-  Node* current = head_;
-  while (current != NULL) {
-    Node* n = current;
-    current = current->next();
-    delete n;
-  }
-  // Reset the head and free_list.
+  // Reset all the lists.
   set_head(NULL);
   set_first_free(NULL);
+  set_first_deallocated(NULL);
+  pool_.Release();
 }

@@ -347,6 +413,7 @@ int GlobalHandles::number_of_global_object_weak_handles_ = 0;

 GlobalHandles::Node* GlobalHandles::head_ = NULL;
 GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;

 #ifdef DEBUG
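GlobalHandles::Pool above replaces per-Node new/delete with bump allocation out of large chunks that are all freed in one pass at TearDown. A standalone, simplified sketch of the same idea (hypothetical ChunkPool template, not V8 code):

#include <cassert>
#include <cstddef>

template <typename Node, int kNodesPerChunk>
class ChunkPool {
 public:
  ChunkPool() : current_(new Chunk(NULL)), next_(current_->nodes),
                limit_(current_->nodes + kNodesPerChunk) {}

  Node* Allocate() {
    if (next_ < limit_) return next_++;  // fast path: bump the pointer
    // Slow path: chain a fresh chunk in front of the old ones.
    current_ = new Chunk(current_);
    next_ = current_->nodes + 1;
    limit_ = current_->nodes + kNodesPerChunk;
    return current_->nodes;
  }

  void Release() {  // frees every node at once, newest chunk first
    while (current_ != NULL) {
      Chunk* previous = current_->previous;
      delete current_;
      current_ = previous;
    }
    next_ = limit_ = NULL;
  }

 private:
  struct Chunk {
    explicit Chunk(Chunk* prev) : previous(prev) {}
    Chunk* previous;
    Node nodes[kNodesPerChunk];
  };
  Chunk* current_;
  Node* next_;
  Node* limit_;
};

int main() {
  struct Handle { void* object; };
  ChunkPool<Handle, 4> pool;
  for (int i = 0; i < 10; i++) {
    assert(pool.Allocate() != NULL);  // crosses chunk boundaries at 4 and 8
  }
  pool.Release();
  return 0;
}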

18
deps/v8/src/global-handles.h

@@ -127,6 +127,7 @@ class GlobalHandles : public AllStatic {
   static void PrintStats();
   static void Print();
 #endif
+  class Pool;
  private:
   // Internal node structure, one for each global handle.
   class Node;

@@ -148,6 +149,23 @@ class GlobalHandles : public AllStatic {
   static Node* first_free_;
   static Node* first_free() { return first_free_; }
   static void set_first_free(Node* value) { first_free_ = value; }

+  // List of deallocated nodes.
+  // Deallocated nodes form a prefix of all the nodes and
+  // |first_deallocated| points to last deallocated node before
+  // |head|.  Those deallocated nodes are additionally linked
+  // by |next_free|:
+  //
+  //    1st deallocated        head
+  //           |                |
+  //           V                V
+  //    node   node   ...   node   node
+  //      .next ->    .next ->      .next ->
+  //    <- .next_free    <- .next_free    <- .next_free
+  static Node* first_deallocated_;
+  static Node* first_deallocated() { return first_deallocated_; }
+  static void set_first_deallocated(Node* value) {
+    first_deallocated_ = value;
+  }
 };

18
deps/v8/src/globals.h

@@ -103,6 +103,10 @@ typedef byte* Address;
 #define V8PRIxPTR "lx"
 #endif

+#if defined(__APPLE__) && defined(__MACH__)
+#define USING_MAC_ABI
+#endif
+
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
 typedef int32_t uc32;

@@ -170,6 +174,15 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
 #endif

+// Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
+const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
+
 // -----------------------------------------------------------------------------
 // Forward declarations for frequently used classes
 // (sorted alphabetically)

@@ -239,6 +252,7 @@ class Variable;
 class VariableProxy;
 class RelocInfo;
 class Deserializer;
+class GenericDeserializer;  // TODO(erikcorry): Get rid of this.
 class MessageLocation;
 class ObjectGroup;
 class TickSample;

@@ -263,7 +277,9 @@ enum AllocationSpace {
   LO_SPACE,             // Promoted large objects.

   FIRST_SPACE = NEW_SPACE,
-  LAST_SPACE = LO_SPACE
+  LAST_SPACE = LO_SPACE,
+  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+  LAST_PAGED_SPACE = CELL_SPACE
 };
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
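The two QNaN constants added above encode the same bit range at different widths; a standalone check of the arithmetic (our own, not from the patch):

#include <cassert>
#include <cstdint>

const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);

int main() {
  // 0xfff is twelve set bits; shifted by 51 they land on bits 51..62,
  // i.e. the exponent field plus the top mantissa bit, sign bit excluded.
  assert(kQuietNaNMask == 0x7FF8000000000000ULL);
  // In the high 32-bit word of a double those are bits 19..30.
  assert(kQuietNaNHighBitsMask == 0x7FF80000U);
  assert(static_cast<uint32_t>(kQuietNaNMask >> 32) == kQuietNaNHighBitsMask);
  return 0;
}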

17
deps/v8/src/handles.cc

@@ -105,6 +105,21 @@ void HandleScope::ZapRange(Object** start, Object** end) {
 }
 
+Address HandleScope::current_extensions_address() {
+  return reinterpret_cast<Address>(&current_.extensions);
+}
+
+
+Address HandleScope::current_next_address() {
+  return reinterpret_cast<Address>(&current_.next);
+}
+
+
+Address HandleScope::current_limit_address() {
+  return reinterpret_cast<Address>(&current_.limit);
+}
+
+
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
                                       Handle<JSArray> array) {
   CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
@@ -345,7 +360,7 @@ Handle<String> SubString(Handle<String> str, int start, int end) {
 Handle<Object> SetElement(Handle<JSObject> object,
                           uint32_t index,
                           Handle<Object> value) {
-  if (object->HasPixelElements()) {
+  if (object->HasPixelElements() || object->HasExternalArrayElements()) {
     if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
       bool has_exception;
       Handle<Object> number = Execution::ToNumber(value, &has_exception);
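Note: the new HasExternalArrayElements() branch reuses the pixel-array path because values must already be numbers before they are written into a raw external buffer. A rough sketch of why (hypothetical buffer and helper; the exact truncation/clamping rules differ per external array type):

    #include <cmath>
    #include <cstdint>

    // Illustrative only: a store into an int8 external buffer keeps just
    // the low 8 bits of the numeric value, so a non-number JS value must
    // go through ToNumber before this point.
    void StoreInt8(double number, int8_t* buffer, int index) {
      if (std::isnan(number)) number = 0;
      // Truncate toward zero, guarding the cast range for the sketch.
      int64_t wide = (number > -1e18 && number < 1e18)
                         ? static_cast<int64_t>(number) : 0;
      buffer[index] = static_cast<int8_t>(wide & 0xff);
    }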

10
deps/v8/src/handles.h

@@ -133,6 +133,13 @@ class HandleScope {
     return result;
   }
 
+  // Deallocates any extensions used by the current scope.
+  static void DeleteExtensions();
+
+  static Address current_extensions_address();
+  static Address current_next_address();
+  static Address current_limit_address();
+
  private:
   // Prevent heap allocation or illegal handle scopes.
   HandleScope(const HandleScope&);
@@ -166,9 +173,6 @@ class HandleScope {
   // Extend the handle scope making room for more handles.
   static internal::Object** Extend();
 
-  // Deallocates any extensions used by the current scope.
-  static void DeleteExtensions();
-
   // Zaps the handles in the half-open interval [start, end).
   static void ZapRange(internal::Object** start, internal::Object** end);
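Note: the three accessors made public above expose raw field addresses so code outside the class (for example, generated code) can read and update the scope's bump-allocation state directly. The pattern, reduced to a sketch with a hypothetical struct:

    #include <cstddef>

    // Hypothetical mirror of the pattern, not V8's HandleScopeData.
    struct ScopeData {
      void** next;      // Next free handle slot.
      void** limit;     // End of the current block.
      int extensions;   // Number of extra blocks allocated.
    };

    static ScopeData current_ = { NULL, NULL, 0 };

    // Publishing the slot's address lets a fast path load/store it
    // without calling back into the runtime.
    void* current_next_address() { return &current_.next; }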

44
deps/v8/src/heap-profiler.cc

@@ -78,6 +78,10 @@ JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
     }
   } else if (obj->IsString()) {
     return JSObjectsCluster(Heap::String_symbol());
+  } else if (obj->IsJSGlobalPropertyCell()) {
+    return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
+  } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+    return JSObjectsCluster(JSObjectsCluster::CODE);
   }
   return JSObjectsCluster();
 }
@@ -112,6 +116,16 @@ int Clusterizer::CalculateNetworkSize(JSObject* obj) {
   if (FixedArray::cast(obj->elements())->length() != 0) {
     size += obj->elements()->Size();
   }
+  // For functions, also account non-empty context and literals sizes.
+  if (obj->IsJSFunction()) {
+    JSFunction* f = JSFunction::cast(obj);
+    if (f->unchecked_context()->IsContext()) {
+      size += f->context()->Size();
+    }
+    if (f->literals()->length() != 0) {
+      size += f->literals()->Size();
+    }
+  }
   return size;
 }
@@ -127,15 +141,15 @@ class ReferencesExtractor : public ObjectVisitor {
   }
 
   void VisitPointer(Object** o) {
-    if ((*o)->IsJSObject() || (*o)->IsString()) {
-      profile_->StoreReference(cluster_, HeapObject::cast(*o));
-    } else if ((*o)->IsFixedArray() && !inside_array_) {
+    if ((*o)->IsFixedArray() && !inside_array_) {
       // Traverse one level deep for data members that are fixed arrays.
       // This covers the case of 'elements' and 'properties' of JSObject,
       // and function contexts.
       inside_array_ = true;
       FixedArray::cast(*o)->Iterate(this);
       inside_array_ = false;
+    } else if ((*o)->IsHeapObject()) {
+      profile_->StoreReference(cluster_, HeapObject::cast(*o));
     }
   }
@@ -340,6 +354,8 @@ void JSObjectsCluster::Print(StringStream* accumulator) const {
     accumulator->Add("(roots)");
   } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
     accumulator->Add("(global property)");
+  } else if (constructor_ == FromSpecialCase(CODE)) {
+    accumulator->Add("(code)");
   } else if (constructor_ == FromSpecialCase(SELF)) {
     accumulator->Add("(self)");
   } else {
@@ -527,6 +543,7 @@ RetainerHeapProfile::RetainerHeapProfile()
 void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
                                          HeapObject* ref) {
   JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+  if (ref_cluster.is_null()) return;
   JSObjectsRetainerTree::Locator ref_loc;
   if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
     ref_loc.set_value(new JSObjectsClusterTree());
@@ -537,15 +554,10 @@ void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
 void RetainerHeapProfile::CollectStats(HeapObject* obj) {
-  if (obj->IsJSObject()) {
-    const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
-    ReferencesExtractor extractor(cluster, this);
-    obj->Iterate(&extractor);
-  } else if (obj->IsJSGlobalPropertyCell()) {
-    JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
-    ReferencesExtractor extractor(global_prop, this);
-    obj->Iterate(&extractor);
-  }
+  const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+  if (cluster.is_null()) return;
+  ReferencesExtractor extractor(cluster, this);
+  obj->Iterate(&extractor);
 }
@@ -576,8 +588,10 @@ void RetainerHeapProfile::PrintStats() {
 void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
-  info[type].increment_number(1);
-  info[type].increment_bytes(obj->Size());
+  if (!FreeListNode::IsFreeListNode(obj)) {
+    info[type].increment_number(1);
+    info[type].increment_bytes(obj->Size());
+  }
 }
@@ -601,7 +615,7 @@ static void PrintProducerStackTrace(Object* obj, void* trace) {
 void HeapProfiler::WriteSample() {
   LOG(HeapSampleBeginEvent("Heap", "allocated"));
   LOG(HeapSampleStats(
-      "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+      "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
 
   HistogramInfo info[LAST_TYPE+1];
 #define DEF_TYPE_NAME(name) info[name].set_name(#name);
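Note: the reworked VisitPointer above inverts the old logic — fixed arrays are expanded one level deep, and anything else that is a heap object becomes a reference edge. The traversal pattern in isolation (hypothetical object model, not V8 types):

    #include <vector>

    // Hypothetical object model, for illustration only.
    struct Obj {
      std::vector<Obj*> slots;  // Pointers this object holds.
      bool is_array;
    };

    // Arrays are expanded one level deep instead of being recorded;
    // every other object becomes a recorded reference.
    void Visit(Obj* o, std::vector<Obj*>* refs, bool inside_array) {
      if (o->is_array && !inside_array) {
        for (size_t i = 0; i < o->slots.size(); i++) {
          Visit(o->slots[i], refs, true);
        }
      } else {
        refs->push_back(o);
      }
    }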

4
deps/v8/src/heap-profiler.h

@@ -54,7 +54,8 @@ class JSObjectsCluster BASE_EMBEDDED {
   enum SpecialCase {
     ROOTS = 1,
     GLOBAL_PROPERTY = 2,
-    SELF = 3  // This case is used in ClustersCoarser only.
+    CODE = 3,
+    SELF = 100  // This case is used in ClustersCoarser only.
   };
 
   JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
@@ -97,6 +98,7 @@ class JSObjectsCluster BASE_EMBEDDED {
     switch (special) {
       case ROOTS: return Heap::result_symbol();
       case GLOBAL_PROPERTY: return Heap::code_symbol();
+      case CODE: return Heap::arguments_shadow_symbol();
       case SELF: return Heap::catch_var_symbol();
       default:
         UNREACHABLE();

308
deps/v8/src/heap.cc

@@ -39,9 +39,11 @@
 #include "natives.h"
 #include "scanner.h"
 #include "scopeinfo.h"
+#include "snapshot.h"
 #include "v8threads.h"
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
 #include "regexp-macro-assembler.h"
+#include "arm/regexp-macro-assembler-arm.h"
 #endif
 
 namespace v8 {
@@ -74,28 +76,35 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-int Heap::semispace_size_  = 512*KB;
-int Heap::old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_  = 512*KB;
+int Heap::max_old_generation_size_ = 128*MB;
 int Heap::initial_semispace_size_ = 128*KB;
 size_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
-int Heap::semispace_size_  = 16*MB;
-int Heap::old_generation_size_ = 1*GB;
+int Heap::max_semispace_size_  = 16*MB;
+int Heap::max_old_generation_size_ = 1*GB;
 int Heap::initial_semispace_size_ = 1*MB;
 size_t Heap::code_range_size_ = 512*MB;
 #else
-int Heap::semispace_size_  = 8*MB;
-int Heap::old_generation_size_ = 512*MB;
+int Heap::max_semispace_size_  = 8*MB;
+int Heap::max_old_generation_size_ = 512*MB;
 int Heap::initial_semispace_size_ = 512*KB;
 size_t Heap::code_range_size_ = 0;
 #endif
 
+// The snapshot semispace size will be the default semispace size if
+// snapshotting is used and will be the requested semispace size as
+// set up by ConfigureHeap otherwise.
+int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
 
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap.
-int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
+
+// Will be 4 * reserved_semispace_size_ to ensure that young
+// generation can be aligned to its size.
 int Heap::survived_since_last_expansion_ = 0;
 
 int Heap::external_allocation_limit_ = 0;
@@ -105,6 +114,7 @@ int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
+int Heap::linear_allocation_scope_depth_ = 0;
 bool Heap::context_disposed_pending_ = false;
 
 #ifdef DEBUG
@@ -127,6 +137,19 @@ int Heap::Capacity() {
 }
 
 
+int Heap::CommittedMemory() {
+  if (!HasBeenSetup()) return 0;
+
+  return new_space_.CommittedMemory() +
+      old_pointer_space_->CommittedMemory() +
+      old_data_space_->CommittedMemory() +
+      code_space_->CommittedMemory() +
+      map_space_->CommittedMemory() +
+      cell_space_->CommittedMemory() +
+      lo_space_->Size();
+}
+
+
 int Heap::Available() {
   if (!HasBeenSetup()) return 0;
@@ -222,19 +245,34 @@ void Heap::ReportStatisticsBeforeGC() {
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintF("Memory allocator, used: %8d, available: %8d\n",
-         MemoryAllocator::Size(), MemoryAllocator::Available());
-  PrintF("New space, used: %8d, available: %8d\n",
-         Heap::new_space_.Size(), new_space_.Available());
-  PrintF("Old pointers, used: %8d, available: %8d\n",
-         old_pointer_space_->Size(), old_pointer_space_->Available());
-  PrintF("Old data space, used: %8d, available: %8d\n",
-         old_data_space_->Size(), old_data_space_->Available());
-  PrintF("Code space, used: %8d, available: %8d\n",
-         code_space_->Size(), code_space_->Available());
-  PrintF("Map space, used: %8d, available: %8d\n",
-         map_space_->Size(), map_space_->Available());
+  PrintF("Memory allocator, used: %8d, available: %8d\n",
+         MemoryAllocator::Size(),
+         MemoryAllocator::Available());
+  PrintF("New space, used: %8d, available: %8d\n",
+         Heap::new_space_.Size(),
+         new_space_.Available());
+  PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
+         old_pointer_space_->Size(),
+         old_pointer_space_->Available(),
+         old_pointer_space_->Waste());
+  PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
+         old_data_space_->Size(),
+         old_data_space_->Available(),
+         old_data_space_->Waste());
+  PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
+         code_space_->Size(),
+         code_space_->Available(),
+         code_space_->Waste());
+  PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
+         map_space_->Size(),
+         map_space_->Available(),
+         map_space_->Waste());
+  PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
+         cell_space_->Size(),
+         cell_space_->Available(),
+         cell_space_->Waste());
   PrintF("Large object space, used: %8d, avaialble: %8d\n",
-         lo_space_->Size(), lo_space_->Available());
+         lo_space_->Size(),
+         lo_space_->Available());
 }
 #endif
@@ -478,7 +516,13 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
   Counters::objs_since_last_young.Set(0);
 
-  PostGarbageCollectionProcessing();
+  if (collector == MARK_COMPACTOR) {
+    DisableAssertNoAllocation allow_allocation;
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing();
 
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
@@ -494,17 +538,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
 }
 
-
-void Heap::PostGarbageCollectionProcessing() {
-  // Process weak handles post gc.
-  {
-    DisableAssertNoAllocation allow_allocation;
-    GlobalHandles::PostGarbageCollectionProcessing();
-  }
-  // Update relocatables.
-  Relocatable::PostGarbageCollectionProcessing();
-}
-
 void Heap::MarkCompact(GCTracer* tracer) {
   gc_state_ = MARK_COMPACT;
   mc_count_++;
@@ -1195,6 +1228,41 @@ bool Heap::CreateInitialMaps() {
   if (obj->IsFailure()) return false;
   set_pixel_array_map(Map::cast(obj));
 
+  obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_float_array_map(Map::cast(obj));
+
   obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
   if (obj->IsFailure()) return false;
   set_code_map(Map::cast(obj));
@@ -1615,6 +1683,35 @@ Object* Heap::NumberToString(Object* number) {
 }
 
 
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+      return kExternalByteArrayMapRootIndex;
+    case kExternalUnsignedByteArray:
+      return kExternalUnsignedByteArrayMapRootIndex;
+    case kExternalShortArray:
+      return kExternalShortArrayMapRootIndex;
+    case kExternalUnsignedShortArray:
+      return kExternalUnsignedShortArrayMapRootIndex;
+    case kExternalIntArray:
+      return kExternalIntArrayMapRootIndex;
+    case kExternalUnsignedIntArray:
+      return kExternalUnsignedIntArrayMapRootIndex;
+    case kExternalFloatArray:
+      return kExternalFloatArrayMapRootIndex;
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
 Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
   return SmiOrNumberFromDouble(value,
                                true /* number object must be new */,
@@ -1713,10 +1810,10 @@ Object* Heap::AllocateConsString(String* first, String* second) {
   }
 
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = is_ascii ? short_cons_ascii_string_map()
                    : short_cons_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = is_ascii ? medium_cons_ascii_string_map()
                    : medium_cons_string_map();
   } else {
@@ -1746,11 +1843,11 @@ Object* Heap::AllocateSlicedString(String* buffer,
   }
 
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = buffer->IsAsciiRepresentation() ?
         short_sliced_ascii_string_map() :
         short_sliced_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = buffer->IsAsciiRepresentation() ?
         medium_sliced_ascii_string_map() :
         medium_sliced_string_map();
@@ -1815,9 +1912,9 @@ Object* Heap::AllocateExternalStringFromAscii(
     ExternalAsciiString::Resource* resource) {
   Map* map;
   int length = resource->length();
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_external_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_external_ascii_string_map();
   } else {
     map = long_external_ascii_string_map();
@@ -1940,6 +2037,31 @@ Object* Heap::AllocatePixelArray(int length,
 }
 
 
+Object* Heap::AllocateExternalArray(int length,
+                                    ExternalArrayType array_type,
+                                    void* external_pointer,
+                                    PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(ExternalArray::kAlignedSize,
+                               space,
+                               OLD_DATA_SPACE);
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<ExternalArray*>(result)->set_map(
+      MapForExternalArrayType(array_type));
+  reinterpret_cast<ExternalArray*>(result)->set_length(length);
+  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
+      external_pointer);
+
+  return result;
+}
+
+
 Object* Heap::CreateCode(const CodeDesc& desc,
                          ZoneScopeInfo* sinfo,
                          Code::Flags flags,
@@ -2021,7 +2143,9 @@ Object* Heap::Allocate(Map* map, AllocationSpace space) {
                             TargetSpaceId(map->instance_type()));
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(map);
+#ifdef ENABLE_LOGGING_AND_PROFILING
   ProducerHeapProfile::RecordJSObjectAllocation(result);
+#endif
   return result;
 }
@@ -2134,7 +2258,7 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
   // descriptors for these to the initial map as the object cannot be
   // constructed without having these properties.
   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
-  if (fun->shared()->has_only_this_property_assignments() &&
+  if (fun->shared()->has_only_simple_this_property_assignments() &&
       fun->shared()->this_property_assignments_count() > 0) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
@@ -2343,7 +2467,9 @@ Object* Heap::CopyJSObject(JSObject* source) {
     JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
+#ifdef ENABLE_LOGGING_AND_PROFILING
   ProducerHeapProfile::RecordJSObjectAllocation(clone);
+#endif
   return clone;
 }
@@ -2533,18 +2659,18 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
   Map* map;
 
   if (is_ascii) {
-    if (chars <= String::kMaxShortStringSize) {
+    if (chars <= String::kMaxShortSize) {
       map = short_ascii_symbol_map();
-    } else if (chars <= String::kMaxMediumStringSize) {
+    } else if (chars <= String::kMaxMediumSize) {
       map = medium_ascii_symbol_map();
     } else {
      map = long_ascii_symbol_map();
    }
    size = SeqAsciiString::SizeFor(chars);
  } else {
-    if (chars <= String::kMaxShortStringSize) {
+    if (chars <= String::kMaxShortSize) {
      map = short_symbol_map();
-    } else if (chars <= String::kMaxMediumStringSize) {
+    } else if (chars <= String::kMaxMediumSize) {
      map = medium_symbol_map();
    } else {
      map = long_symbol_map();
@@ -2594,9 +2720,9 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
 
   // Determine the map based on the string's length.
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_ascii_string_map();
   } else {
     map = long_ascii_string_map();
@@ -2631,9 +2757,9 @@ Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
 
   // Determine the map based on the string's length.
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_string_map();
   } else {
     map = long_string_map();
@@ -3118,60 +3244,53 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
 }
 
-#ifdef DEBUG
-#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
-#else
-#define SYNCHRONIZE_TAG(tag)
-#endif
-
 void Heap::IterateRoots(ObjectVisitor* v) {
   IterateStrongRoots(v);
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  SYNCHRONIZE_TAG("symbol_table");
+  v->Synchronize("symbol_table");
 }
 
 void Heap::IterateStrongRoots(ObjectVisitor* v) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  SYNCHRONIZE_TAG("strong_root_list");
+  v->Synchronize("strong_root_list");
 
   v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
-  SYNCHRONIZE_TAG("symbol");
+  v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
-  SYNCHRONIZE_TAG("bootstrapper");
+  v->Synchronize("bootstrapper");
   Top::Iterate(v);
-  SYNCHRONIZE_TAG("top");
+  v->Synchronize("top");
   Relocatable::Iterate(v);
-  SYNCHRONIZE_TAG("relocatable");
+  v->Synchronize("relocatable");
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::Iterate(v);
 #endif
-  SYNCHRONIZE_TAG("debug");
+  v->Synchronize("debug");
   CompilationCache::Iterate(v);
-  SYNCHRONIZE_TAG("compilationcache");
+  v->Synchronize("compilationcache");
 
   // Iterate over local handles in handle scopes.
   HandleScopeImplementer::Iterate(v);
-  SYNCHRONIZE_TAG("handlescope");
+  v->Synchronize("handlescope");
 
   // Iterate over the builtin code objects and code stubs in the heap. Note
   // that it is not strictly necessary to iterate over code objects on
   // scavenge collections. We still do it here because this same function
   // is used by the mark-sweep collector and the deserializer.
   Builtins::IterateBuiltins(v);
-  SYNCHRONIZE_TAG("builtins");
+  v->Synchronize("builtins");
 
   // Iterate over global handles.
   GlobalHandles::IterateRoots(v);
-  SYNCHRONIZE_TAG("globalhandles");
+  v->Synchronize("globalhandles");
 
   // Iterate over pointers being held by inactive threads.
   ThreadManager::Iterate(v);
-  SYNCHRONIZE_TAG("threadmanager");
+  v->Synchronize("threadmanager");
 }
-#undef SYNCHRONIZE_TAG
 
 // Flag is set when the heap has been configured.  The heap can be repeatedly
@@ -3181,21 +3300,37 @@ static bool heap_configured = false;
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
+bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
   if (HasBeenSetup()) return false;
 
-  if (semispace_size > 0) semispace_size_ = semispace_size;
-  if (old_gen_size > 0) old_generation_size_ = old_gen_size;
+  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+
+  if (Snapshot::IsEnabled()) {
+    // If we are using a snapshot we always reserve the default amount
+    // of memory for each semispace because code in the snapshot has
+    // write-barrier code that relies on the size and alignment of new
+    // space.  We therefore cannot use a larger max semispace size
+    // than the default reserved semispace size.
+    if (max_semispace_size_ > reserved_semispace_size_) {
+      max_semispace_size_ = reserved_semispace_size_;
+    }
+  } else {
+    // If we are not using snapshots we reserve space for the actual
+    // max semispace size.
+    reserved_semispace_size_ = max_semispace_size_;
+  }
+
+  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
 
   // The new space size must be a power of two to support single-bit testing
   // for containment.
-  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
-  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
-  young_generation_size_ = 2 * semispace_size_;
-  external_allocation_limit_ = 10 * semispace_size_;
+  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
+  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
+  external_allocation_limit_ = 10 * max_semispace_size_;
 
   // The old generation is paged.
-  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
+  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
 
   heap_configured = true;
   return true;
@@ -3203,7 +3338,7 @@ bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
+  return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
 }
@@ -3239,30 +3374,31 @@ bool Heap::Setup(bool create_heap_objects) {
   }
 
   // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the new space to ensure
-  // that we can find a pair of semispaces that are contiguous and
-  // aligned to their size.
-  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
-  void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+  // space.  The chunk is double the size of the requested reserved
+  // new space size to ensure that we can find a pair of semispaces that
+  // are contiguous and aligned to their size.
+  if (!MemoryAllocator::Setup(MaxReserved())) return false;
+  void* chunk =
+      MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
   if (chunk == NULL) return false;
 
   // Align the pair of semispaces to their size, which must be a power
   // of 2.
-  ASSERT(IsPowerOf2(young_generation_size_));
   Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), young_generation_size_);
-  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
+      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+    return false;
+  }
 
   // Initialize old pointer space.
   old_pointer_space_ =
-      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->Setup(NULL, 0)) return false;
 
   // Initialize old data space.
   old_data_space_ =
-      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
   if (!old_data_space_->Setup(NULL, 0)) return false;
@@ -3277,7 +3413,7 @@ bool Heap::Setup(bool create_heap_objects) {
   }
 
   code_space_ =
-      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
+      new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->Setup(NULL, 0)) return false;
@@ -3287,7 +3423,7 @@ bool Heap::Setup(bool create_heap_objects) {
   if (!map_space_->Setup(NULL, 0)) return false;
 
   // Initialize global property cell space.
-  cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
+  cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
   if (!cell_space_->Setup(NULL, 0)) return false;
@@ -3310,8 +3446,10 @@ bool Heap::Setup(bool create_heap_objects) {
   LOG(IntEvent("heap-capacity", Capacity()));
   LOG(IntEvent("heap-available", Available()));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
   // This should be called only after initial objects have been created.
   ProducerHeapProfile::Setup();
+#endif
 
   return true;
 }
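Note: ConfigureHeap above rounds both semispace sizes up to powers of two, and Setup then reserves 4 * reserved_semispace_size_ so the two-semispace new space can be aligned to its own size. A worked version of that arithmetic (the bit trick below is the standard one and assumes a nonzero 32-bit input; it is not V8's exact RoundUpToPowerOf2):

    #include <cstdio>

    // Round a nonzero 32-bit value up to the next power of two.
    unsigned RoundUpToPowerOf2(unsigned x) {
      x--;
      x |= x >> 1;   x |= x >> 2;   x |= x >> 4;
      x |= x >> 8;   x |= x >> 16;
      return x + 1;
    }

    int main() {
      unsigned semispace = RoundUpToPowerOf2(6u * 1024 * 1024);  // 6MB -> 8MB
      // Two semispaces make up new space; reserving twice that again lets
      // the pair be aligned to its own size, hence MaxReserved()'s 4x.
      unsigned reserved = 4 * semispace;
      std::printf("semispace: %u, reserved for new space: %u\n",
                  semispace, reserved);
      return 0;
    }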

84
deps/v8/src/heap.h

@@ -38,7 +38,13 @@ namespace internal {
 // Defines all the roots in Heap.
 #define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
-  /* Cluster the most popular ones in a few cache lines here at the top. */ \
+  /* Put the byte array map early.  We need it to be in place by the time */ \
+  /* the deserializer hits the next page, since it wants to put a byte */ \
+  /* array in the unused space at the end of the page. */ \
+  V(Map, byte_array_map, ByteArrayMap) \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
+  /* Cluster the most popular ones in a few cache lines here at the top. */ \
   V(Smi, stack_limit, StackLimit) \
   V(Object, undefined_value, UndefinedValue) \
   V(Object, the_hole_value, TheHoleValue) \
@@ -109,8 +115,14 @@ namespace internal {
     undetectable_medium_ascii_string_map, \
     UndetectableMediumAsciiStringMap) \
   V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
-  V(Map, byte_array_map, ByteArrayMap) \
   V(Map, pixel_array_map, PixelArrayMap) \
+  V(Map, external_byte_array_map, ExternalByteArrayMap) \
+  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
+  V(Map, external_short_array_map, ExternalShortArrayMap) \
+  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
+  V(Map, external_int_array_map, ExternalIntArrayMap) \
+  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
+  V(Map, external_float_array_map, ExternalFloatArrayMap) \
   V(Map, context_map, ContextMap) \
   V(Map, catch_context_map, CatchContextMap) \
   V(Map, code_map, CodeMap) \
@@ -119,8 +131,6 @@ namespace internal {
   V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
   V(Map, shared_function_info_map, SharedFunctionInfoMap) \
   V(Map, proxy_map, ProxyMap) \
-  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
-  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   V(Object, nan_value, NanValue) \
   V(Object, minus_zero_value, MinusZeroValue) \
   V(String, empty_string, EmptyString) \
@@ -214,7 +224,8 @@ namespace internal {
   V(exec_symbol, "exec") \
   V(zero_symbol, "0") \
   V(global_eval_symbol, "GlobalEval") \
-  V(identity_hash_symbol, "v8::IdentityHash")
+  V(identity_hash_symbol, "v8::IdentityHash") \
+  V(closure_symbol, "(closure)")
 
 // Forward declaration of the GCTracer class.
@@ -228,7 +239,7 @@ class Heap : public AllStatic {
  public:
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
-  static bool ConfigureHeap(int semispace_size, int old_gen_size);
+  static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
   static bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
@@ -247,19 +258,26 @@ class Heap : public AllStatic {
   // Returns whether Setup has been called.
   static bool HasBeenSetup();
 
-  // Returns the maximum heap capacity.
-  static int MaxCapacity() {
-    return young_generation_size_ + old_generation_size_;
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation, we reserve 4 times the amount needed for a
+  // semi space.  The young generation consists of two semi spaces and
+  // we reserve twice the amount needed for those in order to ensure
+  // that new space can be aligned to its size.
+  static int MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
   }
-  static int SemiSpaceSize() { return semispace_size_; }
+  static int MaxSemiSpaceSize() { return max_semispace_size_; }
+  static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   static int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  static int YoungGenerationSize() { return young_generation_size_; }
-  static int OldGenerationSize() { return old_generation_size_; }
+  static int MaxOldGenerationSize() { return max_old_generation_size_; }
 
   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
   static int Capacity();
 
+  // Returns the amount of memory currently committed for the heap.
+  static int CommittedMemory();
+
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
@@ -290,6 +308,9 @@ class Heap : public AllStatic {
   static Address always_allocate_scope_depth_address() {
     return reinterpret_cast<Address>(&always_allocate_scope_depth_);
   }
+  static bool linear_allocation() {
+    return linear_allocation_scope_depth_ != 0;
+  }
 
   static Address* NewSpaceAllocationTopAddress() {
     return new_space_.allocation_top_address();
@@ -449,6 +470,15 @@ class Heap : public AllStatic {
                                     uint8_t* external_pointer,
                                     PretenureFlag pretenure);
 
+  // Allocates an external array of the specified length and type.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateExternalArray(int length,
+                                       ExternalArrayType array_type,
+                                       void* external_pointer,
+                                       PretenureFlag pretenure);
+
   // Allocate a tenured JS global property cell.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -637,9 +667,6 @@ class Heap : public AllStatic {
   static void GarbageCollectionPrologue();
   static void GarbageCollectionEpilogue();
 
-  // Code that should be executed after the garbage collection proper.
-  static void PostGarbageCollectionProcessing();
-
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
   static bool CollectGarbage(int required_space, AllocationSpace space);
@@ -729,7 +756,7 @@ class Heap : public AllStatic {
   static bool Contains(HeapObject* value);
 
   // Checks whether an address/object in a space.
-  // Currently used by tests and heap verification only.
+  // Currently used by tests, serialization and heap verification only.
   static bool InSpace(Address addr, AllocationSpace space);
   static bool InSpace(HeapObject* value, AllocationSpace space);
@@ -884,11 +911,15 @@ class Heap : public AllStatic {
   static Object* NumberToString(Object* number);
 
+  static Map* MapForExternalArrayType(ExternalArrayType array_type);
+  static RootListIndex RootIndexForExternalArrayType(
+      ExternalArrayType array_type);
+
  private:
-  static int semispace_size_;
+  static int reserved_semispace_size_;
+  static int max_semispace_size_;
   static int initial_semispace_size_;
-  static int young_generation_size_;
-  static int old_generation_size_;
+  static int max_old_generation_size_;
   static size_t code_range_size_;
 
   // For keeping track of how much data has survived
@@ -896,6 +927,7 @@ class Heap : public AllStatic {
   static int survived_since_last_expansion_;
 
   static int always_allocate_scope_depth_;
+  static int linear_allocation_scope_depth_;
   static bool context_disposed_pending_;
 
   static const int kMaxMapSpaceSize = 8*MB;
@@ -1111,6 +1143,7 @@ class Heap : public AllStatic {
   friend class Factory;
   friend class DisallowAllocationFailure;
   friend class AlwaysAllocateScope;
+  friend class LinearAllocationScope;
 };
@@ -1132,6 +1165,19 @@ class AlwaysAllocateScope {
 };
 
+
+class LinearAllocationScope {
+ public:
+  LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_++;
+  }
+
+  ~LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_--;
+    ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+  }
+};
+
+
 #ifdef DEBUG
 // Visitor class to verify interior pointers that do not have remembered set
 // bits.  All heap object pointers have to point into the heap to a location
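Note: LinearAllocationScope above is plain RAII over a static counter, the same shape as AlwaysAllocateScope; Heap::linear_allocation() just reports depth != 0. The pattern in miniature (generic names, not V8's):

    #include <cassert>

    static int depth = 0;  // Stands in for linear_allocation_scope_depth_.

    struct DepthScope {
      DepthScope() { ++depth; }
      ~DepthScope() { --depth; assert(depth >= 0); }
    };

    bool InScope() { return depth != 0; }  // Stands in for linear_allocation().

    int main() {
      assert(!InScope());
      {
        DepthScope scope;    // Constructor bumps the depth.
        assert(InScope());   // Nested scopes simply deepen the count.
      }                      // Destructor restores it on any exit path.
      assert(!InScope());
      return 0;
    }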

18
deps/v8/src/ia32/assembler-ia32.cc

@@ -1850,6 +1850,22 @@ void Assembler::fucompp() {
 }
 
+
+void Assembler::fucomi(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  EMIT(0xE9);
+}
+
+
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2109,7 +2125,7 @@ void Assembler::GrowBuffer() {
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > Heap::OldGenerationSize())) {
+      (desc.buffer_size > Heap::MaxOldGenerationSize())) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
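Note: the two new emitters encode x87 compare instructions byte for byte. Per the IA-32 manual, FUCOMI ST, ST(i) is DB E8+i and FUCOMIP ST, ST(i) is DF E8+i, so fucomip() above hard-codes 0xE9, i.e. i == 1 (FUCOMIP ST, ST(1)). A tiny table generator to confirm the arithmetic:

    #include <cstdio>

    int main() {
      // FUCOMI  ST, ST(i) -> DB E8+i ; FUCOMIP ST, ST(i) -> DF E8+i.
      for (int i = 0; i < 8; i++) {
        std::printf("fucomi st, st(%d)  = DB %02X\n", i, 0xE8 + i);
      }
      std::printf("fucomip st, st(1) = DF %02X\n", 0xE8 + 1);
      return 0;
    }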

10
deps/v8/src/ia32/assembler-ia32.h

@@ -439,6 +439,14 @@ class Assembler : public Malloced {
   inline static Address target_address_at(Address pc);
   inline static void set_target_address_at(Address pc, Address target);
 
+  // This sets the branch destination (which is in the instruction on x86).
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  static const int kCallTargetSize = kPointerSize;
+
   // Distance between the address of the code target in the call instruction
   // and the return address
   static const int kCallTargetAddressOffset = kPointerSize;
@@ -702,6 +710,8 @@ class Assembler : public Malloced {
   void ftst();
   void fucomp(int i);
   void fucompp();
+  void fucomi(int i);
+  void fucomip();
   void fcompp();
   void fnstsw_ax();
   void fwait();

86
deps/v8/src/ia32/builtins-ia32.cc

@@ -462,6 +462,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ mov(ebx, FieldOperand(esi, kGlobalIndex));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
     __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
     __ bind(&patch_receiver);
@@ -520,48 +522,48 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   __ push(Operand(ebp, 2 * kPointerSize));  // push arguments
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  if (FLAG_check_stack) {
+  // Check the stack for overflow or a break request.
   // We need to catch preemptions right here, otherwise an unlucky preemption
   // could show up as a failed apply.
   ExternalReference stack_guard_limit =
       ExternalReference::address_of_stack_guard_limit();
   Label retry_preemption;
   Label no_preemption;
   __ bind(&retry_preemption);
   __ mov(edi, Operand::StaticVariable(stack_guard_limit));
   __ cmp(esp, Operand(edi));
   __ j(above, &no_preemption, taken);
 
   // Preemption!
   // Because builtins always remove the receiver from the stack, we
   // have to fake one to avoid underflowing the stack.
   __ push(eax);
   __ push(Immediate(Smi::FromInt(0)));
 
   // Do call to runtime routine.
   __ CallRuntime(Runtime::kStackGuard, 1);
   __ pop(eax);
   __ jmp(&retry_preemption);
 
   __ bind(&no_preemption);
 
   Label okay;
   // Make ecx the space we have left.
   __ mov(ecx, Operand(esp));
   __ sub(ecx, Operand(edi));
   // Make edx the space we need for the array when it is unrolled onto the
   // stack.
   __ mov(edx, Operand(eax));
   __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
   __ cmp(ecx, Operand(edx));
   __ j(greater, &okay, taken);
 
   // Too bad: Out of stack space.
   __ push(Operand(ebp, 4 * kPointerSize));  // push this
   __ push(eax);
   __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
   __ bind(&okay);
-  }  // End of stack check.
 
   // Push current index and limit.
   const int kLimitOffset =
@@ -606,6 +608,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     const int kGlobalOffset =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
     __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
     // Push the receiver.
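Note: the stack check in the apply builtin above compares the space left (esp minus the guard limit) against the space the unrolled argument array will need, computed with a single shift. The arithmetic, assuming the ia32 constants of this era (kSmiTagSize == 1, kPointerSizeLog2 == 2):

    #include <cstdio>

    int main() {
      int argc = 1000;
      int smi = argc << 1;         // argc as a tagged Smi, as held in eax.
      int bytes = smi << (2 - 1);  // shl edx, kPointerSizeLog2 - kSmiTagSize
      std::printf("argc=%d -> %d bytes of stack\n", argc, bytes);
      return bytes == argc * 4 ? 0 : 1;  // One 4-byte slot per argument.
    }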

303
deps/v8/src/ia32/codegen-ia32.cc

@ -697,18 +697,6 @@ void CodeGenerator::UnloadReference(Reference* ref) {
} }
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return 0; }
};
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to // convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate. // 'false_target'/'true_target' as appropriate.
@ -773,13 +761,6 @@ class FloatingPointHelper : public AllStatic {
// either operand is not a number. Operands are in edx and eax. // either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged. // Leaves operands unchanged.
static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
// Allocate a heap number in new space with undefined value.
// Returns tagged pointer in eax, or jumps to need_gc if new space is full.
static void AllocateHeapNumber(MacroAssembler* masm,
Label* need_gc,
Register scratch1,
Register scratch2,
Register result);
}; };
@ -2222,14 +2203,12 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() { void CodeGenerator::CheckStack() {
if (FLAG_check_stack) { DeferredStackCheck* deferred = new DeferredStackCheck;
DeferredStackCheck* deferred = new DeferredStackCheck; ExternalReference stack_guard_limit =
ExternalReference stack_guard_limit = ExternalReference::address_of_stack_guard_limit();
ExternalReference::address_of_stack_guard_limit(); __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
__ cmp(esp, Operand::StaticVariable(stack_guard_limit)); deferred->Branch(below);
deferred->Branch(below); deferred->BindExit();
deferred->BindExit();
}
} }
@ -2282,8 +2261,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// allow us to push the arguments directly into place. // allow us to push the arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1); frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(esi); // The context is the first argument.
frame_->EmitPush(Immediate(pairs)); frame_->EmitPush(Immediate(pairs));
frame_->EmitPush(esi); // The context is the second argument.
frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0))); frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored. // Return value is ignored.
@ -3583,11 +3562,9 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate()); ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1); frame_->SyncRange(0, frame_->element_count() - 1);
// Push the boilerplate on the stack.
frame_->EmitPush(Immediate(boilerplate));
// Create a new closure. // Create a new closure.
frame_->EmitPush(esi); frame_->EmitPush(esi);
frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2); Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result); frame_->Push(&result);
} }
@ -5175,11 +5152,10 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
Result scratch1 = allocator()->Allocate(); Result scratch1 = allocator()->Allocate();
Result scratch2 = allocator()->Allocate(); Result scratch2 = allocator()->Allocate();
Result heap_number = allocator()->Allocate(); Result heap_number = allocator()->Allocate();
FloatingPointHelper::AllocateHeapNumber(masm_, __ AllocateHeapNumber(heap_number.reg(),
call_runtime.entry_label(), scratch1.reg(),
scratch1.reg(), scratch2.reg(),
scratch2.reg(), call_runtime.entry_label());
heap_number.reg());
scratch1.Unuse(); scratch1.Unuse();
scratch2.Unuse(); scratch2.Unuse();
@ -6508,11 +6484,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &true_result); __ j(not_equal, &true_result);
__ fldz(); __ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fucompp(); __ FCmp();
__ push(eax);
__ fnstsw_ax();
__ sahf();
__ pop(eax);
__ j(zero, &false_result); __ j(zero, &false_result);
// Fall through to |true_result|. // Fall through to |true_result|.
@@ -6531,47 +6503,52 @@ void GenericBinaryOpStub::GenerateCall(
     Register left,
     Register right) {
   if (!ArgsInRegistersSupported()) {
-    // Only pass arguments in registers if there is no smi code in the stub.
+    // Pass arguments on the stack.
     __ push(left);
     __ push(right);
   } else {
     // The calling convention with registers is left in edx and right in eax.
-    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
-    if (!(left.is(edx) && right.is(eax))) {
-      if (left.is(eax) && right.is(edx)) {
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
         if (IsOperationCommutative()) {
           SetArgsReversed();
         } else {
           __ xchg(left, right);
         }
-      } else if (left.is(edx)) {
-        __ mov(eax, right);
-      } else if (left.is(eax)) {
+      } else if (left.is(left_arg)) {
+        __ mov(right_arg, right);
+      } else if (left.is(right_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(edx, right);
+          __ mov(left_arg, right);
           SetArgsReversed();
         } else {
-          __ mov(edx, left);
-          __ mov(eax, right);
+          // Order of moves important to avoid destroying left argument.
+          __ mov(left_arg, left);
+          __ mov(right_arg, right);
         }
-      } else if (right.is(edx)) {
+      } else if (right.is(left_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(eax, left);
+          __ mov(right_arg, left);
           SetArgsReversed();
         } else {
-          __ mov(eax, right);
-          __ mov(edx, left);
+          // Order of moves important to avoid destroying right argument.
+          __ mov(right_arg, right);
+          __ mov(left_arg, left);
        }
-      } else if (right.is(eax)) {
-        __ mov(edx, left);
+      } else if (right.is(right_arg)) {
+        __ mov(left_arg, left);
       } else {
-        __ mov(edx, left);
-        __ mov(eax, right);
+        // Order of moves is not important.
+        __ mov(left_arg, left);
+        __ mov(right_arg, right);
       }
     }
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
   // Call the stub.
@@ -6584,23 +6561,26 @@ void GenericBinaryOpStub::GenerateCall(
     Register left,
     Smi* right) {
   if (!ArgsInRegistersSupported()) {
-    // Only pass arguments in registers if there is no smi code in the stub.
+    // Pass arguments on the stack.
     __ push(left);
     __ push(Immediate(right));
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    if (left.is(edx)) {
-      __ mov(eax, Immediate(right));
-    } else if (left.is(eax) && IsOperationCommutative()) {
-      __ mov(edx, Immediate(right));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (left.is(left_arg)) {
+      __ mov(right_arg, Immediate(right));
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ mov(left_arg, Immediate(right));
       SetArgsReversed();
     } else {
-      __ mov(edx, left);
-      __ mov(eax, Immediate(right));
+      __ mov(left_arg, left);
+      __ mov(right_arg, Immediate(right));
    }
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
   // Call the stub.
@@ -6612,23 +6592,26 @@ void GenericBinaryOpStub::GenerateCall(
     MacroAssembler* masm,
     Smi* left,
     Register right) {
-  if (flags_ != NO_SMI_CODE_IN_STUB) {
-    // Only pass arguments in registers if there is no smi code in the stub.
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
     __ push(Immediate(left));
     __ push(right);
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
-    if (right.is(eax)) {
-      __ mov(edx, Immediate(left));
-    } else if (right.is(edx) && is_commutative) {
-      __ mov(eax, Immediate(left));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (right.is(right_arg)) {
+      __ mov(left_arg, Immediate(left));
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ mov(right_arg, Immediate(left));
+      SetArgsReversed();
     } else {
-      __ mov(edx, Immediate(left));
-      __ mov(eax, right);
+      __ mov(left_arg, Immediate(left));
+      __ mov(right_arg, right);
     }
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
   // Call the stub.
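All three GenerateCall overloads now share one convention, left in edx and
right in eax, with SetArgsReversed() compensating when a commutative operation
lets the stub accept swapped operands. A hedged, illustrative fragment of the
hazard the "order of moves" comments guard against (simplified; the real logic
also covers the xchg and commutative cases):

    // If 'right' already occupies edx, the slot 'left' must end up in,
    // moving left first would destroy right:
    //
    //   __ mov(left_arg, left);    // clobbers right when right.is(left_arg)
    //   __ mov(right_arg, right);  // too late, right is gone
    //
    // so the endangered value is always moved out of its slot first:
    //
    //   __ mov(right_arg, right);  // rescue right out of edx
    //   __ mov(left_arg, left);    // now safe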
@@ -6836,11 +6819,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       case NO_OVERWRITE: {
         // Allocate a heap number for the result. Keep eax and edx intact
         // for the possible runtime call.
-        FloatingPointHelper::AllocateHeapNumber(masm,
-                                                &call_runtime,
-                                                ecx,
-                                                no_reg,
-                                                ebx);
+        __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
         // Now eax can be overwritten losing one of the arguments as we are
         // now done and will not need it any more.
         __ mov(eax, ebx);
@@ -6868,11 +6847,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       case NO_OVERWRITE:
        // Allocate a heap number for the result. Keep eax and edx intact
         // for the possible runtime call.
-        FloatingPointHelper::AllocateHeapNumber(masm,
-                                                &call_runtime,
-                                                ecx,
-                                                no_reg,
-                                                ebx);
+        __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
         // Now eax can be overwritten losing one of the arguments as we are
         // now done and will not need it any more.
         __ mov(eax, ebx);
@@ -6924,18 +6899,14 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Check if right operand is int32.
       __ fist_s(Operand(esp, 0 * kPointerSize));
       __ fild_s(Operand(esp, 0 * kPointerSize));
-      __ fucompp();
-      __ fnstsw_ax();
-      __ sahf();
+      __ FCmp();
       __ j(not_zero, &operand_conversion_failure);
       __ j(parity_even, &operand_conversion_failure);

       // Check if left operand is int32.
       __ fist_s(Operand(esp, 1 * kPointerSize));
       __ fild_s(Operand(esp, 1 * kPointerSize));
-      __ fucompp();
-      __ fnstsw_ax();
-      __ sahf();
+      __ FCmp();
       __ j(not_zero, &operand_conversion_failure);
       __ j(parity_even, &operand_conversion_failure);
     }
@@ -6964,7 +6935,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Tag smi result and return.
       ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
       __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);

       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR) {
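GenerateReturn(masm) replaces the raw __ ret(2 * kPointerSize) so a stub that
received its arguments in registers can return without popping stack slots it
never used. The lea above it is the usual smi-tagging idiom: with kSmiTag == 0
and a one-bit tag, lea eax, [eax + eax] computes value << 1. The encoding in
plain C++, as a sketch (31-bit smis on ia32):

    // Smi encoding: the value occupies the upper 31 bits, tag bit 0 is 0.
    int32_t SmiTag(int32_t value)   { return value << 1; }  // == lea eax, [eax+eax]
    int32_t SmiUntag(int32_t smi)   { return smi >> 1; }
    bool    HasSmiTag(int32_t word) { return (word & 1) == 0; }  // kSmiTagMask == 1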
@@ -6982,8 +6953,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
           __ j(not_zero, &skip_allocation, not_taken);
           // Fall through!
         case NO_OVERWRITE:
-          FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                  ecx, edx, eax);
+          __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
           __ bind(&skip_allocation);
           break;
         default: UNREACHABLE();
@@ -6992,7 +6962,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       __ mov(Operand(esp, 1 * kPointerSize), ebx);
       __ fild_s(Operand(esp, 1 * kPointerSize));
       __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
     }

   // Clear the FPU exception flag and reset the stack before calling
@@ -7024,7 +6994,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // If all else fails, use the runtime system to get the correct
   // result. If arguments was passed in registers now place them on the
-  // stack in the correct order.
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgumentsInRegisters()) {
     __ pop(ecx);
@@ -7133,25 +7103,6 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
 }


-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
-                                             Label* need_gc,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Register result) {
-  // Allocate heap number in new space.
-  __ AllocateInNewSpace(HeapNumber::kSize,
-                        result,
-                        scratch1,
-                        scratch2,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Set the map.
-  __ mov(FieldOperand(result, HeapObject::kMapOffset),
-         Immediate(Factory::heap_number_map()));
-}
-
-
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -7308,7 +7259,7 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
   } else {
     __ mov(edx, Operand(eax));
     // edx: operand
-    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    __ AllocateHeapNumber(eax, ebx, ecx, &undo);
     // eax: allocated 'empty' number
     __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
     __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
@@ -7458,20 +7409,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
       // not NaN.
       // The representation of NaN values has all exponent bits (52..62) set,
       // and not all mantissa bits (0..51) clear.
+      // We only accept QNaNs, which have bit 51 set.
       // Read top bits of double representation (second word of value).
-      __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
-      // Test that exponent bits are all set.
-      __ not_(eax);
-      __ test(eax, Immediate(0x7ff00000));
-      __ j(not_zero, &return_equal);
-      __ not_(eax);
-      // Shift out flag and all exponent bits, retaining only mantissa.
-      __ shl(eax, 12);
-      // Or with all low-bits of mantissa.
-      __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
-      // Return zero equal if all bits in mantissa is zero (it's an Infinity)
-      // and non-zero if not (it's a NaN).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+      __ xor_(eax, Operand(eax));
+      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
+      __ add(edx, Operand(edx));
+      __ cmp(edx, kQuietNaNHighBitsMask << 1);
+      __ setcc(above_equal, eax);
       __ ret(0);

       __ bind(&not_identical);
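The rewritten NaN test turns four ALU operations plus a mantissa load into one
shifted compare: doubling edx (add edx, edx) shifts out the sign bit, so a
single unsigned compare against kQuietNaNHighBitsMask << 1 asks "are the
exponent bits and the top mantissa bit all set?". The same predicate as a C++
sketch (mask constant assumed from V8's globals.h, covering bits 19..30 of the
high word, i.e. the exponent plus the QNaN bit):

    // True when the double whose upper 32 bits are 'hi' is a quiet NaN.
    bool IsQuietNaNHighWord(uint32_t hi) {
      // hi << 1 discards the sign bit; >= holds exactly when the exponent
      // (bits 20..30) and the QNaN bit (bit 19) are all ones.
      return (hi << 1) >= (kQuietNaNHighBitsMask << 1);
    }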
@@ -7757,11 +7707,84 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
 }


+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly.  If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_MAC_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+  Label get_result;
+  Label prologue;
+  Label promote_scheduled_exception;
+  __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
+  ASSERT_EQ(kArgc, 4);
+  if (kPassHandlesDirectly) {
+    // When handles as passed directly we don't have to allocate extra
+    // space for and pass an out parameter.
+    __ mov(Operand(esp, 0 * kPointerSize), ebx);  // name.
+    __ mov(Operand(esp, 1 * kPointerSize), eax);  // arguments pointer.
+  } else {
+    // The function expects three arguments to be passed but we allocate
+    // four to get space for the output cell.  The argument slots are filled
+    // as follows:
+    //
+    //   3: output cell
+    //   2: arguments pointer
+    //   1: name
+    //   0: pointer to the output cell
+    //
+    // Note that this is one more "argument" than the function expects
+    // so the out cell will have to be popped explicitly after returning
+    // from the function.
+    __ mov(Operand(esp, 1 * kPointerSize), ebx);  // name.
+    __ mov(Operand(esp, 2 * kPointerSize), eax);  // arguments pointer.
+    __ mov(ebx, esp);
+    __ add(Operand(ebx), Immediate(3 * kPointerSize));
+    __ mov(Operand(esp, 0 * kPointerSize), ebx);  // output
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));  // out cell.
+  }
+  // Call the api function!
+  __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+  // Check if the function scheduled an exception.
+  ExternalReference scheduled_exception_address =
+      ExternalReference::scheduled_exception_address();
+  __ cmp(Operand::StaticVariable(scheduled_exception_address),
+         Immediate(Factory::the_hole_value()));
+  __ j(not_equal, &promote_scheduled_exception, not_taken);
+  if (!kPassHandlesDirectly) {
+    // The returned value is a pointer to the handle holding the result.
+    // Dereference this to get to the location.
+    __ mov(eax, Operand(eax, 0));
+  }
+  // Check if the result handle holds 0.
+  __ test(eax, Operand(eax));
+  __ j(not_zero, &get_result, taken);
+  // It was zero; the result is undefined.
+  __ mov(eax, Factory::undefined_value());
+  __ jmp(&prologue);
+  // It was non-zero.  Dereference to get the result value.
+  __ bind(&get_result);
+  __ mov(eax, Operand(eax, 0));
+  __ bind(&prologue);
+  __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+  __ ret(0);
+  __ bind(&promote_scheduled_exception);
+  __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
+                     0,
+                     1);
+}
+
+
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate_scope) {
   // eax: result parameter for PerformGC, if any
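The new ApiGetterEntryStub adapts to two handle-passing ABIs, fixed at compile
time by kPassHandlesDirectly. A hedged, simplified view of the indirect
convention the stub emulates (signature illustrative only, not V8 API):

    // Indirect convention sketch: the callee receives a pointer to an out
    // cell and returns a pointer to the handle; a NULL handle means
    // "undefined":
    //
    //   Object** getter(Object*** out_cell, String** name, void* args);
    //
    // which is why the stub allocates a fourth slot, dereferences the
    // returned pointer, and tests the result for zero before loading.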
@@ -7811,7 +7834,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   __ j(zero, &failure_returned, not_taken);

   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(frame_type);
+  __ LeaveExitFrame(mode);
   __ ret(0);

   // Handling of failure.
@@ -7910,12 +7933,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
   // of a proper result. The builtin entry handles this by performing
   // a garbage collection and retrying the builtin (twice).
-  StackFrame::Type frame_type = is_debug_break ?
-      StackFrame::EXIT_DEBUG :
-      StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break
+      ? ExitFrame::MODE_DEBUG
+      : ExitFrame::MODE_NORMAL;

   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type);
+  __ EnterExitFrame(mode);

   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function (C callee-saved)
@@ -7933,7 +7956,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);
@@ -7942,7 +7965,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);
@@ -7953,7 +7976,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);

27
deps/v8/src/ia32/codegen-ia32.h

@@ -396,7 +396,7 @@ class CodeGenerator: public AstVisitor {
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);

-  Operand ContextOperand(Register context, int index) const {
+  static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
@@ -407,7 +407,7 @@ class CodeGenerator: public AstVisitor {
                                JumpTarget* slow);

   // Expressions
-  Operand GlobalObject() const {
+  static Operand GlobalObject() {
     return ContextOperand(esi, Context::GLOBAL_INDEX);
   }
@@ -511,10 +511,11 @@ class CodeGenerator: public AstVisitor {
                                  const InlineRuntimeLUT& new_entry,
                                  InlineRuntimeLUT* old_entry);

+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);

-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);

   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -616,6 +617,8 @@ class CodeGenerator: public AstVisitor {
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;

   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
@@ -623,7 +626,19 @@ class CodeGenerator: public AstVisitor {
 };


-// Flag that indicates whether how to generate code for the stub.
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
 enum GenericBinaryFlags {
   NO_GENERIC_BINARY_FLAGS = 0,
   NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
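ToBooleanStub moves into the header so the fast code generator can instantiate
it. Code stubs are keyed by their (MajorKey, MinorKey) pair in the stub cache,
so this stateless stub compiles once and is shared; its use later in this diff
looks like:

    // From VisitBinaryOperation below: the value to convert is pushed first.
    __ push(eax);
    ToBooleanStub stub;
    __ CallStub(&stub);
    __ test(eax, Operand(eax));  // The stub returns nonzero in eax for "true".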
@@ -632,10 +647,10 @@ enum GenericBinaryFlags {
 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value operation,
+  GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(operation),
+      : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),

272
deps/v8/src/ia32/disasm-ia32.cc

@@ -204,7 +204,7 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
     id->op_order_ = bm[i].op_order_;
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->type = type;
   }
 }
@@ -216,7 +216,7 @@ void InstructionTable::SetTableRange(InstructionType type,
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = mnem;
     id->type = type;
   }
@@ -226,7 +226,7 @@ void InstructionTable::SetTableRange(InstructionType type,
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = jump_conditional_mnem[b & 0x0F];
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -321,6 +321,8 @@ class DisassemblerIA32 {
   int SetCC(byte* data);
   int CMov(byte* data);
   int FPUInstruction(byte* data);
+  int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+  int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   void AppendToBuffer(const char* format, ...);
@@ -493,7 +495,7 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::F7Instruction(byte* data) {
-  assert(*data == 0xF7);
+  ASSERT_EQ(0xF7, *data);
   byte modrm = *(data+1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -526,7 +528,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
 int DisassemblerIA32::D1D3C1Instruction(byte* data) {
   byte op = *data;
-  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+  ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
   byte modrm = *(data+1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -560,7 +562,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
       default: UnimplementedInstruction();
     }
   }
-  assert(mnem != NULL);
+  ASSERT_NE(NULL, mnem);
   AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
   if (imm8 > 0) {
     AppendToBuffer("%d", imm8);
@@ -576,7 +578,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpShort(byte* data) {
-  assert(*data == 0xEB);
+  ASSERT_EQ(0xEB, *data);
   byte b = *(data+1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -586,7 +588,7 @@ int DisassemblerIA32::JumpShort(byte* data) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
   const char* mnem = jump_conditional_mnem[cond];
@@ -614,18 +616,18 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::SetCC(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   const char* mnem = set_conditional_mnem[cond];
   AppendToBuffer("%s ", mnem);
   PrintRightByteOperand(data+2);
-  return 3;  // includes 0x0F
+  return 3;  // Includes 0x0F.
 }


 // Returns number of bytes used, including *data.
 int DisassemblerIA32::CMov(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_move_mnem[cond];
   int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -635,107 +637,165 @@ int DisassemblerIA32::CMov(byte* data) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
-  byte b1 = *data;
-  byte b2 = *(data + 1);
-  if (b1 == 0xD9) {
-    const char* mnem = NULL;
-    switch (b2) {
-      case 0xE8: mnem = "fld1"; break;
-      case 0xEE: mnem = "fldz"; break;
-      case 0xE1: mnem = "fabs"; break;
-      case 0xE0: mnem = "fchs"; break;
-      case 0xF8: mnem = "fprem"; break;
-      case 0xF5: mnem = "fprem1"; break;
-      case 0xF7: mnem = "fincstp"; break;
-      case 0xE4: mnem = "ftst"; break;
-    }
-    if (mnem != NULL) {
-      AppendToBuffer("%s", mnem);
-      return 2;
-    } else if ((b2 & 0xF8) == 0xC8) {
-      AppendToBuffer("fxch st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data+1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case eax: mnem = "fld_s"; break;
-        case ebx: mnem = "fstp_s"; break;
-        default: UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDD) {
-    if ((b2 & 0xF8) == 0xC0) {
-      AppendToBuffer("ffree st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data+1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case eax: mnem = "fld_d"; break;
-        case ebx: mnem = "fstp_d"; break;
-        default: UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDB) {
-    int mod, regop, rm;
-    get_modrm(*(data+1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case eax: mnem = "fild_s"; break;
-      case edx: mnem = "fist_s"; break;
-      case ebx: mnem = "fistp_s"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDF) {
-    if (b2 == 0xE0) {
-      AppendToBuffer("fnstsw_ax");
-      return 2;
-    }
-    int mod, regop, rm;
-    get_modrm(*(data+1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case ebp: mnem = "fild_d"; break;
-      case edi: mnem = "fistp_d"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDC || b1 == 0xDE) {
-    bool is_pop = (b1 == 0xDE);
-    if (is_pop && b2 == 0xD9) {
-      AppendToBuffer("fcompp");
-      return 2;
-    }
-    const char* mnem = "FP0xDC";
-    switch (b2 & 0xF8) {
-      case 0xC0: mnem = "fadd"; break;
-      case 0xE8: mnem = "fsub"; break;
-      case 0xC8: mnem = "fmul"; break;
-      case 0xF8: mnem = "fdiv"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
-    return 2;
-  } else if (b1 == 0xDA && b2 == 0xE9) {
-    const char* mnem = "fucompp";
-    AppendToBuffer("%s", mnem);
-    return 2;
-  }
-  AppendToBuffer("Unknown FP instruction");
-  return 2;
+  byte escape_opcode = *data;
+  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  byte modrm_byte = *(data+1);
+
+  if (modrm_byte >= 0xC0) {
+    return RegisterFPUInstruction(escape_opcode, modrm_byte);
+  } else {
+    return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+  }
+}
+
+
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
+                                           int modrm_byte,
+                                           byte* modrm_start) {
+  const char* mnem = "?";
+  int regop = (modrm_byte >> 3) & 0x7;  // reg/op field of modrm byte.
+  switch (escape_opcode) {
+    case 0xD9: switch (regop) {
+        case 0: mnem = "fld_s"; break;
+        case 3: mnem = "fstp_s"; break;
+        case 7: mnem = "fstcw"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB: switch (regop) {
+        case 0: mnem = "fild_s"; break;
+        case 1: mnem = "fisttp_s"; break;
+        case 2: mnem = "fist_s"; break;
+        case 3: mnem = "fistp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD: switch (regop) {
+        case 0: mnem = "fld_d"; break;
+        case 3: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDF: switch (regop) {
+        case 5: mnem = "fild_d"; break;
+        case 7: mnem = "fistp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(modrm_start);
+  return count + 1;
+}
+
+
+int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
+                                             byte modrm_byte) {
+  bool has_register = false;  // Is the FPU register encoded in modrm_byte?
+  const char* mnem = "?";
+
+  switch (escape_opcode) {
+    case 0xD8:
+      UnimplementedInstruction();
+      break;
+
+    case 0xD9:
+      switch (modrm_byte & 0xF8) {
+        case 0xC8:
+          mnem = "fxch";
+          has_register = true;
+          break;
+        default:
+          switch (modrm_byte) {
+            case 0xE0: mnem = "fchs"; break;
+            case 0xE1: mnem = "fabs"; break;
+            case 0xE4: mnem = "ftst"; break;
+            case 0xE8: mnem = "fld1"; break;
+            case 0xEE: mnem = "fldz"; break;
+            case 0xF5: mnem = "fprem1"; break;
+            case 0xF7: mnem = "fincstp"; break;
+            case 0xF8: mnem = "fprem"; break;
+            case 0xFE: mnem = "fsin"; break;
+            case 0xFF: mnem = "fcos"; break;
+            default: UnimplementedInstruction();
+          }
+      }
+      break;
+
+    case 0xDA:
+      if (modrm_byte == 0xE9) {
+        mnem = "fucompp";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB:
+      if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomi";
+        has_register = true;
+      } else if (modrm_byte == 0xE2) {
+        mnem = "fclex";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDC:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "fadd"; break;
+        case 0xE8: mnem = "fsub"; break;
+        case 0xC8: mnem = "fmul"; break;
+        case 0xF8: mnem = "fdiv"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "ffree"; break;
+        case 0xD8: mnem = "fstp"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDE:
+      if (modrm_byte == 0xD9) {
+        mnem = "fcompp";
+      } else {
+        has_register = true;
+        switch (modrm_byte & 0xF8) {
+          case 0xC0: mnem = "faddp"; break;
+          case 0xE8: mnem = "fsubp"; break;
+          case 0xC8: mnem = "fmulp"; break;
+          case 0xF8: mnem = "fdivp"; break;
+          default: UnimplementedInstruction();
+        }
+      }
+      break;
+
+    case 0xDF:
+      if (modrm_byte == 0xE0) {
+        mnem = "fnstsw_ax";
+      } else if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomip";
+        has_register = true;
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+
+  if (has_register) {
+    AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+  } else {
+    AppendToBuffer("%s", mnem);
+  }
+  return 2;
 }
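The rewrite splits x87 decoding along the hardware's own axis: every FPU
instruction is an escape opcode in 0xD8..0xDF followed by a modrm byte, where
modrm >= 0xC0 selects the register form (the st(i) operand lives in the low
three bits) and smaller values select the memory form (the operation is chosen
by the reg/op field). The dispatch rule in three lines:

    bool is_register_form = (modrm_byte >= 0xC0);     // operand is st(i)
    int  reg_op_field     = (modrm_byte >> 3) & 0x7;  // selects the memory op
    int  st_index         = modrm_byte & 0x7;         // st(i), register form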

584
deps/v8/src/ia32/fast-codegen-ia32.cc

@@ -29,6 +29,7 @@
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "parser.h"

 namespace v8 {
 namespace internal {
@@ -75,6 +76,14 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
     __ bind(&ok);
   }

+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Body");
     VisitStatements(fun->body());
   }
@@ -84,6 +93,11 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
     // body.
     __ mov(eax, Factory::undefined_value());
     SetReturnPosition(fun);
+    if (FLAG_trace) {
+      __ push(eax);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
     __ RecordJSReturn();
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
@@ -94,19 +108,79 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
 }


-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ push(Operand(ebp, SlotOffset(source)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ push(Immediate(expr->handle()));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      __ pop(Operand(ebp, SlotOffset(destination)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      __ mov(Operand(esp, 0), source);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ push(esi);  // The context is the first argument.
+  __ push(Immediate(pairs));
+  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
 }


 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
-  Visit(stmt->expression());
-  __ pop(eax);
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ mov(eax, expr->AsLiteral()->handle());
+  } else {
+    Visit(expr);
+    Move(eax, expr->location());
+  }
+
+  if (FLAG_trace) {
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+
   __ RecordJSReturn();
   // Do not use the leave instruction here because it is too short to
   // patch with the code required by the debugger.
   __ mov(esp, ebp);
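The Move/DropAndMove helpers encode the fast code generator's stack discipline,
assuming the Location type this commit adds in src/location.h. As a sketch of
the invariant they maintain:

    // After Visit(expr):
    //   expr->location().is_temporary()  =>  value sits at Operand(esp, 0)
    //   expr->location().is_nowhere()    =>  no value was materialized
    //
    // So DropAndMove(destination, eax) either pops the slot (NOWHERE) or
    // overwrites top-of-stack with eax (TEMP), keeping stack height the
    // same on every path through a visitor.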
@@ -115,29 +189,240 @@ void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
 }


+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ push(esi);
+  __ push(Immediate(boilerplate));
+  __ CallRuntime(Runtime::kNewClosure, 2);
+  Move(expr->location(), eax);
+}
+
+
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
-  ASSERT(rewrite != NULL);
-
-  Slot* slot = rewrite->AsSlot();
-  ASSERT(slot != NULL);
-  { Comment cmnt(masm_, "[ Slot");
-    if (expr->location().is_temporary()) {
-      __ push(Operand(ebp, SlotOffset(slot)));
-    } else {
-      ASSERT(expr->location().is_nowhere());
-    }
-  }
-}
-
-
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  if (expr->location().is_temporary()) {
-    __ push(Immediate(expr->handle()));
-  } else {
-    ASSERT(expr->location().is_nowhere());
-  }
-}
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in ecx and the global
+    // object on the stack.
+    __ push(CodeGenerator::GlobalObject());
+    __ mov(ecx, expr->name());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+    // By emitting a nop we make sure that we do not have a test eax
+    // instruction after the call it is treated specially by the LoadIC code.
+    // Remember that the assembler may choose to do peephole optimization
+    // (eg, push/pop elimination).
+    __ nop();
+    DropAndMove(expr->location(), eax);
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Move(expr->location(), rewrite->AsSlot());
+  }
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label exists;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ebx = literals array.
+  // eax = boilerplate
+
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ mov(eax, FieldOperand(ebx, literal_offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &exists);
+  // Create boilerplate if it does not exist.
+  // Literal array (0).
+  __ push(ebx);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  // Constant properties (2).
+  __ push(Immediate(expr->constant_properties()));
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&exists);
+  // eax contains boilerplate.
+  // Clone boilerplate.
+  __ push(eax);
+  if (expr->depth() == 1) {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  }
+
+  // If result_saved == true: the result is saved on top of the stack.
+  // If result_saved == false: the result is not on the stack, just in eax.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(eax);  // Save result on the stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:  // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          Visit(value);
+          Move(eax, value->location());
+          __ mov(ecx, Immediate(key->handle()));
+          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          __ call(ic, RelocInfo::CODE_TARGET);
+          // StoreIC leaves the receiver on the stack.
+          break;
+        }
+        // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(eax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ mov(eax, Operand(esp, 0));  // Restore result into eax.
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(eax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+                          Smi::FromInt(1) :
+                          Smi::FromInt(0)));
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ mov(eax, Operand(esp, 0));  // Restore result into eax.
+        break;
+      default: UNREACHABLE();
+    }
+  }
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(eax);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ebx = literals array.
+  // eax = regexp literal.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ mov(eax, FieldOperand(ebx, literal_offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &done);
+  // Create regexp literal using runtime function
+  // Result will be in eax.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->pattern()));
+  __ push(Immediate(expr->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  // Label done:
+  __ bind(&done);
+  Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ mov(eax, FieldOperand(ebx, offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &make_clone);
+
+  // Instantiate the boilerplate.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(eax);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(eax);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(eax);  // Subexpression value.
+    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ mov(FieldOperand(ebx, offset), eax);
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(ebx, offset, eax, ecx);
+  }
+
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(eax);
+      break;
+  }
 }
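All three literal visitors follow the same caching pattern: the function's
literals array memoizes one boilerplate object per literal site, created
lazily through the runtime and then cloned (shallowly when depth == 1) for
each evaluation. The control flow as a hedged C++-style sketch, with
hypothetical helper names standing in for the runtime calls:

    // literals: the FixedArray hanging off the JSFunction.
    Object* boilerplate = literals->get(literal_index);
    if (boilerplate->IsUndefined()) {
      // First execution of this literal site: build the boilerplate once.
      boilerplate = CreateBoilerplate(literals, literal_index);  // assumed
    }
    // Every execution: clone, so the program cannot mutate the cache.
    Object* result = (depth == 1) ? CloneShallow(boilerplate)   // assumed
                                  : CloneDeep(boilerplate);     // assumed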
@@ -145,18 +430,265 @@ void FastCodeGenerator::VisitLiteral(Literal* expr) {
 void FastCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
   ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
-  Visit(expr->value());
+
+  // Left-hand side can only be a global or a (parameter or local) slot.
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && var->slot() != NULL);
-
-  if (expr->location().is_temporary()) {
-    __ mov(eax, Operand(esp, 0));
-    __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
-  } else {
-    ASSERT(expr->location().is_nowhere());
-    __ pop(Operand(ebp, SlotOffset(var->slot())));
-  }
-}
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
+
+  Expression* rhs = expr->value();
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in eax, variable name in ecx, and the global object
+    // on the stack.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      __ mov(eax, rhs->AsLiteral()->handle());
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      __ pop(eax);
+    }
+    __ mov(ecx, var->name());
+    __ push(CodeGenerator::GlobalObject());
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    DropAndMove(expr->location(), eax);
+  } else {
+    // Local or parameter assignment.
+
+    // Code for the right-hand side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ mov(eax, rhs->AsLiteral()->handle());
+      __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+      Move(expr->location(), eax);
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      switch (expr->location().type()) {
+        case Location::NOWHERE:
+          // Case 'var = temp'.  Discard right-hand-side temporary.
+          Move(var->slot(), rhs->location());
+          break;
+        case Location::TEMP:
+          // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+          // temporary on the stack.
+          __ mov(eax, Operand(esp, 0));
+          __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+          break;
+      }
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in ecx and the receiver on the stack.
+    __ mov(ecx, Immediate(key->AsLiteral()->handle()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a test eax
+    // instruction after the call it is treated specially by the LoadIC code.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call it is treated specially by the LoadIC code.
+    __ nop();
+    // Drop key left on the stack by IC.
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  switch (expr->location().type()) {
+    case Location::TEMP:
+      __ mov(Operand(esp, 0), eax);
+      break;
+    case Location::NOWHERE:
+      __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ push(Immediate(var->name()));
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  DropAndMove(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(node->expression());
+  ASSERT(node->expression()->location().is_temporary());
+
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+    // If location is temporary, it is already on the stack,
+    // so nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(node->position());
+
+  // Load function, arg_count into edi and eax.
+  __ Set(eax, Immediate(arg_count));
+  // Function is in esp[arg_count + 1].
+  __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in eax, or pop it.
+  DropAndMove(node->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+
+  __ CallRuntime(function, arg_count);
+  Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean or operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label eval_right, done;
+  Location destination = expr->location();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  // Use the shared ToBoolean stub to find the boolean value of the
+  // left-hand subexpression.  Load the value into eax to perform some
+  // inlined checks assumed by the stub.
+
+  // Compile the left-hand value into eax.  Put it on the stack if we may
+  // need it as the value of the whole expression.
+  if (left->AsLiteral() != NULL) {
+    __ mov(eax, left->AsLiteral()->handle());
+    if (destination.is_temporary()) __ push(eax);
+  } else {
+    Visit(left);
+    ASSERT(left->location().is_temporary());
+    switch (destination.type()) {
+      case Location::NOWHERE:
+        // Pop the left-hand value into eax because we will not need it
+        // as the final result.
+        __ pop(eax);
+        break;
+      case Location::TEMP:
+        // Copy the left-hand value into eax because we may need it as the
+        // final result.
+        __ mov(eax, Operand(esp, 0));
+        break;
+    }
+  }
+  // The left-hand value is in eax.  It is also on the stack iff the
+  // destination location is temporary.
+
+  // Perform fast checks assumed by the stub.
+  __ cmp(eax, Factory::undefined_value());  // The undefined value is false.
+  __ j(equal, &eval_right);
+  __ cmp(eax, Factory::true_value());  // True is true.
+  __ j(equal, &done);
+  __ cmp(eax, Factory::false_value());  // False is false.
+  __ j(equal, &eval_right);
+  ASSERT(kSmiTag == 0);
+  __ test(eax, Operand(eax));  // The smi zero is false.
+  __ j(zero, &eval_right);
+  __ test(eax, Immediate(kSmiTagMask));  // All other smis are true.
+  __ j(zero, &done);
+
+  // Call the stub for all other cases.
+  __ push(eax);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ test(eax, Operand(eax));  // The stub returns nonzero for true.
+  __ j(not_zero, &done);
+
+  __ bind(&eval_right);
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) {
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  // Save or discard the right-hand value as needed.
+  if (right->AsLiteral() != NULL) {
+    Move(destination, right->AsLiteral());
+  } else {
+    Visit(right);
+    Move(destination, right->location());
+  }
+
+  __ bind(&done);
 }
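The inlined checks ahead of the ToBooleanStub call mirror the stub's own fast
cases. The same logic as a plain C++ predicate, as a sketch; heap objects
beyond the three singletons fall through to the stub:

    // Via *known, reports whether the fast path could decide the value.
    bool FastToBoolean(Object* value, bool* known) {
      *known = true;
      if (value == Heap::undefined_value()) return false;
      if (value == Heap::true_value())      return true;
      if (value == Heap::false_value())     return false;
      intptr_t raw = reinterpret_cast<intptr_t>(value);
      if (raw == 0)                 return false;  // smi zero is false
      if ((raw & kSmiTagMask) == 0) return true;   // any other smi is true
      *known = false;  // heap object: let ToBooleanStub decide
      return false;
    }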

13
deps/v8/src/ia32/frames-ia32.cc

@@ -56,19 +56,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
   state->fp = fp;
   state->sp = sp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  // Determine frame type.
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    return EXIT_DEBUG;
-  } else {
-    return EXIT;
-  }
+  return EXIT;
 }


 void ExitFrame::Iterate(ObjectVisitor* v) const {
-  // Exit frames on IA-32 do not contain any pointers. The arguments
-  // are traversed as part of the expression stack of the calling
-  // frame.
+  v->VisitPointer(&code_slot());
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
 }
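With the debug mark replaced by a code object, the exit frame now holds exactly
one pointer the GC must know about. A sketch of the matching accessor in the
shared frames code, assuming the kCodeOffset constant introduced below:

    Object*& ExitFrame::code_slot() const {
      const int offset = ExitFrameConstants::kCodeOffset;  // -2 * kPointerSize
      return Memory::Object_at(fp() + offset);
    }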

2
deps/v8/src/ia32/frames-ia32.h

@@ -76,7 +76,7 @@ class EntryFrameConstants : public AllStatic {
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kCodeOffset = -2 * kPointerSize;
   static const int kSPOffset = -1 * kPointerSize;
   static const int kCallerFPOffset = 0 * kPointerSize;

367
deps/v8/src/ia32/ic-ia32.cc

@@ -301,7 +301,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));

   __ bind(&check_string);
   // The key is not a smi.
@ -342,6 +342,166 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
} }
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label slow, failed_allocation;
// Load name and receiver.
__ mov(eax, Operand(esp, kPointerSize));
__ mov(ecx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
__ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow, not_taken);
// Get the instance type from the map of the receiver.
__ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
// Check that the object is a JS object.
__ cmp(edx, JS_OBJECT_TYPE);
__ j(not_equal, &slow, not_taken);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: index (as a smi)
// ecx: JSObject
__ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow, not_taken);
// Check that the index is in range.
__ sar(eax, kSmiTagSize); // Untag the index.
__ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// eax: untagged index
// ecx: elements array
__ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
__ movsx_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalUnsignedByteArray:
__ mov_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalUnsignedShortArray:
__ mov_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(eax, Operand(ecx, eax, times_4, 0));
break;
case kExternalFloatArray:
__ fld_s(Operand(ecx, eax, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// eax: value
// For floating-point array type:
// FP(0): value
if (array_type == kExternalIntArray ||
array_type == kExternalUnsignedIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
// See Smi::IsValid for why this works.
__ mov(ebx, eax);
__ add(Operand(ebx), Immediate(0x40000000));
__ cmp(ebx, 0x80000000);
__ j(above_equal, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
// The test is different for unsigned int values. Since we need
// the Smi-encoded result to be treated as unsigned, we can't
// handle either of the top two bits being set in the value.
__ test(eax, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
__ shl(eax, kSmiTagSize);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (array_type == kExternalIntArray) {
__ push(eax);
__ fild_s(Operand(esp, 0));
__ pop(eax);
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(eax);
__ fild_d(Operand(esp, 0));
__ pop(eax);
__ pop(eax);
}
// FP(0): value
__ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
// Set the value.
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
// Set the value.
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ shl(eax, kSmiTagSize);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ ffree();
__ fincstp();
// Fall through to slow case.
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
}
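
Note (editor's sketch, not part of the diff): the int-range check above follows Smi::IsValid — on 32-bit targets a smi carries 31 bits. A minimal standalone C++ illustration of why the add/cmp pair works (FitsInSmi is our name, not V8's):

#include <cstdint>
#include <cstdio>

// Adding 0x40000000 maps the smi range [-2^30, 2^30 - 1] onto
// [0, 0x7FFFFFFF]; any result at or above 0x80000000 in the unsigned
// comparison must be boxed in a HeapNumber instead.
static bool FitsInSmi(int32_t value) {
  uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
  return shifted < 0x80000000u;
}

int main() {
  std::printf("%d\n", FitsInSmi(0x3FFFFFFF));   // 1: largest smi
  std::printf("%d\n", FitsInSmi(0x40000000));   // 0: must box
  std::printf("%d\n", FitsInSmi(-0x40000000));  // 1: smallest smi
  std::printf("%d\n", FitsInSmi(-0x40000001));  // 0: must box
  return 0;
}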
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   // -- eax    : value
@@ -395,15 +555,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ebx: index (as a smi)
   __ j(below, &fast, taken);

-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case: call runtime.
   __ bind(&slow);
-  __ pop(ecx);
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(eax);
-  __ push(ecx);
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));

   // Check whether the elements is a pixel array.
   // eax: value
@@ -485,6 +639,201 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 }
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label slow, check_heap_number;
// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Get the key from the stack.
__ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
__ test(ebx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Check that the object is a JS object.
__ cmp(ecx, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: JSObject
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow);
// Check that the index is in range.
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// ecx: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
__ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
__ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(ecx, ebx, times_4, 0), eax);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(eax);
__ fild_s(Operand(esp, 0));
__ pop(eax);
__ fstp_s(Operand(ecx, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ bind(&check_heap_number);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edx, eax); // Save the value.
__ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// ecx: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(ecx, ebx, times_4, 0));
__ mov(eax, edx); // Return the original value.
__ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN.
Label is_nan;
__ fucomi(0);
__ j(parity_even, &is_nan);
if (array_type != kExternalUnsignedIntArray) {
__ push(eax); // Make room on stack
__ fistp_s(Operand(esp, 0));
__ pop(eax);
} else {
// fistp stores values as signed integers.
// To represent the entire range, we need to store as a 64-bit
// int and discard the high 32 bits.
__ push(eax); // Make room on stack
__ push(eax); // Make room on stack
__ fistp_d(Operand(esp, 0));
__ pop(eax);
__ mov(Operand(esp, 0), eax);
__ pop(eax);
}
// eax: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
// We also need to explicitly check for +/-Infinity. These are
// converted to MIN_INT, but we need to be careful not to
// confuse with legal uses of MIN_INT.
Label not_infinity;
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
__ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
__ and_(edi, 0x7FF0);
__ cmp(edi, 0x7FF0);
__ j(not_equal, &not_infinity);
__ mov(eax, 0);
__ bind(&not_infinity);
__ mov(Operand(ecx, ebx, times_4, 0), eax);
break;
}
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ bind(&is_nan);
__ ffree();
__ fincstp();
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), 0);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov(eax, 0);
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
break;
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
}
// Slow case: call runtime.
__ bind(&slow);
Generate(masm, ExternalReference(Runtime::kSetProperty));
}
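
Note (editor's sketch, not part of the diff): the +/-Infinity filter above reads the 16 bits at HeapNumber::kValueOffset + 6, which on little-endian ia32 hold the sign and the 11-bit exponent of the IEEE-754 double; an all-ones exponent (mask 0x7FF0) means NaN or Infinity, and NaN was already rejected by the fucomi test. A standalone illustration using a portable shift instead of the byte offset (HasMaxExponent is our name):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static bool HasMaxExponent(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));           // IEEE-754 bit pattern
  uint16_t high = static_cast<uint16_t>(bits >> 48);  // sign + exponent bits
  return (high & 0x7FF0) == 0x7FF0;                   // all-ones exponent
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  std::printf("%d\n", HasMaxExponent(inf));    // 1
  std::printf("%d\n", HasMaxExponent(-inf));   // 1
  std::printf("%d\n", HasMaxExponent(1e308));  // 0: finite
  return 0;
}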
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);

130
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -319,11 +319,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
 void MacroAssembler::FCmp() {
-  fucompp();
-  push(eax);
-  fnstsw_ax();
-  sahf();
-  pop(eax);
+  if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
+    fucomip();
+    ffree(0);
+    fincstp();
+  } else {
+    fucompp();
+    push(eax);
+    fnstsw_ax();
+    sahf();
+    pop(eax);
+  }
 }
@@ -349,10 +355,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   leave();
 }

-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
   // Setup the frame structure on the stack.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -363,23 +366,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
   // Reserve room for entry stack pointer and push the debug marker.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
-  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    push(Immediate(0));
+  } else {
+    push(Immediate(CodeObject()));
+  }

   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
   ExternalReference context_address(Top::k_context_address);
   mov(Operand::StaticVariable(c_entry_fp_address), ebp);
   mov(Operand::StaticVariable(context_address), esi);
+}

-  // Setup argc and argv in callee-saved registers.
-  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, Operand(eax));
-  lea(esi, Operand(ebp, eax, times_4, offset));

+void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // TODO(1243899): This should be symmetric to
     // CopyRegistersFromStackToMemory() but it isn't!  esp is assumed
     // correct here, but computed for the other call.  Very error
@@ -390,8 +394,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
   }
 #endif

-  // Reserve space for two arguments: argc and argv.
-  sub(Operand(esp), Immediate(2 * kPointerSize));
+  // Reserve space for arguments.
+  sub(Operand(esp), Immediate(argc * kPointerSize));

   // Get the required frame alignment for the OS.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -405,15 +409,39 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
 }

-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+  EnterExitFramePrologue(mode);
+
+  // Setup argc and argv in callee-saved registers.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  mov(edi, Operand(eax));
+  lea(esi, Operand(ebp, eax, times_4, offset));
+
+  EnterExitFrameEpilogue(mode, 2);
+}
+
+void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
+                                       int stack_space,
+                                       int argc) {
+  EnterExitFramePrologue(mode);
+
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
+
+  EnterExitFrameEpilogue(mode, argc);
+}
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // It's okay to clobber register ebx below because we don't need
     // the function pointer after this.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     lea(ebx, Operand(ebp, kOffset));
     CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
   }
@@ -767,6 +795,24 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
 }
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
AllocateInNewSpace(HeapNumber::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Set the map.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
}
 void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
                                       Register result,
                                       Register op,
@@ -907,6 +953,48 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
 }
void MacroAssembler::PushHandleScope(Register scratch) {
// Push the number of extensions, smi-tagged so the gc will ignore it.
ExternalReference extensions_address =
ExternalReference::handle_scope_extensions_address();
mov(scratch, Operand::StaticVariable(extensions_address));
ASSERT_EQ(0, kSmiTag);
shl(scratch, kSmiTagSize);
push(scratch);
mov(Operand::StaticVariable(extensions_address), Immediate(0));
// Push next and limit pointers which will be wordsize aligned and
// hence automatically smi tagged.
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
push(Operand::StaticVariable(next_address));
ExternalReference limit_address =
ExternalReference::handle_scope_limit_address();
push(Operand::StaticVariable(limit_address));
}
void MacroAssembler::PopHandleScope(Register scratch) {
ExternalReference extensions_address =
ExternalReference::handle_scope_extensions_address();
Label write_back;
mov(scratch, Operand::StaticVariable(extensions_address));
cmp(Operand(scratch), Immediate(0));
j(equal, &write_back);
CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
bind(&write_back);
ExternalReference limit_address =
ExternalReference::handle_scope_limit_address();
pop(Operand::StaticVariable(limit_address));
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
pop(Operand::StaticVariable(next_address));
pop(scratch);
shr(scratch, kSmiTagSize);
mov(Operand::StaticVariable(extensions_address), scratch);
}
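
Note (editor's sketch, not part of the diff): PushHandleScope/PopHandleScope keep the saved handle-scope data GC-safe on the stack by smi-tagging it. A small model, assuming the 32-bit tagging scheme (kSmiTag == 0, kSmiTagSize == 1; function names ours):

#include <cassert>
#include <cstdint>

// An integer n is pushed as n << 1: the low bit stays clear, so a stack
// scan reads the word as a smi, never as a heap pointer. The next and
// limit pointers are word aligned, so their low bits are already clear
// and they can be pushed without extra tagging.
static intptr_t SmiEncode(intptr_t n) { return n << 1; }
static intptr_t SmiDecode(intptr_t w) { return w >> 1; }

int main() {
  intptr_t saved = SmiEncode(42);  // what PushHandleScope pushes
  assert((saved & 1) == 0);        // looks like a smi to the GC
  assert(SmiDecode(saved) == 42);  // what PopHandleScope restores
  return 0;
}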
 void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
   // Set the entry point and jump to the C entry runtime stub.
   mov(ebx, Immediate(ext));

25
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -77,16 +77,18 @@ class MacroAssembler: public Assembler {
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register eax and
+  // Enter specific kind of exit frame; either in normal or debug mode.
+  // Expects the number of arguments in register eax and
   // sets up the number of arguments in register edi and the pointer
   // to the first argument in register esi.
-  void EnterExitFrame(StackFrame::Type type);
+  void EnterExitFrame(ExitFrame::Mode mode);
+
+  void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);

   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame(StackFrame::Type type);
+  void LeaveExitFrame(ExitFrame::Mode mode);

   // ---------------------------------------------------------------------------
@@ -206,6 +208,15 @@ class MacroAssembler: public Assembler {
   // un-done.
   void UndoAllocationInNewSpace(Register object);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
   // ---------------------------------------------------------------------------
   // Support functions.

@@ -260,6 +271,9 @@ class MacroAssembler: public Assembler {
                          int num_arguments,
                          int result_size);
void PushHandleScope(Register scratch);
void PopHandleScope(Register scratch);
   // Jump to a runtime routine.
   void JumpToRuntime(const ExternalReference& ext);

@@ -337,6 +351,9 @@ class MacroAssembler: public Assembler {
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
void EnterExitFramePrologue(ExitFrame::Mode mode);
void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
   // Allocation support helpers.
   void LoadAllocationTopHelper(Register result,
                                Register result_end,

20
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -1093,17 +1093,15 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
 void RegExpMacroAssemblerIA32::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    Label no_stack_overflow;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
-    __ j(above, &no_stack_overflow);
+  Label no_stack_overflow;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+  __ j(above, &no_stack_overflow);

-    SafeCall(&stack_overflow_label_);
+  SafeCall(&stack_overflow_label_);

-    __ bind(&no_stack_overflow);
-  }
+  __ bind(&no_stack_overflow);
 }

@@ -1163,10 +1161,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
 }

-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  __ int3();  // Unused on ia32.
-}

 #undef __
 #endif  // V8_NATIVE_REGEXP

39
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -776,20 +776,39 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
   CheckPrototypes(object, receiver, holder,
                   scratch1, scratch2, name, miss);

-  // Push the arguments on the JS stack of the caller.
-  __ pop(scratch2);  // remove return address
+  Handle<AccessorInfo> callback_handle(callback);
+
+  Register other = reg.is(scratch1) ? scratch2 : scratch1;
+  __ EnterInternalFrame();
+  __ PushHandleScope(other);
+  // Push the stack address where the list of arguments ends
+  __ mov(other, esp);
+  __ sub(Operand(other), Immediate(2 * kPointerSize));
+  __ push(other);
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  __ mov(reg, Immediate(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(reg);
-  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+  __ mov(other, Immediate(callback_handle));
+  __ push(other);
+  __ push(FieldOperand(other, AccessorInfo::kDataOffset));  // data
   __ push(name_reg);  // name
-  __ push(scratch2);  // restore return address
+  // Save a pointer to where we pushed the arguments pointer.
+  // This will be passed as the const Arguments& to the C++ callback.
+  __ mov(eax, esp);
+  __ add(Operand(eax), Immediate(5 * kPointerSize));
+  __ mov(ebx, esp);

-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 5, 1);
+  // Do call through the api.
+  ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ApiGetterEntryStub stub(callback_handle, &fun);
+  __ CallStub(&stub);
+
+  Register tmp = other.is(eax) ? reg : other;
+  __ PopHandleScope(tmp);
+  __ LeaveInternalFrame();
+
+  __ ret(0);
 }

69
deps/v8/src/ic.cc

@@ -265,6 +265,55 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
 }
Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
case JSObject::EXTERNAL_SHORT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return Builtins::builtin(
Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
case JSObject::EXTERNAL_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
default:
UNREACHABLE();
return NULL;
}
}
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }

@@ -823,7 +872,14 @@ Object* KeyedLoadIC::Load(State state,
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();

   if (use_ic) {
-    set_target(generic_stub());
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);

     // For JSObjects that are not value wrappers and that do not have
     // indexed interceptors, we initialize the inlined fast case (if
     // present) by patching the inlined map check.
@@ -1110,7 +1166,16 @@ Object* KeyedStoreIC::Store(State state,
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));

-  if (use_ic) set_target(generic_stub());
+  if (use_ic) {
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);
+  }

   // Set the property.
   return Runtime::SetObjectProperty(object, key, value, NONE);

16
deps/v8/src/ic.h

@@ -269,6 +269,13 @@ class KeyedLoadIC: public IC {
   static void GeneratePreMonomorphic(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);

@@ -294,6 +301,7 @@ class KeyedLoadIC: public IC {
   static Code* pre_monomorphic_stub() {
     return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);

   static void Clear(Address address, Code* target);
@@ -358,6 +366,13 @@ class KeyedStoreIC: public IC {
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateExtendStorage(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
// operating upon external array types but fall back to the runtime
// for all other types.
static void GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type);
   // Clear the inlined version so the IC is always hit.
   static void ClearInlinedVersion(Address address);

@@ -384,6 +399,7 @@ class KeyedStoreIC: public IC {
   static Code* generic_stub() {
     return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);

   static void Clear(Address address, Code* target);

1
deps/v8/src/list.h

@@ -48,6 +48,7 @@ template <typename T, class P>
 class List {
  public:
+  List() { Initialize(0); }
   INLINE(explicit List(int capacity)) { Initialize(capacity); }
   INLINE(~List()) { DeleteData(data_); }

7
deps/v8/src/location.h

@@ -35,16 +35,17 @@ namespace internal {
 class Location BASE_EMBEDDED {
  public:
+  enum Type { NOWHERE, TEMP };
+
   static Location Temporary() { return Location(TEMP); }
   static Location Nowhere() { return Location(NOWHERE); }
-  static Location Constant() { return Location(CONSTANT); }

   bool is_temporary() { return type_ == TEMP; }
   bool is_nowhere() { return type_ == NOWHERE; }

- private:
-  enum Type { TEMP, NOWHERE, CONSTANT };
+  Type type() { return type_; }

+ private:
   explicit Location(Type type) : type_(type) {}

   Type type_;

32
deps/v8/src/log.cc

@@ -125,6 +125,9 @@ class Profiler: public Thread {
   bool overflow_;  // Tell whether a buffer overflow has occurred.
   Semaphore* buffer_semaphore_;  // Semaphore used for buffer synchronization.

+  // Tells whether profiler is engaged, that is, the processing thread is
+  // started.
+  bool engaged_;
+
   // Tells whether worker thread should continue running.
   bool running_;
@@ -243,17 +246,25 @@ void SlidingStateWindow::AddState(StateTag state) {
 //
 // Profiler implementation.
 //
-Profiler::Profiler() {
-  buffer_semaphore_ = OS::CreateSemaphore(0);
-  head_ = 0;
-  tail_ = 0;
-  overflow_ = false;
-  running_ = false;
+Profiler::Profiler()
+    : head_(0),
+      tail_(0),
+      overflow_(false),
+      buffer_semaphore_(OS::CreateSemaphore(0)),
+      engaged_(false),
+      running_(false) {
 }

 void Profiler::Engage() {
-  OS::LogSharedLibraryAddresses();
+  if (engaged_) return;
+  engaged_ = true;
+
+  // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
+  // http://code.google.com/p/v8/issues/detail?id=487
+  if (!FLAG_prof_lazy) {
+    OS::LogSharedLibraryAddresses();
+  }

   // Start thread processing the profiler buffer.
   running_ = true;
@@ -268,6 +279,8 @@ void Profiler::Engage() {
 void Profiler::Disengage() {
+  if (!engaged_) return;
+
   // Stop receiving ticks.
   Logger::ticker_->ClearProfiler();
@@ -1053,6 +1066,7 @@ void Logger::ResumeProfiler(int flags) {
   }
   if (modules_to_enable & PROFILER_MODULE_CPU) {
     if (FLAG_prof_lazy) {
+      profiler_->Engage();
       LOG(UncheckedStringEvent("profiler", "resume"));
       FLAG_log_code = true;
       LogCompiledFunctions();
@@ -1245,7 +1259,9 @@ bool Logger::Setup() {
     } else {
       is_logging_ = true;
     }
-    profiler_->Engage();
+    if (!FLAG_prof_lazy) {
+      profiler_->Engage();
+    }
   }

   LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);

176
deps/v8/src/objects-debug.cc

@@ -117,6 +117,27 @@ void HeapObject::HeapObjectPrint() {
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayPrint();
       break;
case EXTERNAL_BYTE_ARRAY_TYPE:
ExternalByteArray::cast(this)->ExternalByteArrayPrint();
break;
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayPrint();
break;
case EXTERNAL_SHORT_ARRAY_TYPE:
ExternalShortArray::cast(this)->ExternalShortArrayPrint();
break;
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
ExternalUnsignedShortArray::cast(this)->ExternalUnsignedShortArrayPrint();
break;
case EXTERNAL_INT_ARRAY_TYPE:
ExternalIntArray::cast(this)->ExternalIntArrayPrint();
break;
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint();
break;
case EXTERNAL_FLOAT_ARRAY_TYPE:
ExternalFloatArray::cast(this)->ExternalFloatArrayPrint();
break;
     case FILLER_TYPE:
       PrintF("filler");
       break;

@@ -196,6 +217,28 @@ void HeapObject::HeapObjectVerify() {
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayVerify();
       break;
case EXTERNAL_BYTE_ARRAY_TYPE:
ExternalByteArray::cast(this)->ExternalByteArrayVerify();
break;
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
break;
case EXTERNAL_SHORT_ARRAY_TYPE:
ExternalShortArray::cast(this)->ExternalShortArrayVerify();
break;
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
ExternalUnsignedShortArray::cast(this)->
ExternalUnsignedShortArrayVerify();
break;
case EXTERNAL_INT_ARRAY_TYPE:
ExternalIntArray::cast(this)->ExternalIntArrayVerify();
break;
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
break;
case EXTERNAL_FLOAT_ARRAY_TYPE:
ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
break;
     case CODE_TYPE:
       Code::cast(this)->CodeVerify();
       break;

@@ -274,6 +317,41 @@ void PixelArray::PixelArrayPrint() {
 }
void ExternalByteArray::ExternalByteArrayPrint() {
PrintF("external byte array");
}
void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint() {
PrintF("external unsigned byte array");
}
void ExternalShortArray::ExternalShortArrayPrint() {
PrintF("external short array");
}
void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint() {
PrintF("external unsigned short array");
}
void ExternalIntArray::ExternalIntArrayPrint() {
PrintF("external int array");
}
void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint() {
PrintF("external unsigned int array");
}
void ExternalFloatArray::ExternalFloatArrayPrint() {
PrintF("external float array");
}
 void ByteArray::ByteArrayVerify() {
   ASSERT(IsByteArray());
 }

@@ -284,6 +362,41 @@ void PixelArray::PixelArrayVerify() {
 }
void ExternalByteArray::ExternalByteArrayVerify() {
ASSERT(IsExternalByteArray());
}
void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
ASSERT(IsExternalUnsignedByteArray());
}
void ExternalShortArray::ExternalShortArrayVerify() {
ASSERT(IsExternalShortArray());
}
void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
ASSERT(IsExternalUnsignedShortArray());
}
void ExternalIntArray::ExternalIntArrayVerify() {
ASSERT(IsExternalIntArray());
}
void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
ASSERT(IsExternalUnsignedIntArray());
}
void ExternalFloatArray::ExternalFloatArrayVerify() {
ASSERT(IsExternalFloatArray());
}
 void JSObject::PrintProperties() {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();

@@ -345,6 +458,58 @@ void JSObject::PrintElements() {
       }
       break;
     }
case EXTERNAL_BYTE_ELEMENTS: {
ExternalByteArray* p = ExternalByteArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
ExternalUnsignedByteArray* p =
ExternalUnsignedByteArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_SHORT_ELEMENTS: {
ExternalShortArray* p = ExternalShortArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
ExternalUnsignedShortArray* p =
ExternalUnsignedShortArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_INT_ELEMENTS: {
ExternalIntArray* p = ExternalIntArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
ExternalUnsignedIntArray* p =
ExternalUnsignedIntArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %d\n", i, static_cast<int>(p->get(i)));
}
break;
}
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalFloatArray* p = ExternalFloatArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(" %d: %f\n", i, p->get(i));
}
break;
}
     case DICTIONARY_ELEMENTS:
       elements()->Print();
       break;

@@ -433,6 +598,16 @@ static const char* TypeToString(InstanceType type) {
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
     case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_INT_ARRAY";
case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
     case FILLER_TYPE: return "FILLER";
     case JS_OBJECT_TYPE: return "JS_OBJECT";
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";

@@ -804,6 +979,7 @@ void AccessorInfo::AccessorInfoVerify() {
   VerifyPointer(name());
   VerifyPointer(data());
   VerifyPointer(flag());
+  VerifyPointer(load_stub_cache());
 }

 void AccessorInfo::AccessorInfoPrint() {

286
deps/v8/src/objects-inl.h

@@ -360,6 +360,65 @@ bool Object::IsPixelArray() {
 }
bool Object::IsExternalArray() {
if (!Object::IsHeapObject())
return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
}
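
Note (editor's sketch, not part of the diff): the range test above is only sound because the external array instance types are declared contiguously, byte first and float last. A stand-in illustration of the pattern (this enum is hypothetical, not the real instance-type list):

enum InstanceTypeModel {
  EXTERNAL_BYTE_MODEL,
  EXTERNAL_UNSIGNED_BYTE_MODEL,
  EXTERNAL_SHORT_MODEL,
  EXTERNAL_UNSIGNED_SHORT_MODEL,
  EXTERNAL_INT_MODEL,
  EXTERNAL_UNSIGNED_INT_MODEL,
  EXTERNAL_FLOAT_MODEL,  // must remain last for the range check
};

static bool IsExternalArrayType(InstanceTypeModel t) {
  return t >= EXTERNAL_BYTE_MODEL && t <= EXTERNAL_FLOAT_MODEL;
}

int main() {
  return IsExternalArrayType(EXTERNAL_SHORT_MODEL) ? 0 : 1;
}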
bool Object::IsExternalByteArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_BYTE_ARRAY_TYPE;
}
bool Object::IsExternalUnsignedByteArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
}
bool Object::IsExternalShortArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_SHORT_ARRAY_TYPE;
}
bool Object::IsExternalUnsignedShortArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
}
bool Object::IsExternalIntArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_INT_ARRAY_TYPE;
}
bool Object::IsExternalUnsignedIntArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
}
bool Object::IsExternalFloatArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
EXTERNAL_FLOAT_ARRAY_TYPE;
}
 bool Object::IsFailure() {
   return HAS_FAILURE_TAG(this);
 }

@@ -886,6 +945,25 @@ HeapObject* MapWord::ToForwardingAddress() {
 }
bool MapWord::IsSerializationAddress() {
return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}
MapWord MapWord::FromSerializationAddress(int raw) {
// When the map word is being used as a serialization address we Smi-encode
// the serialization address (which is always a smallish positive integer).
return MapWord(reinterpret_cast<uintptr_t>(Smi::FromInt(raw)));
}
int MapWord::ToSerializationAddress() {
// When the map word is being used as a serialization address we treat the
// map word as a Smi and get the small integer that it encodes.
return reinterpret_cast<Smi*>(value_)->value();
}
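
Note (editor's sketch, not part of the diff): during serialization the map word is reused to hold a small integer address, distinguished from a real map pointer by its smi tag. A model of the round trip (assuming the 32-bit encoding value << 1 with a zero tag bit; function names ours):

#include <cassert>
#include <cstdint>

static uintptr_t FromSerializationAddress(int raw) {
  return static_cast<uintptr_t>(raw) << 1;  // like Smi::FromInt(raw)
}

static int ToSerializationAddress(uintptr_t word) {
  return static_cast<int>(word >> 1);       // like Smi::value()
}

static bool IsSerializationAddress(uintptr_t word) {
  return (word & 1) == 0;                   // like HAS_SMI_TAG
}

int main() {
  uintptr_t w = FromSerializationAddress(1234);
  assert(IsSerializationAddress(w));
  assert(ToSerializationAddress(w) == 1234);
  return 0;
}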
 bool MapWord::IsMarked() {
   return (value_ & kMarkingMask) == 0;
 }

@@ -1084,14 +1162,16 @@ ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 Array* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(array->IsFixedArray() || array->IsPixelArray());
+  ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
+         array->IsExternalArray());
   return reinterpret_cast<Array*>(array);
 }

 void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(value->IsFixedArray() || value->IsPixelArray());
+  ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
+         value->IsExternalArray());
   WRITE_FIELD(this, kElementsOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
 }
@@ -1554,6 +1634,14 @@ CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(Proxy)
 CAST_ACCESSOR(ByteArray)
 CAST_ACCESSOR(PixelArray)
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalByteArray)
CAST_ACCESSOR(ExternalUnsignedByteArray)
CAST_ACCESSOR(ExternalShortArray)
CAST_ACCESSOR(ExternalUnsignedShortArray)
CAST_ACCESSOR(ExternalIntArray)
CAST_ACCESSOR(ExternalUnsignedIntArray)
CAST_ACCESSOR(ExternalFloatArray)
 CAST_ACCESSOR(Struct)

@@ -1819,9 +1907,9 @@ void ExternalAsciiString::set_resource(
 Map* ExternalAsciiString::StringMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_ascii_string_map();
   } else {
     map = Heap::long_external_ascii_string_map();
@@ -1833,9 +1921,9 @@ Map* ExternalAsciiString::StringMap(int length) {
 Map* ExternalAsciiString::SymbolMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_ascii_symbol_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_ascii_symbol_map();
   } else {
     map = Heap::long_external_ascii_symbol_map();
@@ -1858,9 +1946,9 @@ void ExternalTwoByteString::set_resource(
 Map* ExternalTwoByteString::StringMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_string_map();
   } else {
     map = Heap::long_external_string_map();
@@ -1872,9 +1960,9 @@ Map* ExternalTwoByteString::StringMap(int length) {
 Map* ExternalTwoByteString::SymbolMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_symbol_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_symbol_map();
   } else {
     map = Heap::long_external_symbol_map();
@@ -1938,6 +2026,116 @@ void PixelArray::set(int index, uint8_t value) {
 }
void* ExternalArray::external_pointer() {
intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
return reinterpret_cast<void*>(ptr);
}
void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
intptr_t ptr = reinterpret_cast<intptr_t>(value);
WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
}
int8_t ExternalByteArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
return ptr[index];
}
void ExternalByteArray::set(int index, int8_t value) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
ptr[index] = value;
}
uint8_t ExternalUnsignedByteArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
return ptr[index];
}
void ExternalUnsignedByteArray::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
ptr[index] = value;
}
int16_t ExternalShortArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
return ptr[index];
}
void ExternalShortArray::set(int index, int16_t value) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
ptr[index] = value;
}
uint16_t ExternalUnsignedShortArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
return ptr[index];
}
void ExternalUnsignedShortArray::set(int index, uint16_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
ptr[index] = value;
}
int32_t ExternalIntArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
return ptr[index];
}
void ExternalIntArray::set(int index, int32_t value) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
ptr[index] = value;
}
uint32_t ExternalUnsignedIntArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
return ptr[index];
}
void ExternalUnsignedIntArray::set(int index, uint32_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
ptr[index] = value;
}
float ExternalFloatArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
return ptr[index];
}
void ExternalFloatArray::set(int index, float value) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
ptr[index] = value;
}
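
Note (editor's sketch, not part of the diff): each accessor above follows the same shape — bounds-check in debug mode, then index raw embedder-owned memory at the element type. A minimal model (not the real heap layout; names ours):

#include <cassert>
#include <cstdint>

struct ExternalArrayModel {
  int length;
  void* external_pointer;  // backing store owned by the embedder, not the GC
};

static int16_t GetShort(const ExternalArrayModel& a, int index) {
  assert(index >= 0 && index < a.length);
  return static_cast<int16_t*>(a.external_pointer)[index];
}

static void SetShort(const ExternalArrayModel& a, int index, int16_t value) {
  assert(index >= 0 && index < a.length);
  static_cast<int16_t*>(a.external_pointer)[index] = value;
}

int main() {
  int16_t storage[4] = {0, 0, 0, 0};
  ExternalArrayModel a = {4, storage};
  SetShort(a, 2, -7);
  assert(GetShort(a, 2) == -7);
  return 0;
}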
 int Map::instance_size() {
   return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
 }

@@ -2238,6 +2436,7 @@ ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 ACCESSORS(AccessorInfo, name, Object, kNameOffset)
 ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset)

 ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
 ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -2646,6 +2845,25 @@ JSObject::ElementsKind JSObject::GetElementsKind() {
     ASSERT(array->IsDictionary());
     return DICTIONARY_ELEMENTS;
   }
if (array->IsExternalArray()) {
switch (array->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE:
return EXTERNAL_BYTE_ELEMENTS;
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
case EXTERNAL_SHORT_ARRAY_TYPE:
return EXTERNAL_SHORT_ELEMENTS;
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
case EXTERNAL_INT_ARRAY_TYPE:
return EXTERNAL_INT_ELEMENTS;
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
return EXTERNAL_UNSIGNED_INT_ELEMENTS;
default:
ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
return EXTERNAL_FLOAT_ELEMENTS;
}
}
   ASSERT(array->IsPixelArray());
   return PIXEL_ELEMENTS;
 }

@@ -2666,6 +2884,52 @@ bool JSObject::HasPixelElements() {
 }
bool JSObject::HasExternalArrayElements() {
return (HasExternalByteElements() ||
HasExternalUnsignedByteElements() ||
HasExternalShortElements() ||
HasExternalUnsignedShortElements() ||
HasExternalIntElements() ||
HasExternalUnsignedIntElements() ||
HasExternalFloatElements());
}
bool JSObject::HasExternalByteElements() {
return GetElementsKind() == EXTERNAL_BYTE_ELEMENTS;
}
bool JSObject::HasExternalUnsignedByteElements() {
return GetElementsKind() == EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
}
bool JSObject::HasExternalShortElements() {
return GetElementsKind() == EXTERNAL_SHORT_ELEMENTS;
}
bool JSObject::HasExternalUnsignedShortElements() {
return GetElementsKind() == EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
}
bool JSObject::HasExternalIntElements() {
return GetElementsKind() == EXTERNAL_INT_ELEMENTS;
}
bool JSObject::HasExternalUnsignedIntElements() {
return GetElementsKind() == EXTERNAL_UNSIGNED_INT_ELEMENTS;
}
bool JSObject::HasExternalFloatElements() {
return GetElementsKind() == EXTERNAL_FLOAT_ELEMENTS;
}
 bool JSObject::HasNamedInterceptor() {
   return map()->has_named_interceptor();
 }

@@ -2712,7 +2976,7 @@ StringHasher::StringHasher(int length)
 bool StringHasher::has_trivial_hash() {
-  return length_ > String::kMaxMediumStringSize;
+  return length_ > String::kMaxMediumSize;
 }

374
deps/v8/src/objects.cc

@@ -751,10 +751,11 @@ Object* String::TryFlatten() {
 bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<uc16> smart_chars = this->ToWideCString();
+    SmartPointer<uc16> smart_chars(NewArray<uc16>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length() * sizeof(**smart_chars)) == 0);
@@ -794,10 +795,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
 bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<char> smart_chars = this->ToCString();
+    SmartPointer<char> smart_chars(NewArray<char>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length()*sizeof(**smart_chars)) == 0);
@@ -837,7 +839,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
 void String::StringShortPrint(StringStream* accumulator) {
   int len = length();
-  if (len > kMaxMediumStringSize) {
+  if (len > kMaxMediumSize) {
     accumulator->Add("<Very long string[%u]>", len);
     return;
   }

@@ -1005,6 +1007,34 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
     case PIXEL_ARRAY_TYPE:
       accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
       break;
case EXTERNAL_BYTE_ARRAY_TYPE:
accumulator->Add("<ExternalByteArray[%u]>",
ExternalByteArray::cast(this)->length());
break;
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
accumulator->Add("<ExternalUnsignedByteArray[%u]>",
ExternalUnsignedByteArray::cast(this)->length());
break;
case EXTERNAL_SHORT_ARRAY_TYPE:
accumulator->Add("<ExternalShortArray[%u]>",
ExternalShortArray::cast(this)->length());
break;
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
accumulator->Add("<ExternalUnsignedShortArray[%u]>",
ExternalUnsignedShortArray::cast(this)->length());
break;
case EXTERNAL_INT_ARRAY_TYPE:
accumulator->Add("<ExternalIntArray[%u]>",
ExternalIntArray::cast(this)->length());
break;
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
accumulator->Add("<ExternalUnsignedIntArray[%u]>",
ExternalUnsignedIntArray::cast(this)->length());
break;
case EXTERNAL_FLOAT_ARRAY_TYPE:
accumulator->Add("<ExternalFloatArray[%u]>",
ExternalFloatArray::cast(this)->length());
break;
case SHARED_FUNCTION_INFO_TYPE:
accumulator->Add("<SharedFunctionInfo>");
break;
@@ -1147,6 +1177,13 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
case EXTERNAL_SHORT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
case EXTERNAL_INT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
break;
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
@@ -1214,7 +1251,8 @@ String* JSObject::class_name() {

String* JSObject::constructor_name() {
if (IsJSFunction()) {
-return Heap::function_class_symbol();
+return JSFunction::cast(this)->IsBoilerplate() ?
+    Heap::function_class_symbol() : Heap::closure_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -2237,7 +2275,7 @@ Object* JSObject::TransformToFastProperties(int unused_property_fields) {

Object* JSObject::NormalizeElements() {
-ASSERT(!HasPixelElements());
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
if (HasDictionaryElements()) return this;
// Get number of entries.
@@ -2322,7 +2360,7 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) {

Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
                                               DeleteMode mode) {
-ASSERT(!HasPixelElements());
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
@@ -2413,10 +2451,17 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
}
break;
}
-case PIXEL_ELEMENTS: {
-// Pixel elements cannot be deleted. Just silently ignore here.
+case PIXEL_ELEMENTS:
+case EXTERNAL_BYTE_ELEMENTS:
+case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+case EXTERNAL_SHORT_ELEMENTS:
+case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+case EXTERNAL_INT_ELEMENTS:
+case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+case EXTERNAL_FLOAT_ELEMENTS:
+// Pixel and external array elements cannot be deleted. Just
+// silently ignore here.
break;
-}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
@@ -2507,7 +2552,15 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check if the object is among the indexed properties.
switch (GetElementsKind()) {
case PIXEL_ELEMENTS:
-// Raw pixels do not reference other objects.
+case EXTERNAL_BYTE_ELEMENTS:
+case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+case EXTERNAL_SHORT_ELEMENTS:
+case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+case EXTERNAL_INT_ELEMENTS:
+case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+case EXTERNAL_FLOAT_ELEMENTS:
+// Raw pixels and external arrays do not reference other
+// objects.
break;
case FAST_ELEMENTS: {
int length = IsJSArray() ?
@@ -2752,7 +2805,15 @@ Object* JSObject::DefineGetterSetter(String* name,
case FAST_ELEMENTS:
break;
case PIXEL_ELEMENTS:
-// Ignore getters and setters on pixel elements.
+case EXTERNAL_BYTE_ELEMENTS:
+case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+case EXTERNAL_SHORT_ELEMENTS:
+case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+case EXTERNAL_INT_ELEMENTS:
+case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+case EXTERNAL_FLOAT_ELEMENTS:
+// Ignore getters and setters on pixel and external array
+// elements.
return Heap::undefined_value();
case DICTIONARY_ELEMENTS: {
// Lookup the index.
@@ -3087,7 +3148,7 @@ static bool HasKey(FixedArray* array, Object* key) {

Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
-ASSERT(!array->HasPixelElements());
+ASSERT(!array->HasPixelElements() && !array->HasExternalArrayElements());
switch (array->GetElementsKind()) {
case JSObject::FAST_ELEMENTS:
return UnionOfKeys(FixedArray::cast(array->elements()));
@@ -4599,7 +4660,7 @@ static inline uint32_t HashField(uint32_t hash, bool is_array_index) {

uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
-if (length_ <= String::kMaxShortStringSize) {
+if (length_ <= String::kMaxShortSize) {
uint32_t payload;
if (is_array_index()) {
payload = v8::internal::HashField(array_index(), true);
@@ -4608,7 +4669,7 @@ uint32_t StringHasher::GetHashField() {
}
return (payload & ((1 << String::kShortLengthShift) - 1)) |
(length_ << String::kShortLengthShift);
-} else if (length_ <= String::kMaxMediumStringSize) {
+} else if (length_ <= String::kMaxMediumSize) {
uint32_t payload = v8::internal::HashField(GetHash(), false);
return (payload & ((1 << String::kMediumLengthShift) - 1)) |
(length_ << String::kMediumLengthShift);
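The packing above keeps the hash payload in the low bits and the string
length in the bits above the per-tag shift. A minimal standalone sketch of
the scheme (the shift value of 8 is purely illustrative, not V8's actual
constant):

    #include <stdint.h>
    #include <stdio.h>

    static const int kLengthShift = 8;  // assumed for illustration

    int main() {
      uint32_t payload = 0xDEADBEEFu;
      uint32_t length = 42;
      // Mask the payload to the low bits, store the length above them.
      uint32_t field =
          (payload & ((1u << kLengthShift) - 1)) | (length << kLengthShift);
      printf("hash bits: 0x%x, length: %u\n",
             field & ((1u << kLengthShift) - 1), field >> kLengthShift);
      return 0;
    }

This is also why kMaxShortSize/kMaxMediumSize below are defined as
(1 << (32 - shift)) - 1: any length that does not fit above the shift cannot
be stored in the 32-bit field.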
@@ -5201,8 +5262,8 @@ void Code::Disassemble(const char* name) {

void JSObject::SetFastElements(FixedArray* elems) {
-// We should never end in here with a pixel array.
-ASSERT(!HasPixelElements());
+// We should never end in here with a pixel or external array.
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
#ifdef DEBUG
// Check the provided array is filled with the_hole.
uint32_t len = static_cast<uint32_t>(elems->length());
@@ -5239,8 +5300,8 @@ void JSObject::SetFastElements(FixedArray* elems) {

Object* JSObject::SetSlowElements(Object* len) {
-// We should never end in here with a pixel array.
-ASSERT(!HasPixelElements());
+// We should never end in here with a pixel or external array.
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
uint32_t new_length = static_cast<uint32_t>(len->Number());
@@ -5318,8 +5379,8 @@ static Object* ArrayLengthRangeError() {

Object* JSObject::SetElementsLength(Object* len) {
-// We should never end in here with a pixel array.
-ASSERT(!HasPixelElements());
+// We should never end in here with a pixel or external array.
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
Object* smi_length = len->ToSmi();
if (smi_length->IsSmi()) {
@@ -5420,6 +5481,20 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
}
break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
// TODO(kbr): Add testcase.
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
}
break;
}
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound) {
@@ -5507,6 +5582,16 @@ bool JSObject::HasLocalElement(uint32_t index) {
PixelArray* pixels = PixelArray::cast(elements());
return (index < static_cast<uint32_t>(pixels->length()));
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
return (index < static_cast<uint32_t>(array->length()));
}
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound;
@@ -5550,6 +5635,19 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
}
break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
}
break;
}
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound) {
@@ -5690,6 +5788,37 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
PixelArray* pixels = PixelArray::cast(elements());
return pixels->SetValue(index, value);
}
case EXTERNAL_BYTE_ELEMENTS: {
ExternalByteArray* array = ExternalByteArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
ExternalUnsignedByteArray* array =
ExternalUnsignedByteArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_SHORT_ELEMENTS: {
ExternalShortArray* array = ExternalShortArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
ExternalUnsignedShortArray* array =
ExternalUnsignedShortArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_INT_ELEMENTS: {
ExternalIntArray* array = ExternalIntArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
ExternalUnsignedIntArray* array =
ExternalUnsignedIntArray::cast(elements());
return array->SetValue(index, value);
}
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalFloatArray* array = ExternalFloatArray::cast(elements());
return array->SetValue(index, value);
}
case DICTIONARY_ELEMENTS: {
// Insert element in the dictionary.
FixedArray* elms = FixedArray::cast(elements());
@@ -5807,6 +5936,17 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
UNIMPLEMENTED();
break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
// TODO(kbr): Add testcase and implement.
UNIMPLEMENTED();
break;
}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
@@ -5905,6 +6045,65 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
}
break;
}
case EXTERNAL_BYTE_ELEMENTS: {
ExternalByteArray* array = ExternalByteArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
int8_t value = array->get(index);
return Smi::FromInt(value);
}
break;
}
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
ExternalUnsignedByteArray* array =
ExternalUnsignedByteArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
uint8_t value = array->get(index);
return Smi::FromInt(value);
}
break;
}
case EXTERNAL_SHORT_ELEMENTS: {
ExternalShortArray* array = ExternalShortArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
int16_t value = array->get(index);
return Smi::FromInt(value);
}
break;
}
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
ExternalUnsignedShortArray* array =
ExternalUnsignedShortArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
uint16_t value = array->get(index);
return Smi::FromInt(value);
}
break;
}
case EXTERNAL_INT_ELEMENTS: {
ExternalIntArray* array = ExternalIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
int32_t value = array->get(index);
return Heap::NumberFromInt32(value);
}
break;
}
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
ExternalUnsignedIntArray* array =
ExternalUnsignedIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
uint32_t value = array->get(index);
return Heap::NumberFromUint32(value);
}
break;
}
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalFloatArray* array = ExternalFloatArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
float value = array->get(index);
return Heap::AllocateHeapNumber(value);
}
break;
}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
@@ -5948,7 +6147,14 @@ bool JSObject::HasDenseElements() {
}
break;
}
-case PIXEL_ELEMENTS: {
+case PIXEL_ELEMENTS:
+case EXTERNAL_BYTE_ELEMENTS:
+case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+case EXTERNAL_SHORT_ELEMENTS:
+case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+case EXTERNAL_INT_ELEMENTS:
+case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+case EXTERNAL_FLOAT_ELEMENTS: {
return true;
}
case DICTIONARY_ELEMENTS: {
@@ -6172,6 +6378,16 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
PixelArray* pixels = PixelArray::cast(elements());
return index < static_cast<uint32_t>(pixels->length());
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
return index < static_cast<uint32_t>(array->length());
}
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound;
@@ -6392,6 +6608,23 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
int length = ExternalArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
}
counter++;
}
ASSERT(!storage || storage->length() >= counter);
break;
}
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage, filter);
@@ -6938,7 +7171,7 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {

// If the object is in dictionary mode, it is converted to fast elements
// mode.
Object* JSObject::PrepareElementsForSort(uint32_t limit) {
-ASSERT(!HasPixelElements());
+ASSERT(!HasPixelElements() && !HasExternalArrayElements());
if (HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
@@ -7070,6 +7303,99 @@ Object* PixelArray::SetValue(uint32_t index, Object* value) {
}
template<typename ExternalArrayClass, typename ValueType>
static Object* ExternalArrayIntSetter(ExternalArrayClass* receiver,
uint32_t index,
Object* value) {
ValueType cast_value = 0;
if (index < static_cast<uint32_t>(receiver->length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
cast_value = static_cast<ValueType>(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
} else {
// Clamp undefined to zero (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
receiver->set(index, cast_value);
}
return Heap::NumberFromInt32(cast_value);
}
Object* ExternalByteArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalByteArray, int8_t>
(this, index, value);
}
Object* ExternalUnsignedByteArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
(this, index, value);
}
Object* ExternalShortArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalShortArray, int16_t>
(this, index, value);
}
Object* ExternalUnsignedShortArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
(this, index, value);
}
Object* ExternalIntArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalIntArray, int32_t>
(this, index, value);
}
Object* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
cast_value = static_cast<uint32_t>(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
} else {
// Clamp undefined to zero (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
set(index, cast_value);
}
return Heap::NumberFromUint32(cast_value);
}
Object* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
float cast_value = 0;
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
cast_value = static_cast<float>(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
cast_value = static_cast<float>(double_value);
} else {
// Clamp undefined to zero (default). All other types have been
// converted to a number type further up in the call chain.
ASSERT(value->IsUndefined());
}
set(index, cast_value);
}
return Heap::AllocateHeapNumber(cast_value);
}
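A minimal standalone sketch of the conversion these SetValue implementations
apply to an incoming number (the helper below is a simplified stand-in for
V8's DoubleToInt32, covering only the in-range path; the real helper
implements full ECMA-262 ToInt32 semantics including NaN/Infinity handling
and modular wrapping):

    #include <stdint.h>
    #include <math.h>
    #include <stdio.h>

    // Simplified stand-in: real DoubleToInt32 also wraps out-of-range
    // values modulo 2^32 per ECMA-262.
    static int32_t SimpleDoubleToInt32(double v) {
      if (isnan(v) || isinf(v)) return 0;
      return static_cast<int32_t>(v);  // truncates toward zero
    }

    int main() {
      // Storing 300.7 into a byte (int8_t) element: convert to int32,
      // then narrow with a plain C cast -- no clamping. On the usual
      // two's-complement targets this wraps modulo 256.
      int8_t byte_value = static_cast<int8_t>(SimpleDoubleToInt32(300.7));
      printf("%d\n", byte_value);  // prints 44 (300 mod 256)
      return 0;
    }

This is the cast-not-clamp behavior that the ExternalArray comment in
objects.h below contrasts with CanvasPixelArray, whose setter clamps to
[0, 255] instead of wrapping.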
Object* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());

273
deps/v8/src/objects.h

@@ -56,6 +56,14 @@
// - Array
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
@@ -274,6 +282,16 @@ enum PropertyNormalizationMode {
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(PIXEL_ARRAY_TYPE) \
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
V(EXTERNAL_BYTE_ARRAY_TYPE) \
V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
V(EXTERNAL_SHORT_ARRAY_TYPE) \
V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
V(EXTERNAL_INT_ARRAY_TYPE) \
V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
@@ -673,6 +691,13 @@ enum InstanceType {
PROXY_TYPE,
BYTE_ARRAY_TYPE,
PIXEL_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE,
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
EXTERNAL_INT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
EXTERNAL_FLOAT_ARRAY_TYPE,
FILLER_TYPE,
SMI_TYPE,
@@ -780,6 +805,14 @@ class Object BASE_EMBEDDED {
inline bool IsNumber();
inline bool IsByteArray();
inline bool IsPixelArray();
inline bool IsExternalArray();
inline bool IsExternalByteArray();
inline bool IsExternalUnsignedByteArray();
inline bool IsExternalShortArray();
inline bool IsExternalUnsignedShortArray();
inline bool IsExternalIntArray();
inline bool IsExternalUnsignedIntArray();
inline bool IsExternalFloatArray();
inline bool IsFailure();
inline bool IsRetryAfterGC();
inline bool IsOutOfMemoryFailure();
@@ -1049,6 +1082,15 @@ class MapWord BASE_EMBEDDED {
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
// True if this map word is a serialization address. This will only be the
// case during a destructive serialization of the heap.
inline bool IsSerializationAddress();
// Create a map word from a serialization address.
static inline MapWord FromSerializationAddress(int raw);
// View this map word as a serialization address.
inline int ToSerializationAddress();
// Marking phase of full collection: the map word of live objects is
// marked, and may be marked as overflowed (eg, the object is live, its
@@ -1323,7 +1365,14 @@ class JSObject: public HeapObject {
enum ElementsKind {
FAST_ELEMENTS,
DICTIONARY_ELEMENTS,
-PIXEL_ELEMENTS
+PIXEL_ELEMENTS,
+EXTERNAL_BYTE_ELEMENTS,
+EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+EXTERNAL_SHORT_ELEMENTS,
+EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+EXTERNAL_INT_ELEMENTS,
+EXTERNAL_UNSIGNED_INT_ELEMENTS,
+EXTERNAL_FLOAT_ELEMENTS
};
// [properties]: Backing storage for properties.
@@ -1343,6 +1392,14 @@ class JSObject: public HeapObject {
inline bool HasFastElements();
inline bool HasDictionaryElements();
inline bool HasPixelElements();
inline bool HasExternalArrayElements();
inline bool HasExternalByteElements();
inline bool HasExternalUnsignedByteElements();
inline bool HasExternalShortElements();
inline bool HasExternalUnsignedShortElements();
inline bool HasExternalIntElements();
inline bool HasExternalUnsignedIntElements();
inline bool HasExternalFloatElements();
inline NumberDictionary* element_dictionary();  // Gets slow elements.
// Collects elements starting at index 0.
@@ -2507,6 +2564,200 @@ class PixelArray: public Array {
};
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
// specification. As of this writing the first public draft is not yet
// available, but Khronos members can access the draft at:
// https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
//
// The semantics of these arrays differ from CanvasPixelArray.
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
class ExternalArray: public Array {
public:
// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
// Casting.
static inline ExternalArray* cast(Object* obj);
// Maximal acceptable length for an external array.
static const int kMaxLength = 0x3fffffff;
// ExternalArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
};
class ExternalByteArray: public ExternalArray {
public:
// Setter and getter.
inline int8_t get(int index);
inline void set(int index, int8_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalByteArray* cast(Object* obj);
#ifdef DEBUG
void ExternalByteArrayPrint();
void ExternalByteArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
};
class ExternalUnsignedByteArray: public ExternalArray {
public:
// Setter and getter.
inline uint8_t get(int index);
inline void set(int index, uint8_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalUnsignedByteArray* cast(Object* obj);
#ifdef DEBUG
void ExternalUnsignedByteArrayPrint();
void ExternalUnsignedByteArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
};
class ExternalShortArray: public ExternalArray {
public:
// Setter and getter.
inline int16_t get(int index);
inline void set(int index, int16_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalShortArray* cast(Object* obj);
#ifdef DEBUG
void ExternalShortArrayPrint();
void ExternalShortArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
};
class ExternalUnsignedShortArray: public ExternalArray {
public:
// Setter and getter.
inline uint16_t get(int index);
inline void set(int index, uint16_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalUnsignedShortArray* cast(Object* obj);
#ifdef DEBUG
void ExternalUnsignedShortArrayPrint();
void ExternalUnsignedShortArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
};
class ExternalIntArray: public ExternalArray {
public:
// Setter and getter.
inline int32_t get(int index);
inline void set(int index, int32_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalIntArray* cast(Object* obj);
#ifdef DEBUG
void ExternalIntArrayPrint();
void ExternalIntArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
};
class ExternalUnsignedIntArray: public ExternalArray {
public:
// Setter and getter.
inline uint32_t get(int index);
inline void set(int index, uint32_t value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalUnsignedIntArray* cast(Object* obj);
#ifdef DEBUG
void ExternalUnsignedIntArrayPrint();
void ExternalUnsignedIntArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
};
class ExternalFloatArray: public ExternalArray {
public:
// Setter and getter.
inline float get(int index);
inline void set(int index, float value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
Object* SetValue(uint32_t index, Object* value);
// Casting.
static inline ExternalFloatArray* cast(Object* obj);
#ifdef DEBUG
void ExternalFloatArrayPrint();
void ExternalFloatArrayVerify();
#endif // DEBUG
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
};
// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
public:
@@ -3819,10 +4070,8 @@ class String: public HeapObject {
static const int kSize = kLengthOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.

-// Limits on sizes of different types of strings.
-static const int kMaxShortStringSize = 63;
-static const int kMaxMediumStringSize = 16383;
+// Maximum number of characters to consider when trying to convert a string
+// value into an array index.
static const int kMaxArrayIndexSize = 10;
// Max ascii char code.
@@ -3846,13 +4095,17 @@ class String: public HeapObject {
// field.
static const int kMaxCachedArrayIndexLength = 7;

-// Shift constants for retriving length and hash code from
+// Shift constants for retrieving length and hash code from
// length/hash field.
static const int kHashShift = kNofLengthBitFields;
static const int kShortLengthShift = kHashShift + kShortStringTag;
static const int kMediumLengthShift = kHashShift + kMediumStringTag;
static const int kLongLengthShift = kHashShift + kLongStringTag;
-// Maximal string length that can be stored in the hash/length field.
+// Maximal string length that can be stored in the hash/length field for
+// different types of strings.
+static const int kMaxShortSize = (1 << (32 - kShortLengthShift)) - 1;
+static const int kMaxMediumSize = (1 << (32 - kMediumLengthShift)) - 1;
static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
// Limit for truncation in short printing.
@@ -4466,6 +4719,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
DECL_ACCESSORS(load_stub_cache, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -4491,7 +4745,8 @@ class AccessorInfo: public Struct {
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
-static const int kSize = kFlagOffset + kPointerSize;
+static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize;
+static const int kSize = kLoadStubCacheOffset + kPointerSize;
private:
// Bit positions in flag.
@@ -4863,6 +5118,8 @@ class ObjectVisitor BASE_EMBEDDED {
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
virtual void Synchronize(const char* tag) {}
#else
inline void Synchronize(const char* tag) {}
#endif
};

7
deps/v8/src/platform-nullos.cc

@@ -47,6 +47,13 @@ double ceiling(double x) {
}
// Give V8 the opportunity to override the default fmod behavior.
double modulo(double x, double y) {
UNIMPLEMENTED();
return 0;
}
// Initialize OS class early in the V8 startup.
void OS::Setup() {
// Seed the random number generator.

6
deps/v8/src/platform-posix.cc

@@ -54,6 +54,12 @@
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Math functions
double modulo(double x, double y) {
return fmod(x, y);
}
// ----------------------------------------------------------------------------
// POSIX date/time support.

50
deps/v8/src/platform-win32.cc

@@ -48,10 +48,10 @@
#ifndef NOMCX
#define NOMCX
#endif

-// Require Windows 2000 or higher (this is required for the IsDebuggerPresent
+// Require Windows XP or higher (this is required for the RtlCaptureContext
// function to be present).
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x500
+#define _WIN32_WINNT 0x501
#endif

#include <windows.h>
@@ -223,6 +223,31 @@ double ceiling(double x) {
return ceil(x);
}
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Defined in codegen-x64.cc.
ModuloFunction CreateModuloFunction();
double modulo(double x, double y) {
static ModuloFunction function = CreateModuloFunction();
return function(x, y);
}
#else // Win32
double modulo(double x, double y) {
// Workaround MS fmod bugs. ECMA-262 says:
// dividend is finite and divisor is an infinity => result equals dividend
// dividend is a zero and divisor is nonzero finite => result equals dividend
if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
!(x == 0 && (y != 0 && isfinite(y)))) {
x = fmod(x, y);
}
return x;
}
#endif // _WIN64
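A quick standalone check of the two ECMA-262 requirements the Win32 branch
above works around (a finite dividend with an infinite divisor, and a zero
dividend with a nonzero finite divisor, must both return the dividend
unchanged; buggy CRT fmod implementations did not guarantee this, while a
conforming libm prints the expected values):

    #include <math.h>
    #include <stdio.h>

    int main() {
      // ECMA-262: 5 % Infinity == 5.
      printf("%g\n", fmod(5.0, INFINITY));  // expected: 5
      // ECMA-262: -0 % 5 == -0 (sign preserved).
      printf("%g\n", fmod(-0.0, 5.0));      // expected: -0
      return 0;
    }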
// ----------------------------------------------------------------------------
// The Time class represents time on win32. A timestamp is represented as
// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
@@ -1183,22 +1208,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// Capture current context.
CONTEXT context;
-memset(&context, 0, sizeof(context));
-context.ContextFlags = CONTEXT_CONTROL;
-context.ContextFlags = CONTEXT_CONTROL;
-#ifdef _WIN64
-// TODO(X64): Implement context capture.
-#else
-__asm call x
-__asm x: pop eax
-__asm mov context.Eip, eax
-__asm mov context.Ebp, ebp
-__asm mov context.Esp, esp
-// NOTE: At some point, we could use RtlCaptureContext(&context) to
-// capture the context instead of inline assembler. However it is
-// only available on XP, Vista, Server 2003 and Server 2008 which
-// might not be sufficient.
-#endif
+RtlCaptureContext(&context);
// Initialize the stack walking
STACKFRAME64 stack_frame;
@@ -1308,7 +1318,9 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }

double OS::nan_value() {
#ifdef _MSC_VER
-static const __int64 nanval = 0xfff8000000000000;
+// Positive Quiet NaN with no payload (aka. Indeterminate) has all bits
+// in mask set, so value equals mask.
+static const __int64 nanval = kQuietNaNMask;
return *reinterpret_cast<const double*>(&nanval);
#else  // _MSC_VER
return NAN;
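A small standalone check of the bit pattern involved (0xFFF8000000000000 is
the quiet-NaN value the old literal spelled out, MSVC's "Indeterminate"; the
memcpy sidesteps the aliasing question the reinterpret_cast raises):

    #include <stdint.h>
    #include <string.h>
    #include <math.h>
    #include <stdio.h>

    int main() {
      // Exponent all ones plus the top mantissa bit set => quiet NaN.
      uint64_t bits = 0xFFF8000000000000ULL;
      double d;
      memcpy(&d, &bits, sizeof(d));
      printf("isnan: %d\n", isnan(d) ? 1 : 0);  // prints 1
      return 0;
    }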

1
deps/v8/src/platform.h

@@ -111,6 +111,7 @@ namespace internal {
class Semaphore;

double ceiling(double x);
double modulo(double x, double y);
// Forward declarations.
class Socket;

16
deps/v8/src/regexp-macro-assembler.h

@@ -215,22 +215,6 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
bool at_start);
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
-RegExpCEntryStub() {}
-virtual ~RegExpCEntryStub() {}
-void Generate(MacroAssembler* masm);
- private:
-Major MajorKey() { return RegExpCEntry; }
-int MinorKey() { return 0; }
-const char* GetName() { return "RegExpCEntryStub"; }
-};
#endif  // V8_NATIVE_REGEXP
} }  // namespace v8::internal

178
deps/v8/src/runtime.cc

@@ -156,7 +156,7 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
// Deep copy local elements.
// Pixel elements cannot be created using an object literal.
-ASSERT(!copy->HasPixelElements());
+ASSERT(!copy->HasPixelElements() && !copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
@@ -577,8 +577,8 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
HandleScope scope;
Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
-CONVERT_ARG_CHECKED(FixedArray, pairs, 0);
-Handle<Context> context = args.at<Context>(1);
+Handle<Context> context = args.at<Context>(0);
+CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
bool is_eval = Smi::cast(args[2])->value() == 1;

// Compute the property attributes. According to ECMA-262, section
@@ -1357,8 +1357,9 @@ class ReplacementStringBuilder {
StringBuilderSubstringPosition::encode(from);
AddElement(Smi::FromInt(encoded_slice));
} else {
-Handle<String> slice = Factory::NewStringSlice(subject_, from, to);
-AddElement(*slice);
+// Otherwise encode as two smis.
+AddElement(Smi::FromInt(-length));
+AddElement(Smi::FromInt(from));
}
IncrementCharacterCount(length);
}
@@ -3742,14 +3743,7 @@ static Object* Runtime_NumberMod(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
-#if defined WIN32 || defined _WIN64
-// Workaround MS fmod bugs. ECMA-262 says:
-// dividend is finite and divisor is an infinity => result equals dividend
-// dividend is a zero and divisor is nonzero finite => result equals dividend
-if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
-    !(x == 0 && (y != 0 && isfinite(y))))
-#endif
-x = fmod(x, y);
+x = modulo(x, y);

// NewNumberFromDouble may return a Smi instead of a Number object
return Heap::NewNumberFromDouble(x);
}
@@ -3773,9 +3767,21 @@ static inline void StringBuilderConcatHelper(String* special,
for (int i = 0; i < array_length; i++) {
Object* element = fixed_array->get(i);
if (element->IsSmi()) {
+// Smi encoding of position and length.
int encoded_slice = Smi::cast(element)->value();
-int pos = StringBuilderSubstringPosition::decode(encoded_slice);
-int len = StringBuilderSubstringLength::decode(encoded_slice);
+int pos;
+int len;
+if (encoded_slice > 0) {
+// Position and length encoded in one smi.
+pos = StringBuilderSubstringPosition::decode(encoded_slice);
+len = StringBuilderSubstringLength::decode(encoded_slice);
+} else {
+// Position and length encoded in two smis.
+Object* obj = fixed_array->get(++i);
+ASSERT(obj->IsSmi());
+pos = Smi::cast(obj)->value();
+len = -encoded_slice;
+}
String::WriteToFlat(special,
sink + position,
pos,
@@ -3796,6 +3802,10 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(String, special, args[1]);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
int special_length = special->length();
Object* smi_array_length = array->length();
if (!smi_array_length->IsSmi()) {
@@ -3823,13 +3833,29 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
+// Smi encoding of position and length.
int len = Smi::cast(elt)->value();
-int pos = len >> 11;
-len &= 0x7ff;
-if (pos + len > special_length) {
-return Top::Throw(Heap::illegal_argument_symbol());
-}
-position += len;
+if (len > 0) {
+// Position and length encoded in one smi.
+int pos = len >> 11;
+len &= 0x7ff;
+if (pos + len > special_length) {
+return Top::Throw(Heap::illegal_argument_symbol());
+}
+position += len;
+} else {
+// Position and length encoded in two smis.
+position += (-len);
+// Get the position and check that it is also a smi.
+i++;
+if (i >= array_length) {
+return Top::Throw(Heap::illegal_argument_symbol());
+}
+Object* pos = fixed_array->get(i);
+if (!pos->IsSmi()) {
+return Top::Throw(Heap::illegal_argument_symbol());
+}
+}
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
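A minimal standalone sketch of this one-smi/two-smi slice encoding (the
11-bit length field matches the pos = len >> 11 / len &= 0x7ff decoding
above; the vector stands in for the fixed array, and the smi-range guard is
omitted):

    #include <stdio.h>
    #include <vector>

    // Short slices pack position above an 11-bit length; longer slices
    // are stored as the pair (-length, position).
    static void AddSlice(std::vector<int>* out, int pos, int len) {
      if (len <= 0x7ff) {
        out->push_back((pos << 11) | len);
      } else {
        out->push_back(-len);
        out->push_back(pos);
      }
    }

    int main() {
      std::vector<int> parts;
      AddSlice(&parts, 100, 7);     // fits in one entry
      AddSlice(&parts, 100, 5000);  // needs two entries
      for (size_t i = 0; i < parts.size(); i++) {
        int encoded = parts[i];
        int pos, len;
        if (encoded > 0) {
          pos = encoded >> 11;      // one-smi form
          len = encoded & 0x7ff;
        } else {
          len = -encoded;           // two-smi form
          pos = parts[++i];
        }
        printf("pos=%d len=%d\n", pos, len);
      }
      return 0;
    }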
@@ -4367,8 +4393,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {

static Object* Runtime_NewClosure(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
-CONVERT_ARG_CHECKED(JSFunction, boilerplate, 0);
-CONVERT_ARG_CHECKED(Context, context, 1);
+CONVERT_ARG_CHECKED(Context, context, 0);
+CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);

Handle<JSFunction> result =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -4804,6 +4830,12 @@ static Object* Runtime_ReThrow(Arguments args) {
}
static Object* Runtime_PromoteScheduledException(Arguments args) {
ASSERT_EQ(0, args.length());
return Top::PromoteScheduledException();
}
static Object* Runtime_ThrowReferenceError(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -5273,6 +5305,47 @@ class ArrayConcatVisitor {
};
template<class ExternalArrayClass, class ElementType>
static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
bool elements_are_ints,
bool elements_are_guaranteed_smis,
uint32_t range,
ArrayConcatVisitor* visitor) {
Handle<ExternalArrayClass> array(
ExternalArrayClass::cast(receiver->elements()));
uint32_t len = Min(static_cast<uint32_t>(array->length()), range);
if (visitor != NULL) {
if (elements_are_ints) {
if (elements_are_guaranteed_smis) {
for (uint32_t j = 0; j < len; j++) {
Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
visitor->visit(j, e);
}
} else {
for (uint32_t j = 0; j < len; j++) {
int64_t val = static_cast<int64_t>(array->get(j));
if (Smi::IsValid(static_cast<intptr_t>(val))) {
Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
visitor->visit(j, e);
} else {
Handle<Object> e(
Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
visitor->visit(j, e);
}
}
}
} else {
for (uint32_t j = 0; j < len; j++) {
Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
visitor->visit(j, e);
}
}
}
return len;
}
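The int and uint cases that call this helper below pass
elements_are_guaranteed_smis == false because a smi payload is narrower than
32 bits, so large element values must be boxed as heap numbers. A standalone
sketch of that range check (the signed 31-bit payload is the usual
32-bit-pointer configuration; it is assumed here, not quoted from this diff):

    #include <stdint.h>
    #include <stdio.h>

    // Assumed smi payload: signed 31 bits, i.e. [-2^30, 2^30 - 1].
    static const int32_t kSmiMax = (1 << 30) - 1;
    static const int32_t kSmiMin = -(1 << 30);

    static bool SmiIsValid(int64_t value) {
      return value >= kSmiMin && value <= kSmiMax;
    }

    int main() {
      printf("%d\n", SmiIsValid(1000) ? 1 : 0);          // 1: stays a smi
      printf("%d\n", SmiIsValid(3000000000LL) ? 1 : 0);  // 0: boxed
      return 0;
    }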
/**
 * A helper function that visits elements of a JSObject. Only elements
 * whose index between 0 and range (exclusive) are visited.
@@ -5322,6 +5395,48 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
}
break;
}
case JSObject::EXTERNAL_BYTE_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalByteArray, int8_t>(
receiver, true, true, range, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
receiver, true, true, range, visitor);
break;
}
case JSObject::EXTERNAL_SHORT_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalShortArray, int16_t>(
receiver, true, true, range, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
receiver, true, true, range, visitor);
break;
}
case JSObject::EXTERNAL_INT_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalIntArray, int32_t>(
receiver, true, false, range, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
receiver, true, false, range, visitor);
break;
}
case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
num_of_elements =
IterateExternalArrayElements<ExternalFloatArray, float>(
receiver, false, false, range, visitor);
break;
}
case JSObject::DICTIONARY_ELEMENTS: {
Handle<NumberDictionary> dict(receiver->element_dictionary());
uint32_t capacity = dict->Capacity();
@@ -7659,6 +7774,18 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
}
// Returns V8 version as a string.
static Object* Runtime_GetV8Version(Arguments args) {
ASSERT_EQ(args.length(), 0);
NoHandleAllocation ha;
const char* version_string = v8::V8::GetVersion();
return Heap::AllocateStringFromAscii(CStrVector(version_string), NOT_TENURED);
}
static Object* Runtime_Abort(Arguments args) {
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
@@ -7670,6 +7797,13 @@ static Object* Runtime_Abort(Arguments args) {
}
static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) {
ASSERT(args.length() == 0);
HandleScope::DeleteExtensions();
return Heap::undefined_value();
}
#ifdef DEBUG
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.

4
deps/v8/src/runtime.h

@@ -175,6 +175,7 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 2, 1) \
F(GetV8Version, 0, 1) \
\
F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
@@ -233,6 +234,7 @@ namespace internal {
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(StackGuard, 1, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
F(NewContext, 1, 1) \
@@ -262,6 +264,8 @@ namespace internal {
F(Log, 2, 1) \
/* ES5 */ \
F(LocalKeys, 1, 1) \
/* Handle scopes */ \
F(DeleteHandleScopeExtensions, 0, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)

5
deps/v8/src/runtime.js

@@ -128,7 +128,10 @@ function COMPARE(x, ncr) {
if (IS_STRING(a) && IS_STRING(b)) {
return %StringCompare(a, b);
} else {
-return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+var a_number = %ToNumber(a);
+var b_number = %ToNumber(b);
+if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
+return %NumberCompare(a_number, b_number, ncr);
}
}

508
deps/v8/src/serialize.cc

@@ -1417,7 +1417,27 @@ void Deserializer::Synchronize(const char* tag) {
#endif
class NoGlobalHandlesChecker : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {
ASSERT(false);
}
};
class GlobalHandleDestroyer : public ObjectVisitor {
void VisitPointers(Object**start, Object**end) {
while (start < end) {
GlobalHandles::Destroy(start++);
}
}
};
void Deserializer::Deserialize() {
// No global handles.
NoGlobalHandlesChecker checker;
GlobalHandles::IterateRoots(&checker);
// No active threads.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
@@ -1428,6 +1448,10 @@ void Deserializer::Deserialize() {
GetHeader();
Heap::IterateRoots(this);
GetContextStack();
// Any global handles that have been set up by deserialization are leaked
// since no one is keeping track of them. So we discard them now.
GlobalHandleDestroyer destroyer;
GlobalHandles::IterateRoots(&destroyer);
}
@@ -1740,4 +1764,488 @@ Object* Deserializer::Resolve(Address encoded) {
}
Deserializer2::Deserializer2(SnapshotByteSource* source)
: source_(source),
external_reference_decoder_(NULL) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
}
// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Deserializer2::Allocate(int space_index, int size) {
HeapObject* new_object;
int old_fullness = CurrentAllocationAddress(space_index);
// When we start a new page we need to record its location.
bool record_page = (old_fullness == 0);
if (SpaceIsPaged(space_index)) {
PagedSpace* space;
switch (space_index) {
case OLD_DATA_SPACE: space = Heap::old_data_space(); break;
case OLD_POINTER_SPACE: space = Heap::old_pointer_space(); break;
case MAP_SPACE: space = Heap::map_space(); break;
case CODE_SPACE: space = Heap::code_space(); break;
case CELL_SPACE: space = Heap::cell_space(); break;
default: UNREACHABLE(); space = NULL; break;
}
ASSERT(size <= Page::kPageSize - Page::kObjectStartOffset);
int current_page = old_fullness >> Page::kPageSizeBits;
int new_fullness = old_fullness + size;
int new_page = new_fullness >> Page::kPageSizeBits;
// What is our new position within the current page.
int intra_page_offset = new_fullness - current_page * Page::kPageSize;
if (intra_page_offset > Page::kPageSize - Page::kObjectStartOffset) {
// This object will not fit in a page and we have to move to the next.
new_page = current_page + 1;
old_fullness = new_page << Page::kPageSizeBits;
new_fullness = old_fullness + size;
record_page = true;
}
fullness_[space_index] = new_fullness;
Object* new_allocation = space->AllocateRaw(size);
new_object = HeapObject::cast(new_allocation);
ASSERT(!new_object->IsFailure());
ASSERT((reinterpret_cast<intptr_t>(new_object->address()) &
Page::kPageAlignmentMask) ==
(old_fullness & Page::kPageAlignmentMask) +
Page::kObjectStartOffset);
} else if (SpaceIsLarge(space_index)) {
ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
fullness_[LO_SPACE]++;
LargeObjectSpace* lo_space = Heap::lo_space();
Object* new_allocation;
if (space_index == kLargeData) {
new_allocation = lo_space->AllocateRaw(size);
} else if (space_index == kLargeFixedArray) {
new_allocation = lo_space->AllocateRawFixedArray(size);
} else {
ASSERT(space_index == kLargeCode);
new_allocation = lo_space->AllocateRawCode(size);
}
ASSERT(!new_allocation->IsFailure());
new_object = HeapObject::cast(new_allocation);
record_page = true;
// The page recording below records all large objects in the same space.
space_index = LO_SPACE;
} else {
ASSERT(space_index == NEW_SPACE);
Object* new_allocation = Heap::new_space()->AllocateRaw(size);
fullness_[space_index] += size;
ASSERT(!new_allocation->IsFailure());
new_object = HeapObject::cast(new_allocation);
}
Address address = new_object->address();
if (record_page) {
pages_[space_index].Add(address);
}
return address;
}
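A standalone sketch of the page-fullness arithmetic above (the 8 KB page
size, i.e. kPageSizeBits == 13, and the 32-byte object start offset are
assumed for illustration; only the bump-and-spill-to-the-next-page logic is
reproduced):

    #include <stdio.h>

    static const int kPageSizeBits = 13;       // assumed: 8 KB pages
    static const int kPageSize = 1 << kPageSizeBits;
    static const int kObjectStartOffset = 32;  // assumed header room

    // Returns the new fullness after placing an object of 'size' bytes,
    // starting a fresh page when the object would not fit.
    static int Place(int fullness, int size, int* new_page) {
      int current_page = fullness >> kPageSizeBits;
      int new_fullness = fullness + size;
      int intra_page_offset = new_fullness - current_page * kPageSize;
      *new_page = 0;
      if (intra_page_offset > kPageSize - kObjectStartOffset) {
        *new_page = 1;
        new_fullness = ((current_page + 1) << kPageSizeBits) + size;
      }
      return new_fullness;
    }

    int main() {
      int new_page;
      int fullness = Place(0, 6000, &new_page);
      printf("%d %d\n", fullness, new_page);  // 6000 0
      fullness = Place(fullness, 4000, &new_page);
      printf("%d %d\n", fullness, new_page);  // 12192 1 (spilled)
      return 0;
    }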
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* Deserializer2::GetAddress(int space) {
int offset = source_->GetInt();
if (SpaceIsLarge(space)) {
// Large spaces have one object per 'page'.
return HeapObject::FromAddress(
pages_[LO_SPACE][fullness_[LO_SPACE] - offset]);
}
offset <<= kObjectAlignmentBits;
if (space == NEW_SPACE) {
// New space has only one space - numbered 0.
return HeapObject::FromAddress(
pages_[space][0] + fullness_[space] - offset);
}
ASSERT(SpaceIsPaged(space));
int virtual_address = fullness_[space] - offset;
int page_of_pointee = (virtual_address) >> Page::kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(virtual_address & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
}
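The matching decode direction can be checked the same way (same assumed
kPageSizeBits; kObjectAlignmentBits == 2 assumes 4-byte object alignment,
since back-reference offsets are transmitted in words):

    #include <stdio.h>

    static const int kPageSizeBits = 13;        // assumed
    static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
    static const int kObjectAlignmentBits = 2;  // assumed

    int main() {
      // A back reference is "offset bytes back" from the current
      // fullness; page index and intra-page offset fall out of a shift
      // and a mask.
      int fullness = 12192;
      int offset = 500 << kObjectAlignmentBits;  // offset sent in words
      int virtual_address = fullness - offset;
      printf("page %d, offset %d\n",
             virtual_address >> kPageSizeBits,
             virtual_address & kPageAlignmentMask);  // page 1, offset 2000
      return 0;
    }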
void Deserializer2::Deserialize() {
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
LinearAllocationScope allocate_linearly;
// No active threads.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
ASSERT(external_reference_decoder_ == NULL);
external_reference_decoder_ = new ExternalReferenceDecoder();
Heap::IterateRoots(this);
ASSERT(source_->AtEOF());
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
}
// This is called on the roots. It is the driver of the deserialization
// process.
void Deserializer2::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
DataType data = static_cast<DataType>(source_->Get());
if (data == SMI_SERIALIZATION) {
*current = Smi::FromInt(source_->GetInt() - kSmiBias);
} else if (data == BACKREF_SERIALIZATION) {
int space = source_->Get();
*current = GetAddress(space);
} else {
ASSERT(data == OBJECT_SERIALIZATION);
ReadObject(current);
}
}
}
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the ByteArray map is not set up by the
// time we need to use it to mark the space at the end of a page free (by
// making it into a byte array).
bool Deserializer2::ReadObject(Object** write_back) {
int space = source_->Get();
int size = source_->GetInt() << kObjectAlignmentBits;
Address address = Allocate(space, size);
*write_back = HeapObject::FromAddress(address);
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
while (current < limit) {
DataType data = static_cast<DataType>(source_->Get());
switch (data) {
case SMI_SERIALIZATION:
*current++ = Smi::FromInt(source_->GetInt() - kSmiBias);
break;
case RAW_DATA_SERIALIZATION: {
int size = source_->GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
for (int j = 0; j < size; j++) {
*raw_data_out++ = source_->Get();
}
current = reinterpret_cast<Object**>(raw_data_out);
break;
}
case OBJECT_SERIALIZATION: {
// Recurse to unpack an object that is forward-referenced from here.
bool in_new_space = ReadObject(current);
if (in_new_space && space != NEW_SPACE) {
Heap::RecordWrite(address,
reinterpret_cast<Address>(current) - address);
}
current++;
break;
}
case CODE_OBJECT_SERIALIZATION: {
Object* new_code_object = NULL;
ReadObject(&new_code_object);
Code* code_object = reinterpret_cast<Code*>(new_code_object);
// Setting a branch/call to another code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
code_object->instruction_start());
location_of_branch_data += Assembler::kCallTargetSize;
current = reinterpret_cast<Object**>(location_of_branch_data);
break;
}
case BACKREF_SERIALIZATION: {
// Write a backreference to an object we unpacked earlier.
int backref_space = source_->Get();
if (backref_space == NEW_SPACE && space != NEW_SPACE) {
Heap::RecordWrite(address,
reinterpret_cast<Address>(current) - address);
}
*current++ = GetAddress(backref_space);
break;
}
case CODE_BACKREF_SERIALIZATION: {
int backref_space = source_->Get();
// Can't use Code::cast because heap is not set up yet and assertions
// will fail.
Code* code_object = reinterpret_cast<Code*>(GetAddress(backref_space));
// Setting a branch/call to previously decoded code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
code_object->instruction_start());
location_of_branch_data += Assembler::kCallTargetSize;
current = reinterpret_cast<Object**>(location_of_branch_data);
break;
}
case EXTERNAL_REFERENCE_SERIALIZATION: {
int reference_id = source_->GetInt();
Address address = external_reference_decoder_->Decode(reference_id);
*current++ = reinterpret_cast<Object*>(address);
break;
}
default:
UNREACHABLE();
}
}
ASSERT(current == limit);
return space == NEW_SPACE;
}
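// Note on the RecordWrite calls above: a pointer to a new-space object being
// installed in an object outside new space must be recorded in the remembered
// set, just as the write barrier would do for an ordinary store; skipping it
// would let the scavenger miss that slot.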
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
for (int shift = max_shift; shift > 0; shift -= 7) {
if (integer >= 1u << shift) {
Put(((integer >> shift) & 0x7f) | 0x80, "intpart");
}
}
Put(integer & 0x7f, "intlastpart");
}
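// Worked example: PutInt(300) emits two bytes. Only shift == 7 satisfies
// integer >= (1 << shift), so the sink receives
//   ((300 >> 7) & 0x7f) | 0x80 == 0x82   // high bit set: more parts follow
//   300 & 0x7f == 0x2c                   // high bit clear: last part
// and SnapshotByteSource::GetInt() reverses it:
//   ((0x82 & 0x7f) << 7) | 0x2c == 300.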
#ifdef DEBUG
void Deserializer2::Synchronize(const char* tag) {
int data = source_->Get();
// If this assert fails, it indicates a mismatch between the number of GC
// roots when serializing and when deserializing.
ASSERT(data == SYNCHRONIZE);
do {
int character = source_->Get();
if (character == 0) break;
if (FLAG_debug_serialization) {
PrintF("%c", character);
}
} while (true);
if (FLAG_debug_serialization) {
PrintF("\n");
}
}
void Serializer2::Synchronize(const char* tag) {
sink_->Put(SYNCHRONIZE, tag);
int character;
do {
character = *tag++;
sink_->Put(character, "tagcharacter");
} while (character != 0);
}
#endif
Serializer2::Serializer2(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(NULL) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
}
void Serializer2::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
ASSERT(external_reference_encoder_ == NULL);
external_reference_encoder_ = new ExternalReferenceEncoder();
Heap::IterateRoots(this);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
}
void Serializer2::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
SerializeObject(*current, TAGGED_REPRESENTATION);
}
}
void Serializer2::SerializeObject(
Object* o,
ReferenceRepresentation reference_representation) {
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
MapWord map_word = heap_object->map_word();
if (map_word.IsSerializationAddress()) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int offset =
CurrentAllocationAddress(space) - map_word.ToSerializationAddress();
// If we are actually dealing with real offsets (and not a numbering of
// all objects) then we should shift out the bits that are always 0.
if (!SpaceIsLarge(space)) offset >>= kObjectAlignmentBits;
if (reference_representation == CODE_TARGET_REPRESENTATION) {
sink_->Put(CODE_BACKREF_SERIALIZATION, "BackRefCodeSerialization");
} else {
ASSERT(reference_representation == TAGGED_REPRESENTATION);
sink_->Put(BACKREF_SERIALIZATION, "BackRefSerialization");
}
sink_->Put(space, "space");
sink_->PutInt(offset, "offset");
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
heap_object,
sink_,
reference_representation);
serializer.Serialize();
}
} else {
// Serialize a Smi.
unsigned int value = Smi::cast(o)->value() + kSmiBias;
sink_->Put(SMI_SERIALIZATION, "SmiSerialization");
sink_->PutInt(value, "smi");
}
}
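// Worked example of the bias: with kSmiBias == 16, the Smi -3 is written as
// PutInt(13), a single snapshot byte; the deserializer's
// Smi::FromInt(source_->GetInt() - kSmiBias) recovers -3. Without the bias a
// small negative value would wrap to a huge unsigned int and cost the
// maximum number of varint bytes.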
void Serializer2::ObjectSerializer::Serialize() {
int space = Serializer2::SpaceOfObject(object_);
int size = object_->Size();
if (reference_representation_ == TAGGED_REPRESENTATION) {
sink_->Put(OBJECT_SERIALIZATION, "ObjectSerialization");
} else {
ASSERT(reference_representation_ == CODE_TARGET_REPRESENTATION);
sink_->Put(CODE_OBJECT_SERIALIZATION, "ObjectSerialization");
}
sink_->Put(space, "space");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
// Get the map before overwriting it.
Map* map = object_->map();
// Mark this object as already serialized.
object_->set_map_word(
MapWord::FromSerializationAddress(serializer_->Allocate(space, size)));
// Serialize the map (first word of the object).
serializer_->SerializeObject(map, TAGGED_REPRESENTATION);
// Serialize the rest of the object.
ASSERT(bytes_processed_so_far_ == 0);
bytes_processed_so_far_ = kPointerSize;
object_->IterateBody(map->instance_type(), size, this);
OutputRawData(object_->address() + size);
}
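// Note: stashing the allocation address in the map word (above) is what
// makes the map_word.IsSerializationAddress() test in SerializeObject() work
// on the second encounter with an object - and it is also why
// Serializer2::Serialize() is documented as destroying the heap contents.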
void Serializer2::ObjectSerializer::VisitPointers(Object** start,
Object** end) {
Address pointers_start = reinterpret_cast<Address>(start);
OutputRawData(pointers_start);
for (Object** current = start; current < end; current++) {
serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
}
bytes_processed_so_far_ += (end - start) * kPointerSize;
}
void Serializer2::ObjectSerializer::VisitExternalReferences(Address* start,
Address* end) {
Address references_start = reinterpret_cast<Address>(start);
OutputRawData(references_start);
for (Address* current = start; current < end; current++) {
sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "External reference");
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
}
bytes_processed_so_far_ += (end - start) * kPointerSize;
}
void Serializer2::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
bytes_processed_so_far_ += Assembler::kCallTargetSize;
}
void Serializer2::ObjectSerializer::OutputRawData(Address up_to) {
Address object_start = object_->address();
int up_to_offset = up_to - object_start;
int skipped = up_to_offset - bytes_processed_so_far_;
// This assert will fail if the reloc info gives us the target_address_address
// locations in a non-ascending order. Luckily that doesn't happen.
ASSERT(skipped >= 0);
if (skipped != 0) {
sink_->Put(RAW_DATA_SERIALIZATION, "raw data");
sink_->PutInt(skipped, "length");
for (int i = 0; i < skipped; i++) {
unsigned int data = object_start[bytes_processed_so_far_ + i];
sink_->Put(data, "byte");
}
}
bytes_processed_so_far_ += skipped;
}
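// Worked example: if bytes_processed_so_far_ == 4 and the next tagged field
// starts 12 bytes into the object, the 8 untagged bytes in between go out as
// one RAW_DATA_SERIALIZATION record with length 8, byte for byte.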
int Serializer2::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
if (Heap::InSpace(object, s)) {
if (i == LO_SPACE) {
if (object->IsCode()) {
return kLargeCode;
} else if (object->IsFixedArray()) {
return kLargeFixedArray;
} else {
return kLargeData;
}
}
return i;
}
}
UNREACHABLE();
return 0;
}
int Serializer2::SpaceOfAlreadySerializedObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
if (Heap::InSpace(object, s)) {
return i;
}
}
UNREACHABLE();
return 0;
}
int Serializer2::Allocate(int space, int size) {
ASSERT(space >= 0 && space < kNumberOfSpaces);
if (SpaceIsLarge(space)) {
// In large object space we merely number the objects instead of trying to
// determine some sort of address.
return fullness_[LO_SPACE]++;
}
if (SpaceIsPaged(space)) {
// Paged spaces are a little special. We encode their addresses as if the
// pages were all contiguous and each page were filled up in the range
// 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
// and allocation does not start at offset 0 in the page, but this scheme
// means the deserializer can get the page number quickly by shifting the
// serialized address.
ASSERT(IsPowerOf2(Page::kPageSize));
int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
ASSERT(size <= Page::kObjectAreaSize);
if (used_in_this_page + size > Page::kObjectAreaSize) {
fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
}
}
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
}
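// Worked example (illustrative numbers only, 8K pages): with
// fullness_[space] == 8000, allocating 500 bytes would straddle the object
// area, so fullness_ is first rounded up to 8192 - the virtual start of
// page 1 - and the object gets virtual address 8192. The deserializer
// recovers the page number as 8192 >> kPageSizeBits == 1, matching
// Deserializer2::GetAddress().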
} } // namespace v8::internal

230
deps/v8/src/serialize.h

@@ -262,7 +262,18 @@ class SnapshotReader {
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer: public ObjectVisitor {
// TODO(erikcorry): Get rid of this superclass when we are using the new
// snapshot code exclusively.
class GenericDeserializer: public ObjectVisitor {
public:
virtual void GetLog() = 0;
virtual void Deserialize() = 0;
};
// TODO(erikcorry): Get rid of this class.
class Deserializer: public GenericDeserializer {
public:
// Create a deserializer. The snapshot is held in str and has size len.
Deserializer(const byte* str, int len);
@@ -339,6 +350,223 @@ class Deserializer: public ObjectVisitor {
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
class SnapshotByteSource {
public:
SnapshotByteSource(const byte* array, int length)
: data_(array), length_(length), position_(0) { }
bool HasMore() { return position_ < length_; }
int Get() {
ASSERT(position_ < length_);
return data_[position_++];
}
int GetInt() {
// A little loop unrolling to catch the really small ints.
int snapshot_byte = Get();
if ((snapshot_byte & 0x80) == 0) {
return snapshot_byte;
}
uintptr_t accumulator = (snapshot_byte & 0x7f) << 7;
while (true) {
snapshot_byte = Get();
if ((snapshot_byte & 0x80) == 0) {
return accumulator | snapshot_byte;
}
accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
}
UNREACHABLE();
return accumulator;
}
bool AtEOF() {
return position_ == length_;
}
private:
const byte* data_;
int length_;
int position_;
};
// The SerDes class is a common superclass for Serializer2 and Deserializer2
// which is used to store common constants and methods used by both.
// TODO(erikcorry): This should inherit from ObjectVisitor.
class SerDes: public GenericDeserializer {
protected:
enum DataType {
SMI_SERIALIZATION,
RAW_DATA_SERIALIZATION,
OBJECT_SERIALIZATION,
CODE_OBJECT_SERIALIZATION,
BACKREF_SERIALIZATION,
CODE_BACKREF_SERIALIZATION,
EXTERNAL_REFERENCE_SERIALIZATION,
SYNCHRONIZE
};
// Our Smi encoding is much more efficient for small positive integers than it
// is for negative numbers, so we add a bias before encoding and subtract it
// after decoding so that popular small negative Smis are efficiently encoded.
static const int kSmiBias = 16;
static const int kLargeData = LAST_SPACE;
static const int kLargeCode = kLargeData + 1;
static const int kLargeFixedArray = kLargeCode + 1;
static const int kNumberOfSpaces = kLargeFixedArray + 1;
static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
};
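// For concreteness (values assumed from the AllocationSpace enum of this
// era, where LAST_SPACE == LO_SPACE == 6): kLargeData == 6, kLargeCode == 7,
// kLargeFixedArray == 8 and kNumberOfSpaces == 9, so SpaceIsLarge() is
// simply "space >= 6" and all three large tags fold back to the shared
// LO_SPACE bookkeeping.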
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer2: public SerDes {
public:
// Create a deserializer from a snapshot byte source.
explicit Deserializer2(SnapshotByteSource* source);
virtual ~Deserializer2() { }
// Deserialize the snapshot into an empty heap.
void Deserialize();
void GetLog() { } // TODO(erikcorry): Get rid of this.
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
private:
virtual void VisitPointers(Object** start, Object** end);
virtual void VisitExternalReferences(Address* start, Address* end) {
UNREACHABLE();
}
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
UNREACHABLE();
}
int CurrentAllocationAddress(int space) {
// The three different kinds of large objects have different tags in the
// snapshot so the deserializer knows which kind of object to allocate,
// but they share a fullness_ entry.
if (SpaceIsLarge(space)) space = LO_SPACE;
return fullness_[space];
}
HeapObject* GetAddress(int space);
Address Allocate(int space, int size);
bool ReadObject(Object** write_back);
// Keep track of the pages in the paged spaces.
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
List<Address> pages_[SerDes::kNumberOfSpaces];
SnapshotByteSource* source_;
ExternalReferenceDecoder* external_reference_decoder_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no
// sense in large object space.
int fullness_[LAST_SPACE + 1];
DISALLOW_COPY_AND_ASSIGN(Deserializer2);
};
class SnapshotByteSink {
public:
virtual ~SnapshotByteSink() { }
virtual void Put(int byte, const char* description) = 0;
void PutInt(uintptr_t integer, const char* description);
};
class Serializer2 : public SerDes {
public:
explicit Serializer2(SnapshotByteSink* sink);
// Serialize the current state of the heap. This operation destroys the
// heap contents.
void Serialize();
void VisitPointers(Object** start, Object** end);
void GetLog() { } // TODO(erikcorry): Get rid of this.
void Deserialize() { } // TODO(erikcorry): Get rid of this.
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
private:
enum ReferenceRepresentation {
TAGGED_REPRESENTATION, // A tagged object reference.
CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
};
class ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer2* serializer,
Object* o,
SnapshotByteSink* sink,
ReferenceRepresentation representation)
: serializer_(serializer),
object_(HeapObject::cast(o)),
sink_(sink),
reference_representation_(representation),
bytes_processed_so_far_(0) { }
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitExternalReferences(Address* start, Address* end);
void VisitCodeTarget(RelocInfo* target);
private:
void OutputRawData(Address up_to);
Serializer2* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
ReferenceRepresentation reference_representation_;
int bytes_processed_so_far_;
};
void SerializeObject(Object* o, ReferenceRepresentation representation);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
// to indicate to the deserializer what kind of large object allocation
// to make.
static int SpaceOfObject(HeapObject* object);
// This just returns the space of the object. It will return LO_SPACE
// for all large objects since you can't check the type of the object
// once the map has been used for the serialization address.
static int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size);
int CurrentAllocationAddress(int space) {
if (SpaceIsLarge(space)) space = LO_SPACE;
return fullness_[space];
}
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no
// sense in large object space.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
friend class ObjectSerializer;
friend class Deserializer2;
DISALLOW_COPY_AND_ASSIGN(Serializer2);
};
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_

56
deps/v8/src/snapshot-common.cc

@@ -32,6 +32,7 @@
#include "api.h"
#include "serialize.h"
#include "snapshot.h"
#include "platform.h"
namespace v8 {
namespace internal {
@@ -43,6 +44,13 @@ bool Snapshot::Deserialize(const byte* content, int len) {
}
bool Snapshot::Deserialize2(const byte* content, int len) {
SnapshotByteSource source(content, len);
Deserializer2 deserializer(&source);
return V8::Initialize(&deserializer);
}
bool Snapshot::Initialize(const char* snapshot_file) {
if (snapshot_file) {
int len;
@@ -58,6 +66,20 @@ bool Snapshot::Initialize(const char* snapshot_file) {
}
bool Snapshot::Initialize2(const char* snapshot_file) {
if (snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
Deserialize2(str, len);
DeleteArray(str);
} else if (size_ > 0) {
Deserialize2(data_, size_);
}
return true;
}
bool Snapshot::WriteToFile(const char* snapshot_file) {
Serializer ser;
ser.Serialize();
@@ -72,4 +94,38 @@ bool Snapshot::WriteToFile(const char* snapshot_file) {
}
class FileByteSink : public SnapshotByteSink {
public:
explicit FileByteSink(const char* snapshot_file) {
fp_ = OS::FOpen(snapshot_file, "wb");
if (fp_ == NULL) {
PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
exit(1);
}
}
virtual ~FileByteSink() {
if (fp_ != NULL) {
fclose(fp_);
}
}
virtual void Put(int byte, const char* description) {
if (fp_ != NULL) {
fputc(byte, fp_);
}
}
private:
FILE* fp_;
};
bool Snapshot::WriteToFile2(const char* snapshot_file) {
FileByteSink file(snapshot_file);
Serializer2 ser(&file);
ser.Serialize();
return true;
}
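// A minimal sketch of an alternative sink, to show how small the
// SnapshotByteSink contract is. ListByteSink is a hypothetical name, not
// part of this patch; it assumes v8's List<T> from src/list.h.
class ListByteSink : public SnapshotByteSink {
 public:
  virtual void Put(int b, const char* description) {
    data_.Add(static_cast<byte>(b));
  }
  const List<byte>& data() { return data_; }
 private:
  List<byte> data_;
};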
} } // namespace v8::internal

3
deps/v8/src/snapshot.h

@@ -37,6 +37,7 @@ class Snapshot {
// NULL, use the internal snapshot instead. Returns false if no snapshot
// could be found.
static bool Initialize(const char* snapshot_file = NULL);
static bool Initialize2(const char* snapshot_file = NULL);
// Returns whether or not the snapshot is enabled.
static bool IsEnabled() { return size_ != 0; }
@@ -44,12 +45,14 @@ class Snapshot {
// Write snapshot to the given file. Returns true if snapshot was written
// successfully.
static bool WriteToFile(const char* snapshot_file);
static bool WriteToFile2(const char* snapshot_file);
private:
static const byte data_[];
static int size_;
static bool Deserialize(const byte* content, int len);
static bool Deserialize2(const byte* content, int len);
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};

7
deps/v8/src/spaces-inl.h

@@ -360,6 +360,13 @@ Object* NewSpace::AllocateRawInternal(int size_in_bytes,
return obj;
}
bool FreeListNode::IsFreeListNode(HeapObject* object) {
return object->map() == Heap::raw_unchecked_byte_array_map()
|| object->map() == Heap::raw_unchecked_one_pointer_filler_map()
|| object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
#endif // V8_SPACES_INL_H_

42
deps/v8/src/spaces.cc

@@ -982,7 +982,7 @@ bool NewSpace::Setup(Address start, int size) {
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
int maximum_semispace_capacity = Heap::SemiSpaceSize();
int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -998,7 +998,7 @@ bool NewSpace::Setup(Address start, int size) {
#undef SET_NAME
#endif
ASSERT(size == 2 * maximum_semispace_capacity);
ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
if (!to_space_.Setup(start,
@@ -1527,7 +1527,9 @@ void FreeListNode::set_size(int size_in_bytes) {
// correct size.
if (size_in_bytes > ByteArray::kAlignedSize) {
set_map(Heap::raw_unchecked_byte_array_map());
ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(Heap::raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,13 +1537,13 @@ void FreeListNode::set_size(int size_in_bytes) {
} else {
UNREACHABLE();
}
ASSERT(Size() == size_in_bytes);
// We would like to ASSERT(Size() == size_in_bytes) but this would fail during
// deserialization because the byte array map is not done yet.
}
Address FreeListNode::next() {
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
ASSERT(IsFreeListNode(this));
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
@@ -1552,8 +1554,7 @@ Address FreeListNode::next() {
void FreeListNode::set_next(Address next) {
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
ASSERT(IsFreeListNode(this));
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
@@ -1830,13 +1831,16 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// There is no next page in this space. Try free list allocation.
int wasted_bytes;
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
// There is no next page in this space. Try free list allocation unless that
// is currently forbidden.
if (!Heap::linear_allocation()) {
int wasted_bytes;
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
}
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2232,10 +2236,10 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// There is no next page in this space. Try free list allocation.
// The fixed space free list implicitly assumes that all free blocks
// are of the fixed size.
if (size_in_bytes == object_size_in_bytes_) {
// There is no next page in this space. Try free list allocation unless
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
if (!Heap::linear_allocation()) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);

14
deps/v8/src/spaces.h

@@ -862,6 +862,10 @@ class PagedSpace : public Space {
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
int CommittedMemory() { return Capacity(); }
// Available bytes without growing.
int Available() { return accounting_stats_.Available(); }
@@ -1252,11 +1256,19 @@ class NewSpace : public Space {
// Return the allocated bytes in the active semispace.
virtual int Size() { return top() - bottom(); }
// Return the current capacity of a semispace.
int Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
}
// Return the total amount of memory committed for new space.
int CommittedMemory() {
if (from_space_.is_committed()) return 2 * Capacity();
return Capacity();
}
// Return the available bytes without growing in the active semispace.
int Available() { return Capacity() - Size(); }
@@ -1423,6 +1435,8 @@ class FreeListNode: public HeapObject {
return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
}
static inline bool IsFreeListNode(HeapObject* object);
// Set the size in bytes, which can be read with HeapObject::Size(). This
// function also writes a map to the first word of the block so that it
// looks like a heap object to the garbage collector and heap iteration

2
deps/v8/src/string-stream.cc

@@ -188,7 +188,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
void StringStream::PrintObject(Object* o) {
o->ShortPrint(this);
if (o->IsString()) {
if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
if (String::cast(o)->length() <= String::kMaxMediumSize) {
return;
}
} else if (o->IsNumber() || o->IsOddball()) {

9
deps/v8/src/string.js

@@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -810,10 +810,13 @@ ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
var len = end - start;
if (len == 0) return;
var elements = this.elements;
if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
if (start < 0x80000 && len < 0x800) {
elements[elements.length] = (start << 11) + len;
} else {
elements[elements.length] = SubString(this.special_string, start, end);
// 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
// so -len is a smi.
elements[elements.length] = -len;
elements[elements.length] = start;
}
}
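// Worked example of the packed form above (illustrative values): a slice
// with start == 5 and len == 3 packs into the single smi
// (5 << 11) + 3 == 10243; a slice too large to pack is stored as the pair
// -len, start, and readers can tell the encodings apart because packed
// entries are always positive.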

7
deps/v8/src/stub-cache.cc

@@ -735,11 +735,16 @@ Handle<Code> ComputeCallMiss(int argc) {
Object* LoadCallbackProperty(Arguments args) {
ASSERT(args[0]->IsJSObject());
ASSERT(args[1]->IsJSObject());
AccessorInfo* callback = AccessorInfo::cast(args[2]);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
v8::AccessorInfo info(args.arguments());
CustomArguments custom_args(callback->data(),
JSObject::cast(args[0]),
JSObject::cast(args[1]));
v8::AccessorInfo info(custom_args.end());
HandleScope scope;
v8::Handle<v8::Value> result;
{

57
deps/v8/src/third_party/valgrind/valgrind.h

@@ -74,6 +74,7 @@
#define __VALGRIND_H
#include <stdarg.h>
#include <stdint.h>
/* Nb: this file might be included in a file compiled with -ansi. So
we can't use C++ style "//" comments nor the "asm" keyword (instead
@@ -232,7 +233,7 @@ typedef
typedef
struct {
unsigned long long int nraddr; /* where's the code? */
uint64_t nraddr; /* where's the code? */
}
OrigFn;
@@ -243,14 +244,14 @@ typedef
#define VALGRIND_DO_CLIENT_REQUEST( \
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
{ volatile unsigned long long int _zzq_args[6]; \
{ volatile uint64_t _zzq_args[6]; \
volatile unsigned long long int _zzq_result; \
volatile uint64_t _zzq_result; \
_zzq_args[0] = (unsigned long long int)(_zzq_request); \
_zzq_args[0] = (uint64_t)(_zzq_request); \
_zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
_zzq_args[1] = (uint64_t)(_zzq_arg1); \
_zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
_zzq_args[2] = (uint64_t)(_zzq_arg2); \
_zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
_zzq_args[3] = (uint64_t)(_zzq_arg3); \
_zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
_zzq_args[4] = (uint64_t)(_zzq_arg4); \
_zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
_zzq_args[5] = (uint64_t)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
@@ -263,7 +264,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long long int __addr; \
volatile uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
@@ -346,8 +347,8 @@ typedef
typedef
struct {
unsigned long long int nraddr; /* where's the code? */
uint64_t nraddr; /* where's the code? */
unsigned long long int r2; /* what tocptr do we need? */
uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -359,15 +360,15 @@ typedef
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
{ unsigned long long int _zzq_args[6]; \
{ uint64_t _zzq_args[6]; \
register unsigned long long int _zzq_result __asm__("r3"); \
register uint64_t _zzq_result __asm__("r3"); \
register unsigned long long int* _zzq_ptr __asm__("r4"); \
register uint64_t* _zzq_ptr __asm__("r4"); \
_zzq_args[0] = (unsigned long long int)(_zzq_request); \
_zzq_args[0] = (uint64_t)(_zzq_request); \
_zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
_zzq_args[1] = (uint64_t)(_zzq_arg1); \
_zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
_zzq_args[2] = (uint64_t)(_zzq_arg2); \
_zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
_zzq_args[3] = (uint64_t)(_zzq_arg3); \
_zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
_zzq_args[4] = (uint64_t)(_zzq_arg4); \
_zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
_zzq_args[5] = (uint64_t)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
@@ -380,7 +381,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
register unsigned long long int __addr __asm__("r3"); \
register uint64_t __addr __asm__("r3"); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2" \
@@ -484,8 +485,8 @@ typedef
typedef
struct {
unsigned long long int nraddr; /* where's the code? */
uint64_t nraddr; /* where's the code? */
unsigned long long int r2; /* what tocptr do we need? */
uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -497,9 +498,9 @@ typedef
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
{ unsigned long long int _zzq_args[7]; \
{ uint64_t _zzq_args[7]; \
register unsigned long long int _zzq_result; \
register uint64_t _zzq_result; \
register unsigned long long int* _zzq_ptr; \
register uint64_t* _zzq_ptr; \
_zzq_args[0] = (unsigned int long long)(_zzq_request); \
_zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
_zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
@@ -522,7 +523,7 @@ typedef
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
register unsigned long long int __addr; \
register uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \

4
deps/v8/src/top.h

@@ -170,6 +170,10 @@ class Top {
return &thread_local_.external_caught_exception_;
}
static Object** scheduled_exception_address() {
return &thread_local_.scheduled_exception_;
}
static Object* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_.scheduled_exception_;

1
deps/v8/src/v8-counters.h

@@ -118,6 +118,7 @@ namespace internal {
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
/* Count how much the monomorphic keyed-load stubs are hit. */ \
SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype) \
SC(keyed_load_string_length, V8.KeyedLoadStringLength) \

2
deps/v8/src/v8.cc

@@ -45,7 +45,7 @@ bool V8::has_been_setup_ = false;
bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::Initialize(Deserializer *des) {
bool V8::Initialize(GenericDeserializer *des) {
bool create_heap_objects = des == NULL;
if (has_been_disposed_ || has_fatal_error_) return false;
if (IsRunning()) return true;

2
deps/v8/src/v8.h

@@ -80,7 +80,7 @@ class V8 : public AllStatic {
// created from scratch. If a non-null Deserializer is given, the
// initial state is created by reading the deserialized data into an
// empty heap.
static bool Initialize(Deserializer* des);
static bool Initialize(GenericDeserializer* des);
static void TearDown();
static bool IsRunning() { return is_running_; }
// To be dead you have to have lived

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
#define BUILD_NUMBER 16
#define BUILD_NUMBER 17
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

86
deps/v8/src/x64/assembler-x64.cc

@@ -393,7 +393,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
(desc.buffer_size > Heap::OldGenerationSize())) {
(desc.buffer_size > Heap::MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -574,11 +574,11 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
emit(src.value_);
} else if (dst.is(rax)) {
emit(0x05 | (subcode << 3));
emitl(src.value_);
emitw(src.value_);
} else {
emit(0x81);
emit_modrm(subcode, dst);
emitl(src.value_);
emitw(src.value_);
}
}
@@ -597,7 +597,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
} else {
emit(0x81);
emit_operand(subcode, dst);
emitl(src.value_);
emitw(src.value_);
}
}
@@ -1255,6 +1255,15 @@ void Assembler::movb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x89);
emit_operand(src, dst);
}
void Assembler::movl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1439,6 +1448,26 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
}
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_32(dst, src);
emit(0x0F);
emit(0xBE);
emit_operand(dst, src);
}
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xBF);
emit_operand(dst, src);
}
void Assembler::movsxlq(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1477,6 +1506,16 @@ void Assembler::movzxbl(Register dst, const Operand& src) {
}
void Assembler::movzxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xB7);
emit_operand(dst, src);
}
void Assembler::movzxwl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1970,6 +2009,14 @@ void Assembler::fstp_d(const Operand& adr) {
}
void Assembler::fstp(int index) {
ASSERT(is_uint3(index));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_farith(0xDD, 0xD8, index);
}
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2021,7 +2068,7 @@ void Assembler::fistp_d(const Operand& adr) {
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(8, adr);
emit_operand(7, adr);
}
@@ -2190,6 +2237,22 @@ void Assembler::fucompp() {
}
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xDB);
emit(0xE8 + i);
}
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xDF);
emit(0xE9);
}
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2258,18 +2321,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src) {
}
void Assembler::movsd(Register dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x11); // store
emit_sse_operand(src, dst);
}
void Assembler::movsd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
27
deps/v8/src/x64/assembler-x64.h

@@ -458,7 +458,14 @@ class Assembler : public Malloced {
// the relative displacements stored in the code.
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
// This sets the branch destination (which is in the instruction on x64).
inline static void set_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
inline Handle<Object> code_target_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
static const int kCallTargetSize = 4; // Use 32-bit displacement.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
@@ -513,6 +520,10 @@ class Assembler : public Malloced {
void movb(Register dst, Immediate imm);
void movb(const Operand& dst, Register src);
// Move the low 16 bits of a 64-bit register value to a 16-bit
// memory location.
void movw(const Operand& dst, Register src);
void movl(Register dst, Register src);
void movl(Register dst, const Operand& src);
void movl(const Operand& dst, Register src);
@@ -542,10 +553,13 @@ class Assembler : public Malloced {
void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
void movsxbq(Register dst, const Operand& src);
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
void movzxbq(Register dst, const Operand& src);
void movzxbl(Register dst, const Operand& src);
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
@@ -913,7 +927,11 @@ class Assembler : public Malloced {
void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
arithmetic_op(0x33, dst, src);
if (dst.code() == src.code()) {
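// dst == src is the common zeroing idiom; the 32-bit form zero-extends
// to 64 bits and its encoding saves the REX.W prefix byte.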
arithmetic_op_32(0x33, dst, src);
} else {
arithmetic_op(0x33, dst, src);
}
}
void xorl(Register dst, Register src) {
@@ -1006,6 +1024,7 @@ class Assembler : public Malloced {
void fstp_s(const Operand& adr);
void fstp_d(const Operand& adr);
void fstp(int index);
void fild_s(const Operand& adr);
void fild_d(const Operand& adr);
@@ -1042,6 +1061,9 @@ class Assembler : public Malloced {
void ftst();
void fucomp(int i);
void fucompp();
void fucomi(int i);
void fucomip();
void fcompp();
void fnstsw_ax();
void fwait();
@@ -1056,8 +1078,7 @@ class Assembler : public Malloced {
// SSE2 instructions
void movsd(const Operand& dst, XMMRegister src);
void movsd(Register src, XMMRegister dst);
void movsd(XMMRegister src, XMMRegister dst);
void movsd(XMMRegister dst, Register src);
void movsd(XMMRegister src, const Operand& dst);
void cvttss2si(Register dst, const Operand& src);

78
deps/v8/src/x64/builtins-x64.cc

@@ -246,6 +246,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -318,47 +320,47 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
if (FLAG_check_stack) {
// Check the stack for overflow or a break request.
// We need to catch preemptions right here, otherwise an unlucky preemption
// could show up as a failed apply.
Label retry_preemption;
Label no_preemption;
__ bind(&retry_preemption);
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ movq(kScratchRegister, stack_guard_limit);
__ movq(rcx, rsp);
__ subq(rcx, Operand(kScratchRegister, 0));
// rcx contains the difference between the stack limit and the stack top.
// We use it below to check that there is enough room for the arguments.
__ j(above, &no_preemption);
// Preemption!
// Because runtime functions always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack.
__ push(rax);
__ Push(Smi::FromInt(0));
// Do call to runtime routine.
__ CallRuntime(Runtime::kStackGuard, 1);
__ pop(rax);
__ jmp(&retry_preemption);
__ bind(&no_preemption);
Label okay;
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
__ cmpq(rcx, rdx);
__ j(greater, &okay);
// Too bad: Out of stack space.
__ push(Operand(rbp, kFunctionOffset));
__ push(rax);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
}
// End of stack check.
// Push current index and limit.
const int kLimitOffset =
@@ -400,6 +402,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.

Some files were not shown because too many files changed in this diff
