
v8: upgrade to 3.20.17

Branch: v0.11.7-release
Author: Trevor Norris
Commit: 26bc8db33f
deps/v8/ChangeLog | 33
deps/v8/include/v8-profiler.h | 20
deps/v8/include/v8.h | 16
deps/v8/src/api.cc | 37
deps/v8/src/arm/lithium-arm.cc | 96
deps/v8/src/arm/lithium-arm.h | 64
deps/v8/src/arm/lithium-codegen-arm.cc | 279
deps/v8/src/arm/lithium-codegen-arm.h | 29
deps/v8/src/arm/macro-assembler-arm.cc | 7
deps/v8/src/assert-scope.h | 9
deps/v8/src/ast.cc | 3
deps/v8/src/ast.h | 29
deps/v8/src/code-stubs-hydrogen.cc | 21
deps/v8/src/code-stubs.cc | 3
deps/v8/src/compiler.cc | 19
deps/v8/src/compiler.h | 17
deps/v8/src/cpu-profiler.cc | 2
deps/v8/src/cpu-profiler.h | 2
deps/v8/src/debug.cc | 7
deps/v8/src/deoptimizer.cc | 293
deps/v8/src/deoptimizer.h | 57
deps/v8/src/effects.h | 361
deps/v8/src/execution.cc | 4
deps/v8/src/extensions/i18n/collator.cc | 366
deps/v8/src/extensions/i18n/collator.js | 7
deps/v8/src/extensions/i18n/i18n-extension.cc | 18
deps/v8/src/extensions/i18n/number-format.cc | 418
deps/v8/src/extensions/i18n/number-format.js | 12
deps/v8/src/factory.cc | 13
deps/v8/src/factory.h | 6
deps/v8/src/flag-definitions.h | 2
deps/v8/src/global-handles.cc | 511
deps/v8/src/global-handles.h | 64
deps/v8/src/heap.cc | 43
deps/v8/src/heap.h | 9
deps/v8/src/hydrogen-dce.cc | 2
deps/v8/src/hydrogen-escape-analysis.cc | 230
deps/v8/src/hydrogen-escape-analysis.h | 35
deps/v8/src/hydrogen-gvn.h | 2
deps/v8/src/hydrogen-instructions.cc | 222
deps/v8/src/hydrogen-instructions.h | 197
deps/v8/src/hydrogen-mark-deoptimize.cc | 27
deps/v8/src/hydrogen-mark-deoptimize.h | 12
deps/v8/src/hydrogen-osr.cc | 1
deps/v8/src/hydrogen-representation-changes.cc | 10
deps/v8/src/hydrogen.cc | 279
deps/v8/src/hydrogen.h | 77
deps/v8/src/i18n.cc | 685
deps/v8/src/i18n.h | 50
deps/v8/src/ia32/code-stubs-ia32.cc | 55
deps/v8/src/ia32/lithium-codegen-ia32.cc | 342
deps/v8/src/ia32/lithium-codegen-ia32.h | 22
deps/v8/src/ia32/lithium-ia32.cc | 122
deps/v8/src/ia32/lithium-ia32.h | 63
deps/v8/src/ia32/macro-assembler-ia32.cc | 69
deps/v8/src/ia32/macro-assembler-ia32.h | 13
deps/v8/src/ia32/stub-cache-ia32.cc | 2
deps/v8/src/isolate.cc | 1
deps/v8/src/isolate.h | 17
deps/v8/src/json-stringifier.h | 1
deps/v8/src/lithium.h | 48
deps/v8/src/liveedit.cc | 1
deps/v8/src/mips/codegen-mips.cc | 2
deps/v8/src/mips/lithium-codegen-mips.cc | 180
deps/v8/src/mips/lithium-codegen-mips.h | 19
deps/v8/src/mips/lithium-mips.cc | 91
deps/v8/src/mips/lithium-mips.h | 47
deps/v8/src/mips/macro-assembler-mips.cc | 32
deps/v8/src/mips/macro-assembler-mips.h | 14
deps/v8/src/objects-printer.cc | 84
deps/v8/src/objects.cc | 17
deps/v8/src/objects.h | 6
deps/v8/src/optimizing-compiler-thread.cc | 74
deps/v8/src/optimizing-compiler-thread.h | 11
deps/v8/src/profile-generator-inl.h | 2
deps/v8/src/profile-generator.cc | 6
deps/v8/src/profile-generator.h | 2
deps/v8/src/runtime.cc | 219
deps/v8/src/runtime.h | 9
deps/v8/src/sampler.cc | 3
deps/v8/src/splay-tree-inl.h | 9
deps/v8/src/splay-tree.h | 14
deps/v8/src/types.h | 5
deps/v8/src/typing.cc | 236
deps/v8/src/typing.h | 17
deps/v8/src/v8globals.h | 3
deps/v8/src/version.cc | 4
deps/v8/src/x64/lithium-codegen-x64.cc | 285
deps/v8/src/x64/lithium-codegen-x64.h | 25
deps/v8/src/x64/lithium-x64.cc | 96
deps/v8/src/x64/lithium-x64.h | 63
deps/v8/src/x64/stub-cache-x64.cc | 13
deps/v8/src/zone-inl.h | 6
deps/v8/src/zone.h | 6
deps/v8/test/cctest/cctest.h | 53
deps/v8/test/cctest/test-api.cc | 14
deps/v8/test/cctest/test-cpu-profiler.cc | 55
deps/v8/test/cctest/test-deoptimization.cc | 22
deps/v8/test/cctest/test-global-handles.cc | 155
deps/v8/test/cctest/test-heap.cc | 1

100 files changed

deps/v8/ChangeLog (33 changed lines)

@@ -1,3 +1,36 @@
2013-08-14: Version 3.20.17

        Fixed Math.round/floor that had bogus Smi representation
        (Chromium issue 272564)

        Performance and stability improvements on all platforms.


2013-08-13: Version 3.20.16

        Fixed bug in HPhi::SimplifyConstantInput (Chromium issue 269679)

        Fixed gcmole bugs in i18n code (issue 2745)

        ia32: Calls to the TranscendentalCacheStub must ensure that esi is
        set (issue 2827)

        Made sure polymorphic element access creates non-replaying
        phis. (issue 2815)

        Allowed HPhis to have an invalid merge index. (issue 2815)

        Fixed smi-based math floor. (Chromium issue 270268)

        Deprecated self and total time getters and total sample count
        getter on CpuProfileNode. (Chromium issue 267595)

        Fixed Object.freeze, Object.observe wrt CountOperation and
        CompoundAssignment. (issue 2774,2779)

        Performance and stability improvements on all platforms.


2013-08-07: Version 3.20.14

        Exposed eternal handle api.

deps/v8/include/v8-profiler.h (20 changed lines)

@@ -61,20 +61,27 @@ class V8_EXPORT CpuProfileNode {
* Returns total (self + children) execution time of the function,
* in milliseconds, estimated by samples count.
*/
double GetTotalTime() const;
V8_DEPRECATED(double GetTotalTime() const);
/**
* Returns self execution time of the function, in milliseconds,
* estimated by samples count.
*/
double GetSelfTime() const;
V8_DEPRECATED(double GetSelfTime() const);
/** Returns the count of samples where function exists. */
double GetTotalSamplesCount() const;
V8_DEPRECATED(double GetTotalSamplesCount() const);
/** Returns the count of samples where function was currently executing. */
/** DEPRECATED. Please use GetHitCount instead.
* Returns the count of samples where function was currently executing.
*/
double GetSelfSamplesCount() const;
/**
* Returns the count of samples where the function was currently executing.
*/
unsigned GetHitCount() const;
/** Returns function entry UID. */
unsigned GetCallUid() const;
@@ -192,6 +199,11 @@ class V8_EXPORT CpuProfiler {
*/
void DeleteAllCpuProfiles();
/**
* Tells the profiler whether the embedder is idle.
*/
void SetIdle(bool is_idle);
private:
CpuProfiler();
~CpuProfiler();
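
For embedders, the net API change in this header is: read per-node sample counts
through GetHitCount() instead of the now-deprecated double-returning getters, and
optionally hint idleness through the new CpuProfiler::SetIdle(). A minimal usage
sketch against the new surface (node and profiler are assumed, already-obtained
pointers; the variable names are hypothetical):

    // Before (deprecated in 3.20.17): double n = node->GetSelfSamplesCount();
    unsigned hits = node->GetHitCount();  // samples with this function on top

    // New: tell the profiler the embedder is idle so samples taken while
    // waiting (e.g. in an event loop) are attributed to IDLE, not EXTERNAL.
    profiler->SetIdle(true);   // entering the idle wait
    // ... block on I/O or timers ...
    profiler->SetIdle(false);  // back to doing work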

deps/v8/include/v8.h (16 changed lines)

@@ -1273,13 +1273,13 @@ class V8_EXPORT StackFrame {
class V8_EXPORT JSON {
public:
/**
* Tries to parse the string |json_string| and returns it as object if
* Tries to parse the string |json_string| and returns it as value if
* successful.
*
* \param json_string The string to parse.
* \return The corresponding object if successfully parsed.
* \return The corresponding value if successfully parsed.
*/
static Local<Object> Parse(Local<String> json_string);
static Local<Value> Parse(Local<String> json_string);
};
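
The signature change is a correctness fix: JSON text need not denote an object,
so returning Local<Object> was too narrow. A short sketch of caller code against
the new signature (isolate and handle-scope setup elided; json_string is an
assumed Local<String>):

    v8::Local<v8::Value> parsed = v8::JSON::Parse(json_string);
    if (!parsed.IsEmpty() && parsed->IsObject()) {
      // Callers that previously assumed Local<Object> must now check the type:
      // parsing "42" yields a Number and parsing "\"x\"" yields a String.
      v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(parsed);
    }
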
@@ -5544,14 +5544,14 @@ class Internals {
V8_INLINE(static uint8_t GetNodeFlag(internal::Object** obj, int shift)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & (1 << shift);
return *addr & static_cast<uint8_t>(1U << shift);
}
V8_INLINE(static void UpdateNodeFlag(internal::Object** obj,
bool value, int shift)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = 1 << shift;
*addr = (*addr & ~mask) | (value << shift);
uint8_t mask = static_cast<uint8_t>(1 << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
V8_INLINE(static uint8_t GetNodeState(internal::Object** obj)) {
@@ -5562,7 +5562,7 @@ class Internals {
V8_INLINE(static void UpdateNodeState(internal::Object** obj,
uint8_t value)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = (*addr & ~kNodeStateMask) | value;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) {
@@ -5927,7 +5927,7 @@ void ReturnValue<T>::Set(uint32_t i) {
TYPE_CHECK(T, Integer);
typedef internal::Internals I;
// Can't simply use INT32_MAX here for whatever reason.
bool fits_into_int32_t = (i & (1 << 31)) == 0;
bool fits_into_int32_t = (i & (1U << 31)) == 0;
if (V8_LIKELY(fits_into_int32_t)) {
Set(static_cast<int32_t>(i));
return;

deps/v8/src/api.cc (37 changed lines)

@@ -781,7 +781,6 @@ void Context::Exit() {
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
isolate->set_context_exit_happened(true);
}
@@ -2620,7 +2619,7 @@ bool StackFrame::IsConstructor() const {
// --- J S O N ---
Local<Object> JSON::Parse(Local<String> json_string) {
Local<Value> JSON::Parse(Local<String> json_string) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
ENTER_V8(isolate);
@@ -2637,7 +2636,7 @@ Local<Object> JSON::Parse(Local<String> json_string) {
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(
i::Handle<i::JSObject>::cast(scope.CloseAndEscape(result)));
i::Handle<i::Object>::cast(scope.CloseAndEscape(result)));
}
@@ -7478,8 +7477,6 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
int CpuProfileNode::GetScriptId() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptId");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
return entry->script_id();
@@ -7496,8 +7493,6 @@ Handle<String> CpuProfileNode::GetScriptResourceName() const {
int CpuProfileNode::GetLineNumber() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
}
@@ -7530,9 +7525,12 @@ double CpuProfileNode::GetSelfSamplesCount() const {
}
unsigned CpuProfileNode::GetHitCount() const {
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
unsigned CpuProfileNode::GetCallUid() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
}
@@ -7543,15 +7541,11 @@ unsigned CpuProfileNode::GetNodeId() const {
int CpuProfileNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
@@ -7572,8 +7566,6 @@ void CpuProfile::Delete() {
unsigned CpuProfile::GetUid() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
@@ -7588,8 +7580,6 @@ Handle<String> CpuProfile::GetTitle() const {
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
@@ -7647,6 +7637,19 @@ void CpuProfiler::DeleteAllCpuProfiles() {
}
void CpuProfiler::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
i::StateTag state = isolate->current_vm_state();
ASSERT(state == i::EXTERNAL || state == i::IDLE);
if (isolate->js_entry_sp() != NULL) return;
if (is_idle) {
isolate->set_current_vm_state(i::IDLE);
} else if (state == i::IDLE) {
isolate->set_current_vm_state(i::EXTERNAL);
}
}
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
return const_cast<i::HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphEdge*>(edge));

deps/v8/src/arm/lithium-arm.cc (96 changed lines)

@@ -593,8 +593,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
&argument_index_accumulator,
&objects_to_materialize));
return instr;
}
@@ -813,7 +815,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
if (phi->merged_index() < last_environment->length()) {
if (phi->HasMergedIndex()) {
last_environment->SetValueAt(phi->merged_index(), phi);
}
}
@@ -883,6 +885,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -898,11 +901,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
argument_index_accumulator,
objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -917,16 +922,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
needs_arguments_object_materialization = true;
op = NULL;
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -937,15 +942,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
if (needs_arguments_object_materialization) {
HArgumentsObject* arguments = hydrogen_env->entry() == NULL
? graph()->GetArgumentsObject()
: hydrogen_env->entry()->arguments_object();
ASSERT(arguments->IsLinked());
for (int i = 1; i < arguments->arguments_count(); ++i) {
HValue* value = arguments->arguments_values()->at(i);
ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
LOperand* op = UseAny(value);
for (int i = object_index; i < objects_to_materialize->length(); ++i) {
HValue* object_to_materialize = objects_to_materialize->at(i);
int previously_materialized_object = -1;
for (int prev = 0; prev < i; ++prev) {
if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
previously_materialized_object = prev;
break;
}
}
int length = object_to_materialize->OperandCount();
bool is_arguments = object_to_materialize->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
continue;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* value = object_to_materialize->OperandAt(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else {
ASSERT(!value->IsPushArgument());
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
@@ -1686,9 +1709,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(
instr->right()->representation()));
ASSERT(instr->left()->representation().Equals(r));
ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1711,6 +1733,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return new(zone()) LCmpHoleAndBranch(object);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2124,23 +2153,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), r0);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, r0), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
@@ -2437,6 +2449,12 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
// There are no real uses of a captured object.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());

deps/v8/src/arm/lithium-arm.h (64 changed lines)

@@ -74,6 +74,7 @@ class LCodeGen;
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -126,7 +127,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -210,7 +210,10 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false) { }
bit_field_(IsCallBits::encode(false)) {
set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -249,20 +252,30 @@
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
// The 31-bit PositionBits field stores the int position value, which may be
// RelocInfo::kNoPosition (-1). The accessors add and subtract 1 so that the
// position encoded in bit_field_ is always >= 0 and fits into the 31-bit
// PositionBits field.
void set_position(int pos) {
bit_field_ = PositionBits::update(bit_field_, pos + 1);
}
int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
void MarkAsCall() { is_call_ = true; }
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@@ -286,10 +299,13 @@
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
bool is_call_;
int bit_field_;
};
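
The bit-field packing above works because PositionBits stores a 31-bit payload at
bit offset 1, next to the 1-bit IsCallBits at offset 0, and the +1 bias makes
RelocInfo::kNoPosition (-1) representable as 0. A standalone sketch of the same
idiom, independent of v8's BitField template (names are illustrative only):

    #include <cassert>
    #include <cstdint>

    const int kNoPosition = -1;

    // Pack "is_call" into bit 0 and the biased position into bits 1..31.
    uint32_t Encode(bool is_call, int position) {
      assert(position >= kNoPosition);  // bias keeps the stored value >= 0
      return static_cast<uint32_t>(is_call) |
             (static_cast<uint32_t>(position + 1) << 1);
    }

    int DecodePosition(uint32_t bits) {
      return static_cast<int>(bits >> 1) - 1;  // undo the +1 bias
    }

    // Encode(false, kNoPosition) == 0, which matches how the constructor
    // leaves bit_field_ after set_position(RelocInfo::kNoPosition).
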
@@ -879,12 +895,24 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
};
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -1511,19 +1539,6 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedFieldPolymorphic(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@@ -2718,7 +2733,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);

deps/v8/src/arm/lithium-codegen-arm.cc (279 changed lines)

@@ -274,6 +274,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
RecordAndUpdatePosition(instr->position());
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
@@ -287,6 +289,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
int pos = instructions_->at(code->instruction_index())->position();
RecordAndUpdatePosition(pos);
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -605,37 +611,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// TODO(mstarzinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
if (value == NULL) {
int arguments_count = environment->values()->length() - translation_size;
translation->BeginArgumentsObject(arguments_count);
for (int i = 0; i < arguments_count; ++i) {
LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(translation_size + i),
environment->HasUint32ValueAt(translation_size + i));
}
continue;
}
AddToTranslation(translation,
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32) {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer) {
if (op == LEnvironment::materialization_marker()) {
int object_index = (*object_index_pointer)++;
if (environment->ObjectIsDuplicateAt(object_index)) {
int dupe_of = environment->ObjectDuplicateOfAt(object_index);
translation->DuplicateObject(dupe_of);
return;
}
int object_length = environment->ObjectLengthAt(object_index);
if (environment->ObjectIsArgumentsAt(object_index)) {
translation->BeginArgumentsObject(object_length);
} else {
translation->BeginCapturedObject(object_length);
}
int dematerialized_index = *dematerialized_index_pointer;
int env_offset = environment->translation_size() + dematerialized_index;
*dematerialized_index_pointer += object_length;
for (int i = 0; i < object_length; ++i) {
LOperand* value = environment->values()->at(env_offset + i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(env_offset + i),
environment->HasUint32ValueAt(env_offset + i),
object_index_pointer,
dematerialized_index_pointer);
}
return;
}
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -762,7 +788,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -785,12 +811,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
__ stop("trap_on_deopt", cc);
if (info()->ShouldTrapOnDeopt()) {
__ stop("trap_on_deopt", condition);
}
ASSERT(info()->IsStub() || frame_is_built_);
if (cc == al && frame_is_built_) {
if (condition == al && frame_is_built_) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
@@ -804,17 +830,17 @@ void LCodeGen::DeoptimizeIf(Condition cc,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
}
__ b(cc, &deopt_jump_table_.last().label);
__ b(condition, &deopt_jump_table_.last().label);
}
}
void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
DeoptimizeIf(cc, environment, bailout_type);
DeoptimizeIf(condition, environment, bailout_type);
}
@@ -977,6 +1003,14 @@ void LCodeGen::RecordPosition(int position) {
}
void LCodeGen::RecordAndUpdatePosition(int position) {
if (position >= 0 && position != old_position_) {
masm()->positions_recorder()->RecordPosition(position);
old_position_ = position;
}
}
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -2118,25 +2152,32 @@ int LCodeGen::GetNextEmittedBlock() const {
}
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
if (right_block == left_block || cc == al) {
if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
__ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
__ b(cc, chunk_->GetAssemblyLabel(left_block));
__ b(condition, chunk_->GetAssemblyLabel(left_block));
} else {
__ b(cc, chunk_->GetAssemblyLabel(left_block));
__ b(condition, chunk_->GetAssemblyLabel(left_block));
__ b(chunk_->GetAssemblyLabel(right_block));
}
}
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
int false_block = instr->FalseDestination(chunk_);
__ b(condition, chunk_->GetAssemblyLabel(false_block));
}
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ stop("LBreak");
}
@@ -2392,6 +2433,26 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (instr->hydrogen()->representation().IsTagged()) {
Register input_reg = ToRegister(instr->object());
__ mov(ip, Operand(factory()->the_hole_value()));
__ cmp(input_reg, ip);
EmitBranch(instr, eq);
return;
}
DwVfpRegister input_reg = ToDoubleRegister(instr->object());
__ VFPCompareAndSetFlags(input_reg, input_reg);
EmitFalseBranch(instr, vc);
Register scratch = scratch0();
__ VmovHigh(scratch, input_reg);
__ cmp(scratch, Operand(kHoleNanUpper32));
EmitBranch(instr, eq);
}
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -3018,91 +3079,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
__ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
} else {
// Non-negative property indices are in the properties array.
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsConstant()) {
Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
__ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
Heap* heap = type->GetHeap();
while (*current != heap->null_value()) {
__ LoadHeapObject(result, current);
__ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
__ cmp(result, Operand(Handle<Map>(current->map())));
DeoptimizeIf(ne, env);
current =
Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(object_map, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
__ b(ne, &next);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ b(&done);
__ bind(&next);
}
}
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -4275,14 +4251,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
if (FLAG_debug_code && check->hydrogen()->skip_check()) {
Label done;
__ b(NegateCondition(cc), &done);
__ b(NegateCondition(condition), &done);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, check->environment());
DeoptimizeIf(condition, check->environment());
}
}
@@ -4513,12 +4489,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetLinkRegisterState(), kDontSaveFPRegs);
} else {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ Move(r0, object_reg);
__ Move(r1, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
@@ -4806,29 +4783,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}
Label no_special_nan_handling;
Label done;
if (convert_hole) {
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
__ VmovHigh(scratch, input_reg);
__ cmp(scratch, Operand(kHoleNanUpper32));
// If not the hole NaN, force the NaN to be canonical.
__ VFPCanonicalizeNaN(input_reg, ne);
__ b(ne, &no_special_nan_handling);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
}
__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4842,7 +4796,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
// Now that we have finished with the object's real address tag it
__ add(reg, reg, Operand(kHeapObjectTag));
__ bind(&done);
}
@@ -4882,7 +4835,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DwVfpRegister result_reg,
bool allow_undefined_as_nan,
bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
@@ -4892,9 +4845,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
@@ -4902,7 +4853,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
if (!allow_undefined_as_nan) {
if (!can_convert_undefined_to_nan) {
DeoptimizeIf(ne, env);
} else {
Label heap_number, convert;
@@ -4911,11 +4862,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
__ b(eq, &convert);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(input_reg, Operand(ip));
}
DeoptimizeIf(ne, env);
__ bind(&convert);
@@ -5056,21 +5002,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DwVfpRegister result_reg = ToDoubleRegister(result);
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
mode = NUMBER_CANDIDATE_IS_SMI;
} else if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
if (load->hole_mode() == ALLOW_RETURN_HOLE) {
mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
}
}
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -5200,7 +5137,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
Handle<Cell> cell = isolate()->factory()->NewCell(target);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
@@ -5660,6 +5597,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(al, instr->environment(), type);
}

deps/v8/src/arm/lithium-codegen-arm.h (29 changed lines)

@@ -66,7 +66,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -280,16 +281,19 @@
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc,
void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void DeoptimizeIf(Condition condition, LEnvironment* environment);
void ApplyCheckIf(Condition condition, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32);
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -315,11 +319,14 @@
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
bool allow_undefined_as_nan,
@@ -355,12 +362,6 @@
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -417,6 +418,8 @@
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,

deps/v8/src/arm/macro-assembler-arm.cc (7 changed lines)

@@ -375,17 +375,14 @@ void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
!Heap::RootCanBeWrittenAfterInitialization(index) &&
isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
!predictable_code_size()) {
Handle<Object> root(isolate()->heap()->roots_array_start()[index],
isolate());
if (!isolate()->heap()->InNewSpace(*root)) {
// The CPU supports fast immediate values, and this root will never
// change. We will load it as a relocatable immediate value.
Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
mov(destination, Operand(root), LeaveCC, cond);
return;
}
}
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}

deps/v8/src/assert-scope.h (9 changed lines)

@@ -41,6 +41,7 @@ enum PerThreadAssertType {
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
DEFERRED_HANDLE_DEREFERENCE_ASSERT,
CODE_DEPENDENCY_CHANGE_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
@@ -170,6 +171,14 @@ typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
// Scope to document where we do not expect code dependencies to change.
typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
DisallowCodeDependencyChange;
// Scope to introduce an exception to DisallowCodeDependencyChange.
typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
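
Usage follows the existing per-thread assert scopes: stack-allocate the scope over
the region that must not mutate code dependencies, and in debug builds a violation
trips the per-thread assertion. The compiler.cc hunk further down uses exactly
this pattern; a distilled sketch (the function name is hypothetical):

    void RunConcurrentOptimizationPhase() {
      // No code dependency may be added or changed while this scope is live.
      DisallowCodeDependencyChange no_dependency_change;
      // ... run the optimization phase ...
    }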

deps/v8/src/ast.cc (3 changed lines)

@@ -273,7 +273,8 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
uint32_t hash = literal->Hash();
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED &&
if ((property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
property->kind() == ObjectLiteral::Property::COMPUTED) &&
table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {

deps/v8/src/ast.h (29 changed lines)

@@ -259,6 +259,7 @@ class Statement: public AstNode {
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
int statement_pos() const { return statement_pos_; }
@@ -388,7 +389,7 @@ class Expression: public AstNode {
protected:
explicit Expression(Isolate* isolate)
: bounds_(Type::None(), Type::Any(), isolate),
: bounds_(Bounds::Unbounded(isolate)),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
@@ -458,6 +459,11 @@ class Block: public BreakableStatement {
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
virtual bool IsJump() const {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
}
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -1008,6 +1014,7 @@ class ExpressionStatement: public Statement {
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
virtual bool IsJump() const { return expression_->IsThrow(); }
protected:
explicit ExpressionStatement(Expression* expression)
@@ -1018,7 +1025,16 @@
};
class ContinueStatement: public Statement {
class JumpStatement: public Statement {
public:
virtual bool IsJump() const { return true; }
protected:
JumpStatement() {}
};
class ContinueStatement: public JumpStatement {
public:
DECLARE_NODE_TYPE(ContinueStatement)
@@ -1033,7 +1049,7 @@ class ContinueStatement: public Statement {
};
class BreakStatement: public Statement {
class BreakStatement: public JumpStatement {
public:
DECLARE_NODE_TYPE(BreakStatement)
@@ -1048,7 +1064,7 @@ class BreakStatement: public Statement {
};
class ReturnStatement: public Statement {
class ReturnStatement: public JumpStatement {
public:
DECLARE_NODE_TYPE(ReturnStatement)
@@ -1167,6 +1183,11 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
virtual bool IsJump() const {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
}
BailoutId IfId() const { return if_id_; }
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
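
Taken together, these IsJump() overrides let the AST report statements after
which control never falls through: a throw-expression statement, a block whose
last statement jumps (with no labels), and an if statement where both arms jump,
e.g. `if (c) return a; else return b;` in JavaScript source now answers true.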

deps/v8/src/code-stubs-hydrogen.cc (21 changed lines)

@@ -92,7 +92,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
}
~ArrayContextChecker() {
checker_.ElseDeopt();
checker_.ElseDeopt("Array constructor called from different context");
checker_.End();
}
private:
@@ -233,7 +233,7 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
builder.ElseDeopt();
builder.ElseDeopt("Forced deopt to runtime");
return undefined;
}
@@ -352,7 +352,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
HValue* elements = AddLoadElements(boilerplate, NULL);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
@@ -387,7 +387,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
length));
}
checker.ElseDeopt();
checker.ElseDeopt("Uninitialized boilerplate literals");
checker.End();
return environment()->Pop();
@@ -434,7 +434,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
environment()->Push(object);
checker.ElseDeopt();
checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
return environment()->Pop();
@@ -513,7 +513,7 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
}
@@ -528,7 +528,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
}
@@ -844,7 +844,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
builder.ElseDeopt();
builder.ElseDeopt("Unexpected cell contents in constant global store");
builder.End();
} else {
// Load the payload of the global parameter cell. A hole indicates that the
@@ -854,7 +854,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
HValue* hole_value = Add<HConstant>(hole);
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
builder.Deopt();
builder.Deopt("Unexpected cell contents in global store");
builder.Else();
Add<HStoreNamedField>(cell, access, value);
builder.End();
@@ -878,7 +878,8 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
Add<HDeoptimize>(Deoptimizer::EAGER);
Add<HDeoptimize>("Deopt due to --trace-elements-transitions",
Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();

deps/v8/src/code-stubs.cc (3 changed lines)

@@ -469,6 +469,9 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
ASSERT(from != to);
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];

deps/v8/src/compiler.cc (19 changed lines)

@@ -120,6 +120,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
return;
}
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
@@ -446,6 +447,12 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
}
if (info()->HasAbortedDueToDependencyChange()) {
info_->set_bailout_reason(kBailedOutDueToDependencyChange);
info_->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
return SetLastStatus(SUCCEEDED);
}
@@ -454,6 +461,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DisallowCodeDependencyChange no_dependency_change;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
@@ -474,6 +482,8 @@
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
{ // Scope for timer.
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
@@ -485,7 +495,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
DisallowDeferredHandleDereference no_deferred_handle_deref;
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
if (info()->bailout_reason() != kNoReason) {
if (info()->bailout_reason() == kNoReason) {
info()->set_bailout_reason(kCodeGenerationFailed);
}
return AbortOptimization();
@@ -815,6 +825,7 @@ static bool InstallFullCode(CompilationInfo* info) {
// was flushed. By setting the code object last we avoid this.
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Code> code = info->code();
CHECK(code->kind() == Code::FUNCTION);
Handle<JSFunction> function = info->closure();
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
@@ -972,7 +983,9 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
PrintF(" ** Compilation queue full, will retry optimizing ");
closure->PrintName();
PrintF(" on next run.\n");
}
return;
}
@@ -1057,7 +1070,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependentMap);
info->set_bailout_reason(kBailedOutDueToDependencyChange);
status = optimizing_compiler->AbortOptimization();
} else if (status != OptimizingCompiler::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);

deps/v8/src/compiler.h (17 changed lines)

@@ -199,6 +199,11 @@ class CompilationInfo {
return IsCompilingForDebugging::decode(flags_);
}
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
(FLAG_trap_on_stub_deopt && IsStub());
}
bool has_global_object() const {
return !closure().is_null() &&
(closure()->context()->global_object() != NULL);
@@ -293,11 +298,13 @@
}
void AbortDueToDependencyChange() {
mode_ = DEPENDENCY_CHANGE_ABORT;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
return mode_ == DEPENDENCY_CHANGE_ABORT;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
return abort_due_to_dependency_;
}
protected:
@@ -321,8 +328,7 @@
BASE,
OPTIMIZE,
NONOPT,
STUB,
DEPENDENCY_CHANGE_ABORT
STUB
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
@@ -396,6 +402,9 @@
Mode mode_;
BailoutId osr_ast_id_;
// Flag whether compilation needs to be aborted due to dependency change.
bool abort_due_to_dependency_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;

deps/v8/src/cpu-profiler.cc (2 changed lines)

@@ -106,7 +106,7 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
bool ProfilerEventsProcessor::ProcessTicks() {
while (true) {
if (!ticks_from_vm_buffer_.IsEmpty()
while (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order ==
last_processed_code_event_id_) {
TickSampleEventRecord record;

deps/v8/src/cpu-profiler.h (2 changed lines)

@@ -241,6 +241,7 @@ class CpuProfiler : public CodeEventListener {
ProfileGenerator* generator() const { return generator_; }
ProfilerEventsProcessor* processor() const { return processor_; }
Isolate* isolate() const { return isolate_; }
private:
void StartProcessorIfNotStarted();
@@ -258,7 +259,6 @@ class CpuProfiler : public CodeEventListener {
bool need_to_stop_sampler_;
bool is_profiling_;
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

deps/v8/src/debug.cc (7 changed lines)

@@ -409,6 +409,9 @@ bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
HandleScope scope(debug_info_->GetIsolate());
Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
if (target_code->kind() == Code::STUB) {
return target_code->major_key() == CodeStub::CallFunction;
}
return target_code->is_call_stub() || target_code->is_keyed_call_stub();
} else {
return false;
@@ -2044,6 +2047,10 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
if (FLAG_parallel_recompilation) {
isolate_->optimizing_compiler_thread()->Flush();
}
Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =

deps/v8/src/deoptimizer.cc (293 changed lines)

@@ -602,6 +602,12 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
deferred_objects_double_values_(0),
deferred_objects_(0),
deferred_heap_numbers_(0),
jsframe_functions_(0),
jsframe_has_adapted_arguments_(0),
materialized_values_(NULL),
materialized_objects_(NULL),
materialization_value_index_(0),
materialization_object_index_(0),
trace_(false) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
@@ -1208,7 +1214,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
int deferred_object_index = deferred_objects_.length();
DoTranslateCommand(iterator, frame_index, output_offset);
// The allocated receiver of a construct stub frame is passed as the
// receiver parameter through the translation. It might be encoding
// a captured object; if so, patch the slot address of the captured object.
if (i == 0 && deferred_objects_.length() > deferred_object_index) {
ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
deferred_objects_[deferred_object_index].patch_slot_address(top_address);
}
}
// Read caller's PC from the previous frame.
@@ -1633,9 +1647,93 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
int object_index = materialization_object_index_++;
ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
const int length = desc.object_length();
if (desc.duplicate_object() >= 0) {
// Found a previously materialized object by de-duplication.
object_index = desc.duplicate_object();
materialized_objects_->Add(Handle<Object>());
} else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
// Use the arguments adapter frame we just built to materialize the
// arguments object. FunctionGetArguments can't throw an exception.
Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
materialized_objects_->Add(arguments);
materialization_value_index_ += length;
} else if (desc.is_arguments()) {
// Construct an arguments object and copy the parameters to a newly
// allocated arguments object backing store.
Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
Handle<JSObject> arguments =
isolate_->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
ASSERT(array->length() == length);
arguments->set_elements(*array);
materialized_objects_->Add(arguments);
for (int i = 0; i < length; ++i) {
Handle<Object> value = MaterializeNextValue();
array->set(i, *value);
}
} else {
// Dispatch on the instance type of the object to be materialized.
Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
Handle<HeapNumber> number =
Handle<HeapNumber>::cast(MaterializeNextValue());
materialized_objects_->Add(number);
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
case JS_OBJECT_TYPE: {
Handle<JSObject> object =
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
materialized_objects_->Add(object);
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArray::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
Handle<Object> value = MaterializeNextValue();
object->FastPropertyAtPut(i, *value);
}
break;
}
default:
PrintF("[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
}
}
return materialized_objects_->at(object_index);
}
Handle<Object> Deoptimizer::MaterializeNextValue() {
int value_index = materialization_value_index_++;
Handle<Object> value = materialized_values_->at(value_index);
if (*value == isolate_->heap()->arguments_marker()) {
value = MaterializeNextHeapObject();
}
return value;
}
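The sentinel-driven recursion between these two routines is the heart of captured-object materialization: whenever the next value turns out to be the arguments marker, a whole nested object is materialized in its place. A standalone toy model, not V8 code, with plain ints standing in for tagged values and the marker:

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy model (not V8 code) of the two routines above: a flat stream of
// values in which a sentinel marks "a deferred object starts here", and
// consumption recurses whenever the sentinel is hit. 'lengths' plays the
// role of ObjectMaterializationDescriptor::object_length().
static const int kMarker = -1;  // stands in for the arguments marker

struct Stream {
  std::vector<int> values;   // flattened field values, nested objects inlined
  std::vector<int> lengths;  // one entry per deferred object
  size_t value_index;
  size_t object_index;

  int NextValue();

  int NextObject() {
    int length = lengths[object_index++];
    std::printf("[object #%zu, %d fields]\n", object_index - 1, length);
    int checksum = 0;
    for (int i = 0; i < length; ++i) checksum += NextValue();
    return checksum;
  }
};

int Stream::NextValue() {
  int v = values[value_index++];
  if (v == kMarker) return NextObject();  // recurse into a nested object
  std::printf("  field value %d\n", v);
  return v;
}

int main() {
  // Outer object of 3 fields; its second field is itself a captured
  // object of 2 fields, flagged by the kMarker sentinel in the stream.
  Stream s = {{10, kMarker, 20, 30, 40}, {3, 2}, 0, 0};
  s.NextObject();
  return 0;
}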
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
// Walk all JavaScript output frames with the given frame iterator.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
JavaScriptFrame* frame = it->frame();
jsframe_functions_.Add(handle(frame->function(), isolate_));
jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
}
// Handlify all tagged object values before triggering any allocation.
List<Handle<Object> > values(deferred_objects_tagged_values_.length());
for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
@@ -1652,7 +1750,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p\n",
PrintF("Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address());
@@ -1660,62 +1758,52 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
Memory::Object_at(d.slot_address()) = *num;
}
// Materialize all heap numbers required for arguments objects.
// Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < values.length(); i++) {
if (!values.at(i)->IsTheHole()) continue;
double double_value = deferred_objects_double_values_[i];
Handle<Object> num = isolate_->factory()->NewNumber(double_value);
if (trace_) {
PrintF("Materializing a new heap number %p [%e] for arguments object\n",
PrintF("Materialized a new heap number %p [%e] for object\n",
reinterpret_cast<void*>(*num), double_value);
}
values.Set(i, num);
}
// Materialize arguments objects one frame at a time.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
JavaScriptFrame* frame = it->frame();
Handle<JSFunction> function(frame->function(), isolate_);
Handle<JSObject> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
materialized_objects_ = &materialized_objects;
materialized_values_ = &values;
while (materialization_object_index_ < deferred_objects_.length()) {
int object_index = materialization_object_index_;
ObjectMaterializationDescriptor descriptor =
deferred_objects_.RemoveLast();
const int length = descriptor.object_length();
if (arguments.is_null()) {
if (frame->has_adapted_arguments()) {
// Use the arguments adapter frame we just built to materialize the
// arguments object. FunctionGetArguments can't throw an exception.
arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
values.RewindBy(length);
} else {
// Construct an arguments object and copy the parameters to a newly
// allocated arguments object backing store.
arguments =
isolate_->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array =
isolate_->factory()->NewFixedArray(length);
ASSERT(array->length() == length);
for (int i = length - 1; i >= 0 ; --i) {
array->set(i, *values.RemoveLast());
}
arguments->set_elements(*array);
}
}
frame->SetExpression(i, *arguments);
ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
deferred_objects_.at(object_index);
// Find a previously materialized object by de-duplication or
// materialize a new instance of the object if necessary. Store
// the materialized object into the frame slot.
Handle<Object> object = MaterializeNextHeapObject();
Memory::Object_at(descriptor.slot_address()) = *object;
if (trace_) {
PrintF("Materializing %sarguments object of length %d for %p: ",
frame->has_adapted_arguments() ? "(adapted) " : "",
arguments->elements()->length(),
if (descriptor.is_arguments()) {
PrintF("Materialized %sarguments object of length %d for %p: ",
ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
Handle<JSObject>::cast(object)->elements()->length(),
reinterpret_cast<void*>(descriptor.slot_address()));
} else {
PrintF("Materialized captured object of size %d for %p: ",
Handle<HeapObject>::cast(object)->Size(),
reinterpret_cast<void*>(descriptor.slot_address()));
arguments->ShortPrint();
PrintF("\n");
}
object->ShortPrint();
PrintF("\n");
}
}
ASSERT(materialization_object_index_ == materialized_objects_->length());
ASSERT(materialization_value_index_ == materialized_values_->length());
}
}
@@ -1786,10 +1874,10 @@ static const char* TraceValueType(bool is_smi, bool is_native = false) {
void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int object_opcode,
int object_index,
int field_index) {
disasm::NameConverter converter;
Address object_slot = deferred_objects_.last().slot_address();
Address object_slot = deferred_objects_[object_index].slot_address();
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
@@ -1802,7 +1890,6 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
case Translation::ARGUMENTS_OBJECT:
UNREACHABLE();
return;
@@ -1972,6 +2059,50 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
AddObjectTaggedValue(value);
return;
}
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
if (trace_) {
PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
AddObjectDuplication(0, object_index);
AddObjectTaggedValue(value);
return;
}
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
if (trace_) {
PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
AddObjectStart(0, length, is_args);
AddObjectTaggedValue(value);
// We save the object values on the side and materialize the actual
// object after the deoptimized frame is built.
int object_index = deferred_objects_.length() - 1;
for (int i = 0; i < length; i++) {
DoTranslateObject(iterator, object_index, i);
}
return;
}
}
}
@@ -2211,25 +2342,48 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
case Translation::ARGUMENTS_OBJECT: {
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
object_index);
output_[frame_index]->SetFrameSlot(output_offset, value);
return;
}
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; arguments object (length = %d)\n", length);
PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
AddObjectStart(output_[frame_index]->GetTop() + output_offset, length);
AddObjectStart(output_[frame_index]->GetTop() + output_offset,
length, is_args);
output_[frame_index]->SetFrameSlot(output_offset, value);
// We save the argument values on the side and materialize the actual
// arguments object after the deoptimized frame is built.
// We save the object values on the side and materialize the actual
// object after the deoptimized frame is built.
int object_index = deferred_objects_.length() - 1;
for (int i = 0; i < length; i++) {
DoTranslateObject(iterator, Translation::ARGUMENTS_OBJECT, i);
DoTranslateObject(iterator, object_index, i);
}
return;
}
@@ -2406,7 +2560,9 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
case Translation::ARGUMENTS_OBJECT: {
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
// Optimized code assumes that the arguments object has not been
// materialized and so bypasses it when doing arguments access.
// We should have bailed out before starting the frame translation.
@@ -2554,9 +2710,16 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
}
void Deoptimizer::AddObjectStart(intptr_t slot_address, int length) {
void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
ObjectMaterializationDescriptor object_desc(
reinterpret_cast<Address>(slot_address), length);
reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
deferred_objects_.Add(object_desc);
}
void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
ObjectMaterializationDescriptor object_desc(
reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
deferred_objects_.Add(object_desc);
}
@@ -2784,6 +2947,18 @@ void Translation::BeginArgumentsObject(int args_length) {
}
void Translation::BeginCapturedObject(int length) {
buffer_->Add(CAPTURED_OBJECT, zone());
buffer_->Add(length, zone());
}
void Translation::DuplicateObject(int object_index) {
buffer_->Add(DUPLICATED_OBJECT, zone());
buffer_->Add(object_index, zone());
}
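The two new emitters are tiny, but the opcode stream they produce is worth seeing end to end. A standalone toy (not the real Translation class, which writes into a zone-allocated buffer) encoding a captured object of length 2 followed by a de-duplicated second reference to it:

#include <cstdio>
#include <vector>

// Toy encoding (not the real Translation class) of the stream the two
// emitters above produce: a captured object of length 2 whose fields are
// literal indices, then a second slot that refers back to object #0 via
// DUPLICATED_OBJECT instead of repeating the contents.
enum Opcode { CAPTURED_OBJECT, DUPLICATED_OBJECT, LITERAL };

int main() {
  std::vector<int> buffer;
  buffer.insert(buffer.end(), {CAPTURED_OBJECT, 2});    // BeginCapturedObject(2)
  buffer.insert(buffer.end(), {LITERAL, 0});            // field 0
  buffer.insert(buffer.end(), {LITERAL, 1});            // field 1
  buffer.insert(buffer.end(), {DUPLICATED_OBJECT, 0});  // DuplicateObject(0)
  for (int v : buffer) std::printf("%d ", v);
  std::printf("\n");
  return 0;
}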
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -2852,7 +3027,9 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
case ARGUMENTS_OBJECT:
case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
@@ -2912,8 +3089,12 @@ const char* Translation::StringFor(Opcode opcode) {
return "DOUBLE_STACK_SLOT";
case LITERAL:
return "LITERAL";
case DUPLICATED_OBJECT:
return "DUPLICATED_OBJECT";
case ARGUMENTS_OBJECT:
return "ARGUMENTS_OBJECT";
case CAPTURED_OBJECT:
return "CAPTURED_OBJECT";
}
UNREACHABLE();
return "";
@@ -2957,7 +3138,9 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
// Peeled off before getting here.
break;
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT:
// This can only be emitted for local slots, not for argument slots.
break;

57
deps/v8/src/deoptimizer.h

@@ -77,15 +77,31 @@ class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
ObjectMaterializationDescriptor(Address slot_address, int length)
: slot_address_(slot_address), object_length_(length) { }
ObjectMaterializationDescriptor(
Address slot_address, int frame, int length, int duplicate, bool is_args)
: slot_address_(slot_address),
jsframe_index_(frame),
object_length_(length),
duplicate_object_(duplicate),
is_arguments_(is_args) { }
Address slot_address() const { return slot_address_; }
int jsframe_index() const { return jsframe_index_; }
int object_length() const { return object_length_; }
int duplicate_object() const { return duplicate_object_; }
bool is_arguments() const { return is_arguments_; }
// Only used for allocated receivers in DoComputeConstructStubFrame.
void patch_slot_address(intptr_t slot) {
slot_address_ = reinterpret_cast<Address>(slot);
}
private:
Address slot_address_;
int jsframe_index_;
int object_length_;
int duplicate_object_;
bool is_arguments_;
};
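To make the constructor's five positional arguments easier to read at a glance, a hedged sketch of the two descriptor shapes the deoptimizer records (the slot addresses are invented placeholders, and the surrounding V8 headers are assumed):

// Fresh captured object: frame 0, three field values follow in the stream.
ObjectMaterializationDescriptor fresh(
    reinterpret_cast<Address>(0x1000), /*frame*/ 0,
    /*length*/ 3, /*duplicate*/ -1, /*is_args*/ false);
// De-duplicated reference: no contents of its own (length -1); it points
// back at previously recorded object #0.
ObjectMaterializationDescriptor dup(
    reinterpret_cast<Address>(0x1008), /*frame*/ 0,
    /*length*/ -1, /*duplicate*/ 0, /*is_args*/ false);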
@@ -372,7 +388,7 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoTranslateObject(TranslationIterator* iterator,
int object_opcode,
int object_index,
int field_index);
enum DeoptimizerTranslatedValueType {
@@ -400,11 +416,28 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
void AddObjectStart(intptr_t slot_address, int argc);
void AddObjectStart(intptr_t slot_address, int argc, bool is_arguments);
void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
void AddDoubleValue(intptr_t slot_address, double value);
bool ArgumentsObjectIsAdapted(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
return jsframe_has_adapted_arguments_[reverse_jsframe_index];
}
Handle<JSFunction> ArgumentsObjectFunction(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
return jsframe_functions_[reverse_jsframe_index];
}
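The index reversal in these two helpers is easy to trip over; a worked example, under the assumption that the translation numbers frames in the opposite order from the frame-iterator walk that filled the two lists:

// With jsframe_count_ == 3, the iterator walk stored entries for the
// innermost frame first: [inner, middle, outer].
// A descriptor carrying jsframe_index() == 0 therefore resolves to
//   reverse_jsframe_index = 3 - 0 - 1 == 2,
// i.e. the outermost frame's entry at the end of both lists.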
// Helper function for heap object materialization.
Handle<Object> MaterializeNextHeapObject();
Handle<Object> MaterializeNextValue();
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -455,10 +488,22 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
// Deferred values to be materialized.
List<Object*> deferred_objects_tagged_values_;
List<double> deferred_objects_double_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
List<bool> jsframe_has_adapted_arguments_;
// Materialized objects. Only used during heap object materialization.
List<Handle<Object> >* materialized_values_;
List<Handle<Object> >* materialized_objects_;
int materialization_value_index_;
int materialization_object_index_;
#ifdef DEBUG
DisallowHeapAllocation* disallow_heap_allocation_;
#endif // DEBUG
@@ -712,7 +757,9 @@ class Translation BASE_EMBEDDED {
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
DUPLICATED_OBJECT,
ARGUMENTS_OBJECT,
CAPTURED_OBJECT,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -744,6 +791,8 @@ class Translation BASE_EMBEDDED {
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
void BeginArgumentsObject(int args_length);
void BeginCapturedObject(int length);
void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);

361
deps/v8/src/effects.h

@@ -0,0 +1,361 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_EFFECTS_H_
#define V8_EFFECTS_H_
#include "v8.h"
#include "types.h"
namespace v8 {
namespace internal {
// A simple struct to represent (write) effects. A write is represented as a
// modification of type bounds (e.g. of a variable).
//
// An effect can either be definite, if the write is known to have taken place,
// or 'possible', if it was optional. The difference is relevant when composing
// effects.
//
// There are two ways to compose effects: sequentially (they happen one after
// the other) or alternatively (either one or the other happens). A definite
// effect cancels out any previous effect upon sequencing. A possible effect
// merges into a previous effect, i.e., type bounds are merged. Alternative
// composition always merges bounds. It yields a possible effect if at least
// one was only possible.
struct Effect {
enum Modality { POSSIBLE, DEFINITE };
Modality modality;
Bounds bounds;
Effect() {}
Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
// The unknown effect.
static Effect Unknown(Isolate* isolate) {
return Effect(Bounds::Unbounded(isolate), POSSIBLE);
}
static Effect Forget(Isolate* isolate) {
return Effect(Bounds::Unbounded(isolate), DEFINITE);
}
// Sequential composition, as in 'e1; e2'.
static Effect Seq(Effect e1, Effect e2, Isolate* isolate) {
if (e2.modality == DEFINITE) return e2;
return Effect(Bounds::Either(e1.bounds, e2.bounds, isolate), e1.modality);
}
// Alternative composition, as in 'cond ? e1 : e2'.
static Effect Alt(Effect e1, Effect e2, Isolate* isolate) {
return Effect(
Bounds::Either(e1.bounds, e2.bounds, isolate),
e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
}
};
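A standalone toy model of these rules may help, with Bounds reduced to a plain integer interval and the Isolate dropped; it illustrates the composition laws only, and is not V8 code:

#include <algorithm>
#include <cstdio>

// Toy model of the composition rules above: a DEFINITE second effect wins
// under Seq, while Alt always merges bounds and stays POSSIBLE if either
// side was only possible.
struct ToyBounds { int lower, upper; };
enum ToyModality { POSSIBLE, DEFINITE };
struct ToyEffect { ToyModality modality; ToyBounds bounds; };

ToyBounds Either(ToyBounds a, ToyBounds b) {
  return { std::min(a.lower, b.lower), std::max(a.upper, b.upper) };
}

ToyEffect Seq(ToyEffect e1, ToyEffect e2) {
  if (e2.modality == DEFINITE) return e2;  // a definite write cancels e1
  return { e1.modality, Either(e1.bounds, e2.bounds) };
}

ToyEffect Alt(ToyEffect e1, ToyEffect e2) {
  return { e1.modality == POSSIBLE ? POSSIBLE : e2.modality,
           Either(e1.bounds, e2.bounds) };
}

int main() {
  ToyEffect a = { DEFINITE, {0, 10} };
  ToyEffect b = { POSSIBLE, {5, 20} };
  ToyEffect s = Seq(a, b);  // possible e2 merges: [0,20], stays DEFINITE
  ToyEffect t = Alt(a, b);  // either branch: [0,20], weakens to POSSIBLE
  std::printf("seq: [%d,%d] %s\n", s.bounds.lower, s.bounds.upper,
              s.modality == DEFINITE ? "definite" : "possible");
  std::printf("alt: [%d,%d] %s\n", t.bounds.lower, t.bounds.upper,
              t.modality == DEFINITE ? "definite" : "possible");
  return 0;
}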
// Classes encapsulating sets of effects on variables.
//
// Effects maps variables to effects and supports sequential and alternative
// composition.
//
// NestedEffects is an incremental representation that supports persistence
// through functional extension. It represents the map as an adjoin of a list
// of maps, whose tail can be shared.
//
// Both classes provide similar interfaces, implemented in parts through the
// EffectsMixin below (using sandwich style, to work around the style guide's
// MI restriction).
//
// We also (ab)use Effects/NestedEffects as a representation for abstract
// store typings. In that case, only definite effects are of interest.
template<class Var, class Base, class Effects>
class EffectsMixin: public Base {
public:
explicit EffectsMixin(Zone* zone) : Base(zone) {}
Effect Lookup(Var var) {
Locator locator;
return this->Find(var, &locator)
? locator.value() : Effect::Unknown(Base::isolate());
}
Bounds LookupBounds(Var var) {
Effect effect = Lookup(var);
return effect.modality == Effect::DEFINITE
? effect.bounds : Bounds::Unbounded(Base::isolate());
}
// Sequential composition.
void Seq(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
effect = Effect::Seq(locator.value(), effect, Base::isolate());
}
locator.set_value(effect);
}
void Seq(Effects that) {
SeqMerger<EffectsMixin> merge = { *this };
that.ForEach(&merge);
}
// Alternative composition.
void Alt(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
effect = Effect::Alt(locator.value(), effect, Base::isolate());
}
locator.set_value(effect);
}
void Alt(Effects that) {
AltWeakener<EffectsMixin> weaken = { *this, that };
this->ForEach(&weaken);
AltMerger<EffectsMixin> merge = { *this };
that.ForEach(&merge);
}
// Invalidation.
void Forget() {
Overrider override = {
Effect::Forget(Base::isolate()), Effects(Base::zone()) };
this->ForEach(&override);
Seq(override.effects);
}
protected:
typedef typename Base::Locator Locator;
template<class Self>
struct SeqMerger {
void Call(Var var, Effect effect) { self.Seq(var, effect); }
Self self;
};
template<class Self>
struct AltMerger {
void Call(Var var, Effect effect) { self.Alt(var, effect); }
Self self;
};
template<class Self>
struct AltWeakener {
void Call(Var var, Effect effect) {
if (effect.modality == Effect::DEFINITE && !other.Contains(var)) {
effect.modality = Effect::POSSIBLE;
Locator locator;
self.Insert(var, &locator);
locator.set_value(effect);
}
}
Self self;
Effects other;
};
struct Overrider {
void Call(Var var, Effect effect) { effects.Seq(var, new_effect); }
Effect new_effect;
Effects effects;
};
};
template<class Var, Var kNoVar> class Effects;
template<class Var, Var kNoVar> class NestedEffectsBase;
template<class Var, Var kNoVar>
class EffectsBase {
public:
explicit EffectsBase(Zone* zone) : map_(new(zone) Mapping(zone)) {}
bool IsEmpty() { return map_->is_empty(); }
protected:
friend class NestedEffectsBase<Var, kNoVar>;
friend class
EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
Zone* zone() { return map_->allocator().zone(); }
Isolate* isolate() { return zone()->isolate(); }
struct SplayTreeConfig {
typedef Var Key;
typedef Effect Value;
static const Var kNoKey = kNoVar;
static Effect NoValue() { return Effect(); }
static int Compare(int x, int y) { return y - x; }
};
typedef ZoneSplayTree<SplayTreeConfig> Mapping;
typedef typename Mapping::Locator Locator;
bool Contains(Var var) {
ASSERT(var != kNoVar);
return map_->Contains(var);
}
bool Find(Var var, Locator* locator) {
ASSERT(var != kNoVar);
return map_->Find(var, locator);
}
bool Insert(Var var, Locator* locator) {
ASSERT(var != kNoVar);
return map_->Insert(var, locator);
}
template<class Callback>
void ForEach(Callback* callback) {
return map_->ForEach(callback);
}
private:
Mapping* map_;
};
template<class Var, Var kNoVar>
const Var EffectsBase<Var, kNoVar>::SplayTreeConfig::kNoKey;
template<class Var, Var kNoVar>
class Effects: public
EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
public:
explicit Effects(Zone* zone)
: EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(zone)
{}
};
template<class Var, Var kNoVar>
class NestedEffectsBase {
public:
explicit NestedEffectsBase(Zone* zone) : node_(new(zone) Node(zone)) {}
template<class Callback>
void ForEach(Callback* callback) {
if (node_->previous) NestedEffectsBase(node_->previous).ForEach(callback);
node_->effects.ForEach(callback);
}
Effects<Var, kNoVar> Top() { return node_->effects; }
bool IsEmpty() {
for (Node* node = node_; node != NULL; node = node->previous) {
if (!node->effects.IsEmpty()) return false;
}
return true;
}
protected:
typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
Zone* zone() { return node_->zone; }
Isolate* isolate() { return zone()->isolate(); }
void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
void pop() { node_ = node_->previous; }
bool is_empty() { return node_ == NULL; }
bool Contains(Var var) {
ASSERT(var != kNoVar);
for (Node* node = node_; node != NULL; node = node->previous) {
if (node->effects.Contains(var)) return true;
}
return false;
}
bool Find(Var var, Locator* locator) {
ASSERT(var != kNoVar);
for (Node* node = node_; node != NULL; node = node->previous) {
if (node->effects.Find(var, locator)) return true;
}
return false;
}
bool Insert(Var var, Locator* locator);
private:
struct Node: ZoneObject {
Zone* zone;
Effects<Var, kNoVar> effects;
Node* previous;
explicit Node(Zone* zone, Node* previous = NULL)
: zone(zone), effects(zone), previous(previous) {}
};
explicit NestedEffectsBase(Node* node) : node_(node) {}
Node* node_;
};
template<class Var, Var kNoVar>
bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) {
ASSERT(var != kNoVar);
if (!node_->effects.Insert(var, locator)) return false;
Locator shadowed;
for (Node* node = node_->previous; node != NULL; node = node->previous) {
if (node->effects.Find(var, &shadowed)) {
// Initialize with shadowed entry.
locator->set_value(shadowed.value());
return false;
}
}
return true;
}
template<class Var, Var kNoVar>
class NestedEffects: public
EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
public:
explicit NestedEffects(Zone* zone) :
EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(
zone) {}
// Create an extension of the current effect set. The current set should not
// be modified while the extension is in use.
NestedEffects Push() {
NestedEffects result = *this;
result.push();
return result;
}
NestedEffects Pop() {
NestedEffects result = *this;
result.pop();
ASSERT(!this->is_empty());
return result;
}
};
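A hedged usage sketch, modelled on how a visitor could analyse `cond ? then : else` with the classes above (Var instantiated as int with -1 as the kNoVar sentinel; the Zone and Isolate plumbing is V8-internal and assumed):

typedef NestedEffects<int, -1> Store;

void AnalyzeConditional(Store* store, Isolate* isolate) {
  *store = store->Push();                  // scratch layer for the then-arm
  Effects<int, -1> then_effects = store->Top();
  then_effects.Seq(/*var*/ 0, Effect(Bounds::Unbounded(isolate)));
  *store = store->Pop();                   // back to the shared tail

  *store = store->Push();                  // scratch layer for the else-arm
  Effects<int, -1> else_effects = store->Top();
  *store = store->Pop();

  then_effects.Alt(else_effects);          // join the two arms
  store->Seq(then_effects);                // sequence the join into the store
}

This mirrors the push/analyse/pop/merge shape the class comment prescribes: the shared tail is never mutated while an extension is live, which is exactly the persistence the functional extension buys.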
} } // namespace v8::internal
#endif // V8_EFFECTS_H_

4
deps/v8/src/execution.cc

@@ -206,10 +206,12 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetCaptureMessage(false);
*caught_exception = false;
// Get isolate now, because handle might be persistent
// and get destroyed in the next call.
Isolate* isolate = func->GetIsolate();
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
Isolate* isolate = func->GetIsolate();
if (*caught_exception) {
ASSERT(catcher.HasCaught());
ASSERT(isolate->has_pending_exception());

366
deps/v8/src/extensions/i18n/collator.cc

@@ -1,366 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// limitations under the License.
#include "collator.h"
#include "i18n-utils.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/ucol.h"
namespace v8_i18n {
static icu::Collator* InitializeCollator(
v8::Handle<v8::String>, v8::Handle<v8::Object>, v8::Handle<v8::Object>);
static icu::Collator* CreateICUCollator(
const icu::Locale&, v8::Handle<v8::Object>);
static bool SetBooleanAttribute(
UColAttribute, const char*, v8::Handle<v8::Object>, icu::Collator*);
static void SetResolvedSettings(
const icu::Locale&, icu::Collator*, v8::Handle<v8::Object>);
static void SetBooleanSetting(
UColAttribute, icu::Collator*, const char*, v8::Handle<v8::Object>);
icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
v8::HandleScope handle_scope;
if (obj->HasOwnProperty(v8::String::New("collator"))) {
return static_cast<icu::Collator*>(
obj->GetAlignedPointerFromInternalField(0));
}
return NULL;
}
void Collator::DeleteCollator(v8::Isolate* isolate,
v8::Persistent<v8::Object>* object,
void* param) {
// First delete the hidden C++ object.
// Unpacking should never return NULL here. That would only happen if
// this method is used as the weak callback for persistent handles not
// pointing to a collator.
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
delete UnpackCollator(handle);
// Then dispose of the persistent handle to JS object.
object->Dispose(isolate);
}
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
return v8::ThrowException(v8::Exception::Error(
v8::String::New("Collator method called on an object "
"that is not a Collator.")));
}
// When there's an ICU error, throw a JavaScript error with |message|.
static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
}
// static
void Collator::JSInternalCompare(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsObject() ||
!args[1]->IsString() || !args[2]->IsString()) {
v8::ThrowException(v8::Exception::SyntaxError(
v8::String::New("Collator and two string arguments are required.")));
return;
}
icu::Collator* collator = UnpackCollator(args[0]->ToObject());
if (!collator) {
ThrowUnexpectedObjectError();
return;
}
v8::String::Value string_value1(args[1]);
v8::String::Value string_value2(args[2]);
const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
UErrorCode status = U_ZERO_ERROR;
UCollationResult result = collator->compare(
string1, string_value1.length(), string2, string_value2.length(), status);
if (U_FAILURE(status)) {
ThrowExceptionForICUError(
"Internal error. Unexpected failure in Collator.compare.");
return;
}
args.GetReturnValue().Set(result);
}
void Collator::JSCreateCollator(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
!args[2]->IsObject()) {
v8::ThrowException(v8::Exception::SyntaxError(
v8::String::New("Internal error, wrong parameters.")));
return;
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::ObjectTemplate> intl_collator_template =
Utils::GetTemplate(isolate);
// Create an empty object wrapper.
v8::Local<v8::Object> local_object = intl_collator_template->NewInstance();
// But the handle shouldn't be empty.
// That can happen if there was a stack overflow when creating the object.
if (local_object.IsEmpty()) {
args.GetReturnValue().Set(local_object);
return;
}
// Set collator as internal field of the resulting JS object.
icu::Collator* collator = InitializeCollator(
args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
if (!collator) {
v8::ThrowException(v8::Exception::Error(v8::String::New(
"Internal error. Couldn't create ICU collator.")));
return;
} else {
local_object->SetAlignedPointerInInternalField(0, collator);
// Make it safer to unpack later on.
v8::TryCatch try_catch;
local_object->Set(v8::String::New("collator"), v8::String::New("valid"));
if (try_catch.HasCaught()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Internal error, couldn't set property.")));
return;
}
}
v8::Persistent<v8::Object> wrapper(isolate, local_object);
// Make the object handle weak so we can delete the collator once GC kicks in.
wrapper.MakeWeak<void>(NULL, &DeleteCollator);
args.GetReturnValue().Set(wrapper);
wrapper.ClearAndLeak();
}
static icu::Collator* InitializeCollator(v8::Handle<v8::String> locale,
v8::Handle<v8::Object> options,
v8::Handle<v8::Object> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
v8::String::AsciiValue bcp47_locale(locale);
if (bcp47_locale.length() != 0) {
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
return NULL;
}
icu_locale = icu::Locale(icu_result);
}
icu::Collator* collator = CreateICUCollator(icu_locale, options);
if (!collator) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
collator = CreateICUCollator(no_extension_locale, options);
// Set resolved settings (pattern, numbering system).
SetResolvedSettings(no_extension_locale, collator, resolved);
} else {
SetResolvedSettings(icu_locale, collator, resolved);
}
return collator;
}
static icu::Collator* CreateICUCollator(
const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
// Make collator from options.
icu::Collator* collator = NULL;
UErrorCode status = U_ZERO_ERROR;
collator = icu::Collator::createInstance(icu_locale, status);
if (U_FAILURE(status)) {
delete collator;
return NULL;
}
// Set flags first, and then override them with sensitivity if necessary.
SetBooleanAttribute(UCOL_NUMERIC_COLLATION, "numeric", options, collator);
// Normalization is always on, by the spec. We are free to optimize
// if the strings are already normalized (but we don't have a way to tell
// that right now).
collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
icu::UnicodeString case_first;
if (Utils::ExtractStringSetting(options, "caseFirst", &case_first)) {
if (case_first == UNICODE_STRING_SIMPLE("upper")) {
collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
} else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
} else {
// Default (false/off).
collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
}
}
icu::UnicodeString sensitivity;
if (Utils::ExtractStringSetting(options, "sensitivity", &sensitivity)) {
if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
collator->setStrength(icu::Collator::PRIMARY);
} else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
collator->setStrength(icu::Collator::SECONDARY);
} else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
collator->setStrength(icu::Collator::PRIMARY);
collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
} else {
// variant (default)
collator->setStrength(icu::Collator::TERTIARY);
}
}
bool ignore;
if (Utils::ExtractBooleanSetting(options, "ignorePunctuation", &ignore)) {
if (ignore) {
collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
}
}
return collator;
}
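This file is deleted in this commit (the logic moves into src/i18n.cc), but the sensitivity mapping is worth one standalone illustration. A minimal sketch against plain ICU, assuming the ICU development headers and library are available; "case" sensitivity means PRIMARY strength plus the case level, so "a" differs from "A" while accent differences are ignored:

#include <unicode/coll.h>
#include <unicode/locid.h>
#include <unicode/ucol.h>
#include <cstdio>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Collator* collator =
      icu::Collator::createInstance(icu::Locale("en"), status);
  if (U_FAILURE(status)) return 1;
  // The "case" branch above: PRIMARY strength with the case level on.
  collator->setStrength(icu::Collator::PRIMARY);
  collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
  UErrorCode cmp_status = U_ZERO_ERROR;
  std::printf("a vs A: %d\n",
              static_cast<int>(collator->compare(
                  icu::UnicodeString("a"), icu::UnicodeString("A"),
                  cmp_status)));  // non-zero: case still distinguishes
  delete collator;
  return 0;
}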
static bool SetBooleanAttribute(UColAttribute attribute,
const char* name,
v8::Handle<v8::Object> options,
icu::Collator* collator) {
UErrorCode status = U_ZERO_ERROR;
bool result;
if (Utils::ExtractBooleanSetting(options, name, &result)) {
collator->setAttribute(attribute, result ? UCOL_ON : UCOL_OFF, status);
if (U_FAILURE(status)) {
return false;
}
}
return true;
}
static void SetResolvedSettings(const icu::Locale& icu_locale,
icu::Collator* collator,
v8::Handle<v8::Object> resolved) {
SetBooleanSetting(UCOL_NUMERIC_COLLATION, collator, "numeric", resolved);
UErrorCode status = U_ZERO_ERROR;
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
resolved->Set(v8::String::New("caseFirst"), v8::String::New("lower"));
break;
case UCOL_UPPER_FIRST:
resolved->Set(v8::String::New("caseFirst"), v8::String::New("upper"));
break;
default:
resolved->Set(v8::String::New("caseFirst"), v8::String::New("false"));
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
resolved->Set(v8::String::New("strength"), v8::String::New("primary"));
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
resolved->Set(v8::String::New("sensitivity"), v8::String::New("case"));
} else {
resolved->Set(v8::String::New("sensitivity"), v8::String::New("base"));
}
break;
}
case UCOL_SECONDARY:
resolved->Set(v8::String::New("strength"), v8::String::New("secondary"));
resolved->Set(v8::String::New("sensitivity"), v8::String::New("accent"));
break;
case UCOL_TERTIARY:
resolved->Set(v8::String::New("strength"), v8::String::New("tertiary"));
resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
// put them into variant.
resolved->Set(v8::String::New("strength"), v8::String::New("quaternary"));
resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
break;
default:
resolved->Set(v8::String::New("strength"), v8::String::New("identical"));
resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
}
if (UCOL_SHIFTED == collator->getAttribute(UCOL_ALTERNATE_HANDLING, status)) {
resolved->Set(v8::String::New("ignorePunctuation"),
v8::Boolean::New(true));
} else {
resolved->Set(v8::String::New("ignorePunctuation"),
v8::Boolean::New(false));
}
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
resolved->Set(v8::String::New("locale"), v8::String::New(result));
} else {
// This would never happen, since we got the locale from ICU.
resolved->Set(v8::String::New("locale"), v8::String::New("und"));
}
}
static void SetBooleanSetting(UColAttribute attribute,
icu::Collator* collator,
const char* property,
v8::Handle<v8::Object> resolved) {
UErrorCode status = U_ZERO_ERROR;
if (UCOL_ON == collator->getAttribute(attribute, status)) {
resolved->Set(v8::String::New(property), v8::Boolean::New(true));
} else {
resolved->Set(v8::String::New(property), v8::Boolean::New(false));
}
}
} // namespace v8_i18n

7
deps/v8/src/extensions/i18n/collator.js

@@ -35,8 +35,6 @@
* Useful for subclassing.
*/
function initializeCollator(collator, locales, options) {
native function NativeJSCreateCollator();
if (collator.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize Collator object.');
}
@@ -103,7 +101,7 @@ function initializeCollator(collator, locales, options) {
usage: {value: internalOptions.usage, writable: true}
});
var internalCollator = NativeJSCreateCollator(requestedLocale,
var internalCollator = %CreateCollator(requestedLocale,
internalOptions,
resolved);
@@ -204,8 +202,7 @@ function initializeCollator(collator, locales, options) {
* the sort order, or x comes after y in the sort order, respectively.
*/
function compare(collator, x, y) {
native function NativeJSInternalCompare();
return NativeJSInternalCompare(collator.collator, String(x), String(y));
return %InternalCompare(collator.collator, String(x), String(y));
};

18
deps/v8/src/extensions/i18n/i18n-extension.cc

@@ -29,9 +29,7 @@
#include "i18n-extension.h"
#include "break-iterator.h"
#include "collator.h"
#include "natives.h"
#include "number-format.h"
using v8::internal::I18NNatives;
@@ -47,22 +45,6 @@ Extension::Extension()
v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
v8::Handle<v8::String> name) {
// Number format and parse.
if (name->Equals(v8::String::New("NativeJSCreateNumberFormat"))) {
return v8::FunctionTemplate::New(NumberFormat::JSCreateNumberFormat);
} else if (name->Equals(v8::String::New("NativeJSInternalNumberFormat"))) {
return v8::FunctionTemplate::New(NumberFormat::JSInternalFormat);
} else if (name->Equals(v8::String::New("NativeJSInternalNumberParse"))) {
return v8::FunctionTemplate::New(NumberFormat::JSInternalParse);
}
// Collator.
if (name->Equals(v8::String::New("NativeJSCreateCollator"))) {
return v8::FunctionTemplate::New(Collator::JSCreateCollator);
} else if (name->Equals(v8::String::New("NativeJSInternalCompare"))) {
return v8::FunctionTemplate::New(Collator::JSInternalCompare);
}
// Break iterator.
if (name->Equals(v8::String::New("NativeJSCreateBreakIterator"))) {
return v8::FunctionTemplate::New(BreakIterator::JSCreateBreakIterator);

418
deps/v8/src/extensions/i18n/number-format.cc

@@ -1,418 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// limitations under the License.
#include "number-format.h"
#include <string.h>
#include "i18n-utils.h"
#include "unicode/curramt.h"
#include "unicode/dcfmtsym.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/uchar.h"
#include "unicode/ucurr.h"
#include "unicode/unum.h"
#include "unicode/uversion.h"
namespace v8_i18n {
static icu::DecimalFormat* InitializeNumberFormat(v8::Handle<v8::String>,
v8::Handle<v8::Object>,
v8::Handle<v8::Object>);
static icu::DecimalFormat* CreateICUNumberFormat(const icu::Locale&,
v8::Handle<v8::Object>);
static void SetResolvedSettings(const icu::Locale&,
icu::DecimalFormat*,
v8::Handle<v8::Object>);
icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
v8::Handle<v8::Object> obj) {
v8::HandleScope handle_scope;
// v8::ObjectTemplate doesn't have HasInstance method so we can't check
// if obj is an instance of NumberFormat class. We'll check for a property
// that has to be in the object. The same applies to other services, like
// Collator and DateTimeFormat.
if (obj->HasOwnProperty(v8::String::New("numberFormat"))) {
return static_cast<icu::DecimalFormat*>(
obj->GetAlignedPointerFromInternalField(0));
}
return NULL;
}
void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
v8::Persistent<v8::Object>* object,
void* param) {
// First delete the hidden C++ object.
// Unpacking should never return NULL here. That would only happen if
// this method is used as the weak callback for persistent handles not
// pointing to a number formatter.
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
delete UnpackNumberFormat(handle);
// Then dispose of the persistent handle to JS object.
object->Dispose(isolate);
}
void NumberFormat::JSInternalFormat(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsNumber()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Formatter and numeric value have to be specified.")));
return;
}
icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
if (!number_format) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("NumberFormat method called on an object "
"that is not a NumberFormat.")));
return;
}
// ICU will handle actual NaN value properly and return NaN string.
icu::UnicodeString result;
number_format->format(args[1]->NumberValue(), result);
args.GetReturnValue().Set(v8::String::New(
reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
}
void NumberFormat::JSInternalParse(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Formatter and string have to be specified.")));
return;
}
icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
if (!number_format) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("NumberFormat method called on an object "
"that is not a NumberFormat.")));
return;
}
// ICU will handle actual NaN value properly and return NaN string.
icu::UnicodeString string_number;
if (!Utils::V8StringToUnicodeString(args[1]->ToString(), &string_number)) {
string_number = "";
}
UErrorCode status = U_ZERO_ERROR;
icu::Formattable result;
// ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
// to be part of Chrome.
// TODO(cira): Include currency parsing code using parseCurrency call.
// We need to check if the formatter parses all currencies or only the
// one it was constructed with (it will impact the API - how to return ISO
// code and the value).
number_format->parse(string_number, result, status);
if (U_FAILURE(status)) {
return;
}
switch (result.getType()) {
case icu::Formattable::kDouble:
args.GetReturnValue().Set(result.getDouble());
return;
case icu::Formattable::kLong:
args.GetReturnValue().Set(result.getLong());
return;
case icu::Formattable::kInt64:
args.GetReturnValue().Set(static_cast<double>(result.getInt64()));
return;
default:
return;
}
}
void NumberFormat::JSCreateNumberFormat(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 ||
!args[0]->IsString() ||
!args[1]->IsObject() ||
!args[2]->IsObject()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Internal error, wrong parameters.")));
return;
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::ObjectTemplate> number_format_template =
Utils::GetTemplate(isolate);
// Create an empty object wrapper.
v8::Local<v8::Object> local_object = number_format_template->NewInstance();
// But the handle shouldn't be empty.
// That can happen if there was a stack overflow when creating the object.
if (local_object.IsEmpty()) {
args.GetReturnValue().Set(local_object);
return;
}
// Set number formatter as internal field of the resulting JS object.
icu::DecimalFormat* number_format = InitializeNumberFormat(
args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
if (!number_format) {
v8::ThrowException(v8::Exception::Error(v8::String::New(
"Internal error. Couldn't create ICU number formatter.")));
return;
} else {
local_object->SetAlignedPointerInInternalField(0, number_format);
v8::TryCatch try_catch;
local_object->Set(v8::String::New("numberFormat"),
v8::String::New("valid"));
if (try_catch.HasCaught()) {
v8::ThrowException(v8::Exception::Error(
v8::String::New("Internal error, couldn't set property.")));
return;
}
}
v8::Persistent<v8::Object> wrapper(isolate, local_object);
// Make the object handle weak so we can delete the formatter once GC kicks in.
wrapper.MakeWeak<void>(NULL, &DeleteNumberFormat);
args.GetReturnValue().Set(wrapper);
wrapper.ClearAndLeak();
}
static icu::DecimalFormat* InitializeNumberFormat(
v8::Handle<v8::String> locale,
v8::Handle<v8::Object> options,
v8::Handle<v8::Object> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
v8::String::AsciiValue bcp47_locale(locale);
if (bcp47_locale.length() != 0) {
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
return NULL;
}
icu_locale = icu::Locale(icu_result);
}
icu::DecimalFormat* number_format =
CreateICUNumberFormat(icu_locale, options);
if (!number_format) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
number_format = CreateICUNumberFormat(no_extension_locale, options);
// Set resolved settings (pattern, numbering system).
SetResolvedSettings(no_extension_locale, number_format, resolved);
} else {
SetResolvedSettings(icu_locale, number_format, resolved);
}
return number_format;
}
static icu::DecimalFormat* CreateICUNumberFormat(
const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
icu::DecimalFormat* number_format = NULL;
icu::UnicodeString style;
icu::UnicodeString currency;
if (Utils::ExtractStringSetting(options, "style", &style)) {
if (style == UNICODE_STRING_SIMPLE("currency")) {
Utils::ExtractStringSetting(options, "currency", &currency);
icu::UnicodeString display;
Utils::ExtractStringSetting(options, "currencyDisplay", &display);
#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
icu::NumberFormat::EStyles style;
if (display == UNICODE_STRING_SIMPLE("code")) {
style = icu::NumberFormat::kIsoCurrencyStyle;
} else if (display == UNICODE_STRING_SIMPLE("name")) {
style = icu::NumberFormat::kPluralCurrencyStyle;
} else {
style = icu::NumberFormat::kCurrencyStyle;
}
#else // ICU version is 4.8 or above (we ignore versions below 4.0).
UNumberFormatStyle style;
if (display == UNICODE_STRING_SIMPLE("code")) {
style = UNUM_CURRENCY_ISO;
} else if (display == UNICODE_STRING_SIMPLE("name")) {
style = UNUM_CURRENCY_PLURAL;
} else {
style = UNUM_CURRENCY;
}
#endif
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createInstance(icu_locale, style, status));
} else if (style == UNICODE_STRING_SIMPLE("percent")) {
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createPercentInstance(icu_locale, status));
if (U_FAILURE(status)) {
delete number_format;
return NULL;
}
// Make sure 1.1% doesn't go into 2%.
number_format->setMinimumFractionDigits(1);
} else {
// Make a decimal instance by default.
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createInstance(icu_locale, status));
}
}
if (U_FAILURE(status)) {
delete number_format;
return NULL;
}
// Set all options.
if (!currency.isEmpty()) {
number_format->setCurrency(currency.getBuffer(), status);
}
int32_t digits;
if (Utils::ExtractIntegerSetting(
options, "minimumIntegerDigits", &digits)) {
number_format->setMinimumIntegerDigits(digits);
}
if (Utils::ExtractIntegerSetting(
options, "minimumFractionDigits", &digits)) {
number_format->setMinimumFractionDigits(digits);
}
if (Utils::ExtractIntegerSetting(
options, "maximumFractionDigits", &digits)) {
number_format->setMaximumFractionDigits(digits);
}
bool significant_digits_used = false;
if (Utils::ExtractIntegerSetting(
options, "minimumSignificantDigits", &digits)) {
number_format->setMinimumSignificantDigits(digits);
significant_digits_used = true;
}
if (Utils::ExtractIntegerSetting(
options, "maximumSignificantDigits", &digits)) {
number_format->setMaximumSignificantDigits(digits);
significant_digits_used = true;
}
number_format->setSignificantDigitsUsed(significant_digits_used);
bool grouping;
if (Utils::ExtractBooleanSetting(options, "useGrouping", &grouping)) {
number_format->setGroupingUsed(grouping);
}
// Set rounding mode.
number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
return number_format;
}
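The percent branch's `setMinimumFractionDigits(1)` is the whole point of the comment above it. A minimal ICU-only sketch (assuming the ICU headers and library are available) showing the difference it makes; without the minimum, 0.011 renders as "1%" under the default zero fraction digits rather than "1.1%":

#include <unicode/locid.h>
#include <unicode/numfmt.h>
#include <unicode/unistr.h>
#include <cstdio>
#include <string>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::NumberFormat* percent =
      icu::NumberFormat::createPercentInstance(icu::Locale("en"), status);
  if (U_FAILURE(status)) return 1;
  percent->setMinimumFractionDigits(1);  // keep the fractional percent digit
  icu::UnicodeString result;
  percent->format(0.011, result);
  std::string out;
  result.toUTF8String(out);
  std::printf("%s\n", out.c_str());      // "1.1%"
  delete percent;
  return 0;
}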
static void SetResolvedSettings(const icu::Locale& icu_locale,
icu::DecimalFormat* number_format,
v8::Handle<v8::Object> resolved) {
icu::UnicodeString pattern;
number_format->toPattern(pattern);
resolved->Set(v8::String::New("pattern"),
v8::String::New(reinterpret_cast<const uint16_t*>(
pattern.getBuffer()), pattern.length()));
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
resolved->Set(v8::String::New("currency"),
v8::String::New(reinterpret_cast<const uint16_t*>(
currency.getBuffer()), currency.length()));
}
// Ugly hack. ICU doesn't expose the numbering system in any way, so we have
// to assume that for a given locale the NumberingSystem constructor produces
// the same digits as NumberFormat would.
UErrorCode status = U_ZERO_ERROR;
icu::NumberingSystem* numbering_system =
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
resolved->Set(v8::String::New("numberingSystem"), v8::String::New(ns));
} else {
resolved->Set(v8::String::New("numberingSystem"), v8::Undefined());
}
delete numbering_system;
resolved->Set(v8::String::New("useGrouping"),
v8::Boolean::New(number_format->isGroupingUsed()));
resolved->Set(v8::String::New("minimumIntegerDigits"),
v8::Integer::New(number_format->getMinimumIntegerDigits()));
resolved->Set(v8::String::New("minimumFractionDigits"),
v8::Integer::New(number_format->getMinimumFractionDigits()));
resolved->Set(v8::String::New("maximumFractionDigits"),
v8::Integer::New(number_format->getMaximumFractionDigits()));
if (resolved->HasOwnProperty(v8::String::New("minimumSignificantDigits"))) {
resolved->Set(v8::String::New("minimumSignificantDigits"), v8::Integer::New(
number_format->getMinimumSignificantDigits()));
}
if (resolved->HasOwnProperty(v8::String::New("maximumSignificantDigits"))) {
resolved->Set(v8::String::New("maximumSignificantDigits"), v8::Integer::New(
number_format->getMaximumSignificantDigits()));
}
// Set the locale.
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
resolved->Set(v8::String::New("locale"), v8::String::New(result));
} else {
// This should never happen, since we got the locale from ICU.
resolved->Set(v8::String::New("locale"), v8::String::New("und"));
}
}
} // namespace v8_i18n
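
Editor's note: the numbering-system workaround and the uloc_toLanguageTag fallback above can be exercised in isolation. A sketch, again assuming stock ICU4C (the locale is chosen so the numbering system differs from Latin digits; names are illustrative):

#include <unicode/locid.h>
#include <unicode/numsys.h>
#include <unicode/uloc.h>
#include <cstdio>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Locale locale("ar_EG");
  icu::NumberingSystem* numbering =
      icu::NumberingSystem::createInstance(locale, status);
  if (U_SUCCESS(status)) {
    std::printf("numberingSystem: %s\n", numbering->getName());  // e.g. "arab"
  }
  delete numbering;
  char tag[ULOC_FULLNAME_CAPACITY];
  status = U_ZERO_ERROR;
  uloc_toLanguageTag(locale.getName(), tag, ULOC_FULLNAME_CAPACITY,
                     FALSE, &status);
  if (U_SUCCESS(status)) std::printf("locale: %s\n", tag);  // "ar-EG"
  return 0;
}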

12
deps/v8/src/extensions/i18n/number-format.js

@@ -65,8 +65,6 @@ function getNumberOption(options, property, min, max, fallback) {
* Useful for subclassing.
*/
function initializeNumberFormat(numberFormat, locales, options) {
native function NativeJSCreateNumberFormat();
if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
throw new TypeError('Trying to re-initialize NumberFormat object.');
}
@@ -148,7 +146,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
}
var formatter = NativeJSCreateNumberFormat(requestedLocale,
var formatter = %CreateNumberFormat(requestedLocale,
internalOptions,
resolved);
@@ -269,15 +267,13 @@ function initializeNumberFormat(numberFormat, locales, options) {
* NumberFormat.
*/
function formatNumber(formatter, value) {
native function NativeJSInternalNumberFormat();
// Spec treats -0 and +0 as 0.
var number = Number(value);
if (number === -0) {
number = 0;
}
return NativeJSInternalNumberFormat(formatter.formatter, number);
return %InternalNumberFormat(formatter.formatter, number);
}
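
Editor's note: an aside on the -0 normalization above. In JS, -0 === 0 is true, so the check also matches +0, where overwriting with 0 is a harmless no-op. A C++ sketch of the same normalization, where the two zeros must be told apart explicitly:

#include <cmath>
#include <cstdio>

int main() {
  double number = -0.0;
  // Unlike JS's ===, C++'s == cannot distinguish -0 from +0 either,
  // so signbit is needed to detect the negative zero before clearing it.
  if (number == 0.0 && std::signbit(number)) number = 0.0;
  std::printf("%g\n", number);  // prints "0", not "-0"
  return 0;
}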
@@ -285,9 +281,7 @@ function formatNumber(formatter, value) {
* Returns a Number that represents the string value that was passed in.
*/
function parseNumber(formatter, value) {
native function NativeJSInternalNumberParse();
return NativeJSInternalNumberParse(formatter.formatter, String(value));
return %InternalNumberParse(formatter.formatter, String(value));
}

13
deps/v8/src/factory.cc

@@ -1023,10 +1023,11 @@ Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
PretenureFlag pretenure) {
PretenureFlag pretenure,
bool alloc_props) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure),
isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
JSObject);
}
@@ -1215,6 +1216,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
shared->set_num_literals(literals_array_size);
if (is_generator) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
shared->DisableOptimization(kGenerator);
}
return shared;
}
@@ -1391,8 +1393,10 @@ Handle<JSFunction> Factory::CreateApiFunction(
Smi::cast(instance_template->internal_field_count())->value();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
// JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
InstanceType type = INVALID_TYPE;
InstanceType type;
switch (instance_type) {
case JavaScriptObject:
type = JS_OBJECT_TYPE;
@@ -1407,9 +1411,10 @@ Handle<JSFunction> Factory::CreateApiFunction(
instance_size += JSGlobalProxy::kSize;
break;
default:
UNREACHABLE();
type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
NewFunction(Factory::empty_string(),

6
deps/v8/src/factory.h

@@ -301,7 +301,11 @@ class Factory {
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
PretenureFlag pretenure = NOT_TENURED);
PretenureFlag pretenure = NOT_TENURED,
bool allocate_properties = true);
Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,

2
deps/v8/src/flag-definitions.h

@@ -264,6 +264,8 @@ DEFINE_int(deopt_every_n_garbage_collections,
"deoptimize every n garbage collections")
DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(trap_on_stub_deopt, false,
"put a break point before deoptimizing a stub")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
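
Editor's note: both trap flags can be exercised from the shell; for instance (a d8 build from this tree assumed), d8 --trap_on_stub_deopt script.js puts a break point just before a stub deoptimizes, mirroring what --trap_on_deopt already does for optimized function code.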

511
deps/v8/src/global-handles.cc

@@ -56,9 +56,7 @@ class GlobalHandles::Node {
NORMAL, // Normal global handle.
WEAK, // Flagged as weak but not yet finalized.
PENDING, // Has been recognized as only reachable by weak handles.
NEAR_DEATH, // Callback has informed the handle is near death.
NUMBER_OF_STATES
NEAR_DEATH // Callback has informed the handle is near death.
};
// Maps handle location (slot) to the containing node.
@@ -96,12 +94,13 @@ class GlobalHandles::Node {
}
#endif
void Initialize(int index, Node* first_free) {
void Initialize(int index, Node** first_free) {
index_ = static_cast<uint8_t>(index);
ASSERT(static_cast<int>(index_) == index);
set_state(FREE);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = first_free;
parameter_or_next_free_.next_free = *first_free;
*first_free = this;
}
void Acquire(Object* object) {
@@ -113,6 +112,7 @@ class GlobalHandles::Node {
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
weak_reference_callback_ = NULL;
IncreaseBlockUses();
}
void Release() {
@@ -126,7 +126,7 @@ class GlobalHandles::Node {
set_partially_dependent(false);
weak_reference_callback_ = NULL;
#endif
ReleaseFromBlock();
DecreaseBlockUses();
}
// Object slot accessors.
@@ -205,6 +205,10 @@ class GlobalHandles::Node {
}
void clear_partially_dependent() { set_partially_dependent(false); }
// Callback accessor.
// TODO(svenpanne) Re-enable or nuke later.
// WeakReferenceCallback callback() { return callback_; }
// Callback parameter accessors.
void set_parameter(void* parameter) {
ASSERT(state() != FREE);
@@ -273,7 +277,8 @@ class GlobalHandles::Node {
private:
inline NodeBlock* FindBlock();
inline GlobalHandles* GetGlobalHandles();
inline void ReleaseFromBlock();
inline void IncreaseBlockUses();
inline void DecreaseBlockUses();
// Storage for object pointer.
// Placed first to avoid offset computation.
@@ -311,404 +316,163 @@ class GlobalHandles::Node {
};
class GlobalHandles::BlockListIterator {
class GlobalHandles::NodeBlock {
public:
explicit inline BlockListIterator(BlockList* anchor)
: anchor_(anchor), current_(anchor->next()) {
ASSERT(anchor->IsAnchor());
}
inline BlockList* block() const {
ASSERT(!done());
return current_;
}
inline bool done() const {
ASSERT_EQ(anchor_ == current_, current_->IsAnchor());
return anchor_ == current_;
}
inline void Advance() {
ASSERT(!done());
current_ = current_->next();
}
private:
BlockList* const anchor_;
BlockList* current_;
DISALLOW_COPY_AND_ASSIGN(BlockListIterator);
};
GlobalHandles::BlockList::BlockList()
: prev_block_(this),
next_block_(this),
first_free_(NULL),
used_nodes_(0) {}
void GlobalHandles::BlockList::InsertAsNext(BlockList* const block) {
ASSERT(block != this);
ASSERT(!block->IsAnchor());
ASSERT(block->IsDetached());
block->prev_block_ = this;
block->next_block_ = next_block_;
next_block_->prev_block_ = block;
next_block_ = block;
ASSERT(!IsDetached());
ASSERT(!block->IsDetached());
}
void GlobalHandles::BlockList::Detach() {
ASSERT(!IsAnchor());
ASSERT(!IsDetached());
prev_block_->next_block_ = next_block_;
next_block_->prev_block_ = prev_block_;
prev_block_ = this;
next_block_ = this;
ASSERT(IsDetached());
}
static const int kSize = 256;
explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
: next_(next),
used_nodes_(0),
next_used_(NULL),
prev_used_(NULL),
global_handles_(global_handles) {}
bool GlobalHandles::BlockList::HasAtLeastLength(int length) {
ASSERT(IsAnchor());
ASSERT(length > 0);
for (BlockListIterator it(this); !it.done(); it.Advance()) {
if (--length <= 0) return true;
void PutNodesOnFreeList(Node** first_free) {
for (int i = kSize - 1; i >= 0; --i) {
nodes_[i].Initialize(i, first_free);
}
return false;
}
#ifdef DEBUG
int GlobalHandles::BlockList::LengthOfFreeList() {
int count = 0;
Node* node = first_free_;
while (node != NULL) {
count++;
node = node->next_free();
}
return count;
}
#endif
Node* node_at(int index) {
ASSERT(0 <= index && index < kSize);
return &nodes_[index];
}
int GlobalHandles::BlockList::CompareBlocks(const void* a, const void* b) {
const BlockList* block_a =
*reinterpret_cast<const BlockList**>(reinterpret_cast<uintptr_t>(a));
const BlockList* block_b =
*reinterpret_cast<const BlockList**>(reinterpret_cast<uintptr_t>(b));
if (block_a->used_nodes() > block_b->used_nodes()) return -1;
if (block_a->used_nodes() == block_b->used_nodes()) return 0;
return 1;
}
class GlobalHandles::NodeBlock : public BlockList {
public:
static const int kSize = 256;
explicit NodeBlock(GlobalHandles* global_handles)
: global_handles_(global_handles) {
// Initialize nodes
Node* first_free = first_free_;
for (int i = kSize - 1; i >= 0; --i) {
nodes_[i].Initialize(i, first_free);
first_free = &nodes_[i];
void IncreaseUses() {
ASSERT(used_nodes_ < kSize);
if (used_nodes_++ == 0) {
NodeBlock* old_first = global_handles_->first_used_block_;
global_handles_->first_used_block_ = this;
next_used_ = old_first;
prev_used_ = NULL;
if (old_first == NULL) return;
old_first->prev_used_ = this;
}
first_free_ = first_free;
ASSERT(!IsAnchor());
// Link into global_handles
ASSERT(global_handles->non_full_blocks_.IsDetached());
global_handles->non_full_blocks_.InsertAsHead(this);
global_handles->number_of_blocks_++;
}
Node* Acquire(Object* o) {
ASSERT(used_nodes_ < kSize);
ASSERT(first_free_ != NULL);
ASSERT(global_handles_->non_full_blocks_.next() == this);
// Remove from free list
Node* node = first_free_;
first_free_ = node->next_free();
// Increment counters
global_handles_->isolate()->counters()->global_handles()->Increment();
global_handles_->number_of_global_handles_++;
// Initialize node with value
node->Acquire(o);
bool now_full = ++used_nodes_ == kSize;
ASSERT_EQ(now_full, first_free_ == NULL);
if (now_full) {
// Move block to tail of non_full_blocks_
Detach();
global_handles_->full_blocks_.InsertAsTail(this);
}
return node;
}
void Release(Node* node) {
void DecreaseUses() {
ASSERT(used_nodes_ > 0);
// Add to free list
node->set_next_free(first_free_);
first_free_ = node;
// Decrement counters
global_handles_->isolate()->counters()->global_handles()->Decrement();
global_handles_->number_of_global_handles_--;
bool was_full = used_nodes_-- == kSize;
ASSERT_EQ(was_full, first_free_->next_free() == NULL);
if (was_full) {
// Move this block to head of non_full_blocks_
Detach();
global_handles_->non_full_blocks_.InsertAsHead(this);
if (--used_nodes_ == 0) {
if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
if (this == global_handles_->first_used_block_) {
global_handles_->first_used_block_ = next_used_;
}
}
Node* node_at(int index) {
ASSERT(0 <= index && index < kSize);
return &nodes_[index];
}
GlobalHandles* global_handles() { return global_handles_; }
static NodeBlock* Cast(BlockList* block_list) {
ASSERT(!block_list->IsAnchor());
return static_cast<NodeBlock*>(block_list);
}
// Next block in the list of all blocks.
NodeBlock* next() const { return next_; }
static NodeBlock* From(Node* node, uint8_t index) {
uintptr_t ptr = reinterpret_cast<uintptr_t>(node - index);
ptr -= OFFSET_OF(NodeBlock, nodes_);
NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
ASSERT(block->node_at(index) == node);
return block;
}
// Next/previous block in the list of blocks with used nodes.
NodeBlock* next_used() const { return next_used_; }
NodeBlock* prev_used() const { return prev_used_; }
private:
Node nodes_[kSize];
NodeBlock* const next_;
int used_nodes_;
NodeBlock* next_used_;
NodeBlock* prev_used_;
GlobalHandles* global_handles_;
};
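
Editor's note: the rewritten NodeBlock drops the sorted full/non-full block lists in favor of a single free list threaded through the nodes themselves, plus a linked list of blocks that currently have used nodes. A reduced sketch of the allocation scheme (all names here are stand-ins, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Node {
  uint8_t index;    // position inside the owning block
  Node* next_free;  // free-list link, only meaningful while free
  int payload;
};

struct Block {
  static const int kSize = 4;
  Node nodes[kSize];  // first member, so node-address arithmetic works
  Block* next;
  explicit Block(Block* n) : next(n) {}
  // Mirrors NodeBlock::PutNodesOnFreeList: push every node, last to first.
  void PutNodesOnFreeList(Node** first_free) {
    for (int i = kSize - 1; i >= 0; --i) {
      nodes[i].index = static_cast<uint8_t>(i);
      nodes[i].next_free = *first_free;
      *first_free = &nodes[i];
    }
  }
};

struct Pool {
  Block* first_block = nullptr;
  Node* first_free = nullptr;
  Node* Acquire(int value) {
    if (first_free == nullptr) {  // grow lazily, as Create() does above
      first_block = new Block(first_block);
      first_block->PutNodesOnFreeList(&first_free);
    }
    Node* node = first_free;      // pop the free-list head
    first_free = node->next_free;
    node->payload = value;
    return node;
  }
  void Release(Node* node) {      // push back onto the free list
    node->next_free = first_free;
    first_free = node;
  }
};

int main() {
  Pool pool;
  Node* a = pool.Acquire(1);
  Node* b = pool.Acquire(2);
  pool.Release(a);
  Node* c = pool.Acquire(3);  // reuses a's slot
  assert(a == c);
  std::printf("%d %d\n", b->payload, c->payload);  // 2 3
  return 0;
}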
void GlobalHandles::BlockList::SortBlocks(GlobalHandles* global_handles,
bool prune) {
// Always sort at least 2 blocks
if (!global_handles->non_full_blocks_.HasAtLeastLength(2)) return;
// Build a vector large enough to hold the upper bound of the block count.
int number_of_blocks = global_handles->block_count();
// Build array of blocks and update number_of_blocks to actual count
ScopedVector<BlockList*> blocks(number_of_blocks);
{
int i = 0;
BlockList* anchor = &global_handles->non_full_blocks_;
for (BlockListIterator it(anchor); !it.done(); it.Advance()) {
blocks[i++] = it.block();
}
number_of_blocks = i;
}
// Nothing to do.
if (number_of_blocks <= 1) return;
// Sort blocks
qsort(blocks.start(), number_of_blocks, sizeof(blocks[0]), CompareBlocks);
// Prune empties
if (prune) {
static const double kUnusedPercentage = 0.30;
static const double kUsedPercentage = 1.30;
int total_slots = global_handles->number_of_blocks_ * NodeBlock::kSize;
const int total_used = global_handles->number_of_global_handles_;
const int target_unused = static_cast<int>(Max(
total_used * kUsedPercentage,
total_slots * kUnusedPercentage));
// Reverse through empty blocks. Note: always leave one block free.
int blocks_deleted = 0;
for (int i = number_of_blocks - 1; i > 0 && blocks[i]->IsUnused(); i--) {
// Not worth deleting
if (total_slots - total_used < target_unused) break;
blocks[i]->Detach();
delete blocks[i];
blocks_deleted++;
total_slots -= NodeBlock::kSize;
}
global_handles->number_of_blocks_ -= blocks_deleted;
number_of_blocks -= blocks_deleted;
}
// Relink all blocks
for (int i = 0; i < number_of_blocks; i++) {
blocks[i]->Detach();
global_handles->non_full_blocks_.InsertAsTail(blocks[i]);
}
#ifdef DEBUG
// Check sorting
BlockList* anchor = &global_handles->non_full_blocks_;
int last_size = NodeBlock::kSize;
for (BlockListIterator it(anchor); !it.done(); it.Advance()) {
ASSERT(it.block()->used_nodes() <= last_size);
last_size = it.block()->used_nodes();
}
#endif
}
#ifdef DEBUG
void GlobalHandles::VerifyBlockInvariants() {
int number_of_blocks = 0;
int number_of_handles = 0;
for (int i = 0; i < kAllAnchorsSize; i++) {
for (BlockListIterator it(all_anchors_[i]); !it.done(); it.Advance()) {
BlockList* block = it.block();
number_of_blocks++;
int used_nodes = block->used_nodes();
number_of_handles += used_nodes;
int unused_nodes = block->LengthOfFreeList();
ASSERT_EQ(used_nodes + unused_nodes, NodeBlock::kSize);
if (all_anchors_[i] == &full_blocks_) {
ASSERT_EQ(NodeBlock::kSize, used_nodes);
} else {
ASSERT_NE(NodeBlock::kSize, used_nodes);
}
}
}
ASSERT_EQ(number_of_handles, number_of_global_handles_);
ASSERT_EQ(number_of_blocks, number_of_blocks_);
}
#endif
void GlobalHandles::SortBlocks(bool shouldPrune) {
#ifdef DEBUG
VerifyBlockInvariants();
#endif
BlockList::SortBlocks(this, shouldPrune);
#ifdef DEBUG
VerifyBlockInvariants();
#endif
}
GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
return FindBlock()->global_handles();
}
GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
return NodeBlock::From(this, index_);
intptr_t ptr = reinterpret_cast<intptr_t>(this);
ptr = ptr - index_ * sizeof(Node);
NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
ASSERT(block->node_at(index_) == this);
return block;
}
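
Editor's note: FindBlock() relies on the node array being the first member of NodeBlock, so a node can recover its owning block from its own address and index alone. The trick in isolation (hypothetical names):

#include <cassert>
#include <cstdint>

struct Node { uint8_t index; };

struct Block {
  static const int kSize = 4;
  Node nodes[kSize];  // must stay the first member (offset 0)
};

Block* FromNode(Node* node) {
  // Step back over the preceding nodes to land on the block's start.
  intptr_t ptr = reinterpret_cast<intptr_t>(node) - node->index * sizeof(Node);
  return reinterpret_cast<Block*>(ptr);
}

int main() {
  Block block;
  for (int i = 0; i < Block::kSize; ++i) {
    block.nodes[i].index = static_cast<uint8_t>(i);
  }
  assert(FromNode(&block.nodes[2]) == &block);
  return 0;
}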
void GlobalHandles::Node::IncreaseBlockUses() {
NodeBlock* node_block = FindBlock();
node_block->IncreaseUses();
GlobalHandles* global_handles = node_block->global_handles();
global_handles->isolate()->counters()->global_handles()->Increment();
global_handles->number_of_global_handles_++;
}
void GlobalHandles::Node::ReleaseFromBlock() {
FindBlock()->Release(this);
void GlobalHandles::Node::DecreaseBlockUses() {
NodeBlock* node_block = FindBlock();
GlobalHandles* global_handles = node_block->global_handles();
parameter_or_next_free_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
node_block->DecreaseUses();
global_handles->isolate()->counters()->global_handles()->Decrement();
global_handles->number_of_global_handles_--;
}
class GlobalHandles::NodeIterator {
public:
explicit NodeIterator(GlobalHandles* global_handles)
: all_anchors_(global_handles->all_anchors_),
block_(all_anchors_[0]),
anchor_index_(0),
node_index_(0) {
AdvanceBlock();
}
: block_(global_handles->first_used_block_),
index_(0) {}
bool done() const {
return anchor_index_ == kAllAnchorsSize;
}
bool done() const { return block_ == NULL; }
Node* node() const {
ASSERT(!done());
return NodeBlock::Cast(block_)->node_at(node_index_);
return block_->node_at(index_);
}
void Advance() {
ASSERT(!done());
if (++node_index_ < NodeBlock::kSize) return;
node_index_ = 0;
AdvanceBlock();
if (++index_ < NodeBlock::kSize) return;
index_ = 0;
block_ = block_->next_used();
}
typedef int CountArray[Node::NUMBER_OF_STATES];
static int CollectStats(GlobalHandles* global_handles, CountArray counts);
private:
void AdvanceBlock() {
ASSERT(!done());
while (true) {
block_ = block_->next();
// block is valid
if (block_ != all_anchors_[anchor_index_]) {
ASSERT(!done());
ASSERT(!block_->IsAnchor());
// skip empty blocks
if (block_->IsUnused()) continue;
return;
}
// jump lists
anchor_index_++;
if (anchor_index_ == kAllAnchorsSize) break;
block_ = all_anchors_[anchor_index_];
}
ASSERT(done());
}
BlockList* const * const all_anchors_;
BlockList* block_;
int anchor_index_;
int node_index_;
NodeBlock* block_;
int index_;
DISALLOW_COPY_AND_ASSIGN(NodeIterator);
};
int GlobalHandles::NodeIterator::CollectStats(GlobalHandles* global_handles,
CountArray counts) {
static const int kSize = Node::NUMBER_OF_STATES;
for (int i = 0; i < kSize; i++) {
counts[i] = 0;
}
int total = 0;
for (NodeIterator it(global_handles); !it.done(); it.Advance()) {
total++;
Node::State state = it.node()->state();
ASSERT(state >= 0 && state < kSize);
counts[state]++;
}
// NodeIterator skips empty blocks
int skipped = global_handles->number_of_blocks_ * NodeBlock::kSize - total;
total += skipped;
counts[Node::FREE] += total;
return total;
}
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
number_of_blocks_(0),
number_of_global_handles_(0),
first_block_(NULL),
first_used_block_(NULL),
first_free_(NULL),
post_gc_processing_count_(0),
object_group_connections_(kObjectGroupConnectionsCapacity) {
all_anchors_[0] = &full_blocks_;
all_anchors_[1] = &non_full_blocks_;
}
object_group_connections_(kObjectGroupConnectionsCapacity) {}
GlobalHandles::~GlobalHandles() {
for (int i = 0; i < kAllAnchorsSize; i++) {
BlockList* block = all_anchors_[i]->next();
while (block != all_anchors_[i]) {
BlockList* tmp = block->next();
block->Detach();
delete NodeBlock::Cast(block);
NodeBlock* block = first_block_;
while (block != NULL) {
NodeBlock* tmp = block->next();
delete block;
block = tmp;
}
}
first_block_ = NULL;
}
Handle<Object> GlobalHandles::Create(Object* value) {
if (non_full_blocks_.IsDetached()) {
new NodeBlock(this);
ASSERT(!non_full_blocks_.IsDetached());
if (first_free_ == NULL) {
first_block_ = new NodeBlock(this, first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
}
ASSERT(non_full_blocks_.IsAnchor());
ASSERT(!non_full_blocks_.next()->IsAnchor());
Node* result = NodeBlock::Cast(non_full_blocks_.next())->Acquire(value);
ASSERT(first_free_ != NULL);
// Take the first node in the free list.
Node* result = first_free_;
first_free_ = result->next_free();
result->Acquire(value);
if (isolate_->heap()->InNewSpace(value) &&
!result->is_in_new_space_list()) {
new_space_nodes_.Add(result);
@@ -898,35 +662,24 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
}
} else {
// Must cache all blocks, as NodeIterator can't survive mutation.
List<NodeBlock*> blocks(number_of_blocks_);
for (int i = 0; i < kAllAnchorsSize; i++) {
for (BlockListIterator it(all_anchors_[i]); !it.done(); it.Advance()) {
blocks.Add(NodeBlock::Cast(it.block()));
}
}
for (int block_index = 0; block_index < blocks.length(); block_index++) {
NodeBlock* block = blocks[block_index];
for (int node_index = 0; node_index < NodeBlock::kSize; node_index++) {
Node* node = block->node_at(node_index);
if (!node->IsRetainer()) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (!it.node()->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
// the next_gc_likely_to_collect_more.
continue;
}
node->clear_partially_dependent();
if (node->PostGarbageCollectionProcessing(isolate_)) {
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
return next_gc_likely_to_collect_more;
}
}
if (!node->IsRetainer()) {
if (!it.node()->IsRetainer()) {
next_gc_likely_to_collect_more = true;
}
}
}
}
// Update the list of new space nodes.
int last = 0;
for (int i = 0; i < new_space_nodes_.length(); ++i) {
@@ -946,8 +699,6 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
}
new_space_nodes_.Rewind(last);
bool shouldPruneBlocks = collector != SCAVENGER;
SortBlocks(shouldPruneBlocks);
return next_gc_likely_to_collect_more;
}
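
Editor's note: the new_space_nodes_ update above uses the compact-and-rewind idiom: survivors are copied toward the front of the list and the tail is cut off. The same idiom with a std::vector stand-in (the predicate is a placeholder for "node still holds a new-space object"):

#include <cstdio>
#include <vector>

void CompactOdds(std::vector<int>* items) {
  size_t last = 0;
  for (size_t i = 0; i < items->size(); ++i) {
    if ((*items)[i] % 2 == 0) continue;  // stand-in survival predicate
    (*items)[last++] = (*items)[i];      // keep survivors contiguous
  }
  items->resize(last);                   // the Rewind(last) equivalent
}

int main() {
  std::vector<int> v = {1, 2, 3, 4, 5};
  CompactOdds(&v);
  for (int x : v) std::printf("%d ", x);  // 1 3 5
  std::printf("\n");
  return 0;
}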
@@ -1015,30 +766,48 @@ int GlobalHandles::NumberOfGlobalObjectWeakHandles() {
void GlobalHandles::RecordStats(HeapStats* stats) {
NodeIterator::CountArray counts;
int total = NodeIterator::CollectStats(this, counts);
*stats->global_handle_count = total;
*stats->weak_global_handle_count = counts[Node::WEAK];
*stats->pending_global_handle_count = counts[Node::PENDING];
*stats->near_death_global_handle_count = counts[Node::NEAR_DEATH];
*stats->free_global_handle_count = counts[Node::FREE];
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
*stats->pending_global_handle_count = 0;
*stats->near_death_global_handle_count = 0;
*stats->free_global_handle_count = 0;
for (NodeIterator it(this); !it.done(); it.Advance()) {
*stats->global_handle_count += 1;
if (it.node()->state() == Node::WEAK) {
*stats->weak_global_handle_count += 1;
} else if (it.node()->state() == Node::PENDING) {
*stats->pending_global_handle_count += 1;
} else if (it.node()->state() == Node::NEAR_DEATH) {
*stats->near_death_global_handle_count += 1;
} else if (it.node()->state() == Node::FREE) {
*stats->free_global_handle_count += 1;
}
}
}
#ifdef DEBUG
void GlobalHandles::PrintStats() {
NodeIterator::CountArray counts;
int total = NodeIterator::CollectStats(this, counts);
size_t total_consumed = sizeof(NodeBlock) * number_of_blocks_;
int total = 0;
int weak = 0;
int pending = 0;
int near_death = 0;
int destroyed = 0;
for (NodeIterator it(this); !it.done(); it.Advance()) {
total++;
if (it.node()->state() == Node::WEAK) weak++;
if (it.node()->state() == Node::PENDING) pending++;
if (it.node()->state() == Node::NEAR_DEATH) near_death++;
if (it.node()->state() == Node::FREE) destroyed++;
}
PrintF("Global Handle Statistics:\n");
PrintF(" allocated blocks = %d\n", number_of_blocks_);
PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", total_consumed);
PrintF(" # normal = %d\n", counts[Node::NORMAL]);
PrintF(" # weak = %d\n", counts[Node::WEAK]);
PrintF(" # pending = %d\n", counts[Node::PENDING]);
PrintF(" # near_death = %d\n", counts[Node::NEAR_DEATH]);
PrintF(" # free = %d\n", counts[Node::FREE]);
PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
PrintF(" # free = %d\n", destroyed);
PrintF(" # total = %d\n", total);
}

64
deps/v8/src/global-handles.h

@@ -157,9 +157,6 @@ class GlobalHandles {
return number_of_global_handles_;
}
// Returns the current number of allocated blocks
int block_count() const { return number_of_blocks_; }
// Clear the weakness of a global handle.
static void ClearWeakness(Object** location);
@@ -279,14 +276,11 @@
#ifdef DEBUG
void PrintStats();
void Print();
void VerifyBlockInvariants();
#endif
private:
explicit GlobalHandles(Isolate* isolate);
void SortBlocks(bool shouldPrune);
// Migrates data from the internal representation (object_group_connections_,
// retainer_infos_ and implicit_ref_connections_) to the public and more
// efficient representation (object_groups_ and implicit_ref_groups_).
@@ -300,64 +294,20 @@
class Node;
class NodeBlock;
class NodeIterator;
class BlockListIterator;
// Base class for NodeBlock
class BlockList {
public:
BlockList();
~BlockList() { ASSERT(IsDetached()); }
void Detach();
void InsertAsHead(BlockList* block) {
ASSERT(IsAnchor());
InsertAsNext(block);
}
void InsertAsTail(BlockList* block) {
ASSERT(IsAnchor());
prev_block_->InsertAsNext(block);
}
inline bool IsAnchor() { return first_free_ == NULL && used_nodes_ == 0; }
inline bool IsDetached() {
ASSERT_EQ(prev_block_ == this, next_block_ == this);
return prev_block_ == this;
}
bool HasAtLeastLength(int length);
bool IsUnused() { return used_nodes_ == 0; }
int used_nodes() const { return used_nodes_; }
BlockList* next() { return next_block_; }
BlockList* prev() { return prev_block_; }
#ifdef DEBUG
int LengthOfFreeList();
#endif
static void SortBlocks(GlobalHandles* global_handles, bool prune);
protected:
BlockList* prev_block_;
BlockList* next_block_;
Node* first_free_;
int used_nodes_;
private:
// Needed for quicksort
static int CompareBlocks(const void* a, const void* b);
void InsertAsNext(BlockList* block);
DISALLOW_COPY_AND_ASSIGN(BlockList);
};
Isolate* isolate_;
// Field always containing the number of blocks allocated.
int number_of_blocks_;
// Field always containing the number of handles to global objects.
int number_of_global_handles_;
// Anchors for doubly linked lists of blocks
BlockList full_blocks_;
BlockList non_full_blocks_;
// List of all allocated node blocks.
NodeBlock* first_block_;
// List of node blocks with used nodes.
NodeBlock* first_used_block_;
// An array of all the anchors held by GlobalHandles.
// This simplifies iteration across all blocks.
static const int kAllAnchorsSize = 2;
BlockList* all_anchors_[kAllAnchorsSize];
// Free list of nodes.
Node* first_free_;
// Contains all nodes holding new space objects. Note: when the list
// is accessed, some of the objects may have been promoted already.

43
deps/v8/src/heap.cc

@@ -703,6 +703,16 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
int Heap::NotifyContextDisposed() {
if (FLAG_parallel_recompilation) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
flush_monomorphic_ics_ = true;
return ++contexts_disposed_;
}
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
@@ -3253,6 +3263,12 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return !RootCanBeWrittenAfterInitialization(root_index) &&
!InNewSpace(roots_array_start()[root_index]);
}
Object* RegExpResultsCache::Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
@@ -4465,7 +4481,8 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
}
MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure, bool allocate_properties) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4476,11 +4493,15 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
FixedArray* properties;
if (allocate_properties) {
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
if (!maybe_properties->ToObject(&properties)) return maybe_properties;
if (!maybe_properties->To(&properties)) return maybe_properties;
}
} else {
properties = empty_fixed_array();
}
// Allocate the JSObject.
@@ -4492,17 +4513,15 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements() ||
JSObject::cast(obj)->HasExternalArrayElements());
return obj;
}
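
Editor's note: the allocation path above leans on the MaybeObject To()/propagate idiom: every fallible step either delivers its result or returns the failure unchanged to the caller. A toy version of the shape (the Maybe type here is a hypothetical stand-in; V8's real MaybeObject encodes failure in the pointer itself):

#include <cstdio>

template <typename T>
struct Maybe {
  bool ok;
  T value;
  bool To(T* out) const {
    if (ok) *out = value;
    return ok;
  }
};

Maybe<int> ParseDigit(char c) {
  if (c < '0' || c > '9') return {false, 0};
  return {true, c - '0'};
}

Maybe<int> ParseTwoDigits(const char* s) {
  int hi, lo;
  Maybe<int> maybe_hi = ParseDigit(s[0]);
  if (!maybe_hi.To(&hi)) return maybe_hi;  // propagate the failure unchanged
  Maybe<int> maybe_lo = ParseDigit(s[1]);
  if (!maybe_lo.To(&lo)) return maybe_lo;
  return {true, hi * 10 + lo};
}

int main() {
  int n;
  if (ParseTwoDigits("42").To(&n)) std::printf("%d\n", n);  // 42
  return 0;
}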
MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
Handle<AllocationSite> allocation_site) {
MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
Map* map, Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4515,9 +4534,9 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
// Allocate the backing storage for the properties.
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
FixedArray* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
if (!maybe_properties->ToObject(&properties)) return maybe_properties;
if (!maybe_properties->To(&properties)) return maybe_properties;
}
// Allocate the JSObject.
@@ -4529,9 +4548,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}

9
deps/v8/src/heap.h

@@ -736,7 +736,7 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure = NOT_TENURED);
Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
Map* map, Handle<AllocationSite> allocation_site);
@@ -1254,10 +1254,7 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
int NotifyContextDisposed() {
flush_monomorphic_ics_ = true;
return ++contexts_disposed_;
}
int NotifyContextDisposed();
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -1618,6 +1615,8 @@ class Heap {
// Generated code can embed direct references to non-writable roots if
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
// Generated code can treat direct references to this root as constant.
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true,

2
deps/v8/src/hydrogen-dce.cc

@@ -118,8 +118,10 @@ void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
HPhi* phi = worklist.RemoveLast();
HBasicBlock* block = phi->block();
phi->DeleteAndReplaceWith(NULL);
if (phi->HasMergedIndex()) {
block->RecordDeletedPhi(phi->merged_index());
}
}
}
} } // namespace v8::internal

230
deps/v8/src/hydrogen-escape-analysis.cc

@@ -63,4 +63,234 @@ void HEscapeAnalysisPhase::CollectCapturedValues() {
}
HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
Zone* zone = graph()->zone();
HCapturedObject* state = new(zone) HCapturedObject(number_of_values_, zone);
state->InsertAfter(previous);
return state;
}
// Create a new state for replacing HAllocate instructions.
HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
HInstruction* previous) {
HConstant* undefined = graph()->GetConstantUndefined();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
state->SetOperandAt(index, undefined);
}
return state;
}
// Create a new state full of phis for loop header entries.
HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
HInstruction* previous, HCapturedObject* old_state) {
HBasicBlock* block = previous->block();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
HPhi* phi = NewPhiAndInsert(block, operand, index);
state->SetOperandAt(index, phi);
}
return state;
}
// Create a new state by copying an existing one.
HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
HInstruction* previous, HCapturedObject* old_state) {
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
state->SetOperandAt(index, operand);
}
return state;
}
// Insert a newly created phi into the given block and fill all incoming
// edges with the given value.
HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(
HBasicBlock* block, HValue* incoming_value, int index) {
Zone* zone = graph()->zone();
HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
for (int i = 0; i < block->predecessors()->length(); i++) {
phi->AddInput(incoming_value);
}
block->AddPhi(phi);
return phi;
}
// Performs a forward data-flow analysis of all loads and stores on the
// given captured allocation. This uses a reverse post-order iteration
// over affected basic blocks. All non-escaping instructions are handled
// and replaced during the analysis.
void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
HBasicBlock* allocate_block = allocate->block();
block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
// Iterate all blocks starting with the allocation block, since the
// allocation cannot dominate blocks that come before.
int start = allocate_block->block_id();
for (int i = start; i < graph()->blocks()->length(); i++) {
HBasicBlock* block = graph()->blocks()->at(i);
HCapturedObject* state = StateAt(block);
// Skip blocks that are not dominated by the captured allocation.
if (!allocate_block->Dominates(block) && allocate_block != block) continue;
if (FLAG_trace_escape_analysis) {
PrintF("Analyzing data-flow in B%d\n", block->block_id());
}
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
switch (instr->opcode()) {
case HValue::kAllocate: {
if (instr != allocate) continue;
state = NewStateForAllocation(allocate);
break;
}
case HValue::kLoadNamedField: {
HLoadNamedField* load = HLoadNamedField::cast(instr);
int index = load->access().offset() / kPointerSize;
if (load->object() != allocate) continue;
ASSERT(load->access().IsInobject());
HValue* replacement = state->OperandAt(index);
load->DeleteAndReplaceWith(replacement);
if (FLAG_trace_escape_analysis) {
PrintF("Replacing load #%d with #%d (%s)\n", instr->id(),
replacement->id(), replacement->Mnemonic());
}
break;
}
case HValue::kStoreNamedField: {
HStoreNamedField* store = HStoreNamedField::cast(instr);
int index = store->access().offset() / kPointerSize;
if (store->object() != allocate) continue;
ASSERT(store->access().IsInobject());
state = NewStateCopy(store, state);
state->SetOperandAt(index, store->value());
if (store->has_transition()) {
state->SetOperandAt(0, store->transition());
}
store->DeleteAndReplaceWith(NULL);
if (FLAG_trace_escape_analysis) {
PrintF("Replacing store #%d%s\n", instr->id(),
store->has_transition() ? " (with transition)" : "");
}
break;
}
case HValue::kSimulate: {
HSimulate* simulate = HSimulate::cast(instr);
// TODO(mstarzinger): This doesn't track deltas for values on the
// operand stack yet. Find a repro test case and fix this.
for (int i = 0; i < simulate->OperandCount(); i++) {
if (simulate->OperandAt(i) != allocate) continue;
simulate->SetOperandAt(i, state);
}
break;
}
case HValue::kArgumentsObject:
case HValue::kCapturedObject: {
for (int i = 0; i < instr->OperandCount(); i++) {
if (instr->OperandAt(i) != allocate) continue;
instr->SetOperandAt(i, state);
}
break;
}
case HValue::kCheckHeapObject: {
HCheckHeapObject* check = HCheckHeapObject::cast(instr);
if (check->value() != allocate) continue;
check->DeleteAndReplaceWith(NULL);
break;
}
case HValue::kCheckMaps: {
HCheckMaps* mapcheck = HCheckMaps::cast(instr);
if (mapcheck->value() != allocate) continue;
// TODO(mstarzinger): This approach breaks if the tracked map value
// is not a HConstant. Find a repro test case and fix this.
for (HUseIterator it(mapcheck->uses()); !it.Done(); it.Advance()) {
if (!it.value()->IsLoadNamedField()) continue;
HLoadNamedField* load = HLoadNamedField::cast(it.value());
ASSERT(load->typecheck() == mapcheck);
load->ClearTypeCheck();
}
ASSERT(mapcheck->HasNoUses());
mapcheck->DeleteAndReplaceWith(NULL);
break;
}
default:
// Nothing to see here, move along ...
break;
}
}
// Propagate the block state forward to all successor blocks.
for (int i = 0; i < block->end()->SuccessorCount(); i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
if (!allocate_block->Dominates(succ)) continue;
if (succ->predecessors()->length() == 1) {
// Case 1: This is the only predecessor, just reuse state.
SetStateAt(succ, state);
} else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
// Case 2: This is a state that enters a loop header, be
// pessimistic about loop headers, add phis for all values.
SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
} else if (StateAt(succ) == NULL) {
// Case 3: This is the first state propagated forward to the
// successor, leave a copy of the current state.
SetStateAt(succ, NewStateCopy(succ->first(), state));
} else {
// Case 4: This is a state that needs merging with previously
// propagated states, potentially introducing new phis lazily or
// adding values to existing phis.
HCapturedObject* succ_state = StateAt(succ);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = state->OperandAt(index);
HValue* succ_operand = succ_state->OperandAt(index);
if (succ_operand->IsPhi() && succ_operand->block() == succ) {
// Phi already exists, add operand.
HPhi* phi = HPhi::cast(succ_operand);
phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
} else if (succ_operand != operand) {
// Phi does not exist, introduce one.
HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
succ_state->SetOperandAt(index, phi);
}
}
}
}
}
// All uses have been handled.
ASSERT(allocate->HasNoUses());
allocate->DeleteAndReplaceWith(NULL);
}
void HEscapeAnalysisPhase::PerformScalarReplacement() {
for (int i = 0; i < captured_.length(); i++) {
HAllocate* allocate = HAllocate::cast(captured_.at(i));
// Compute number of scalar values and start with clean slate.
if (!allocate->size()->IsInteger32Constant()) continue;
int size_in_bytes = allocate->size()->GetInteger32Constant();
number_of_values_ = size_in_bytes / kPointerSize;
block_states_.Clear();
// Perform actual analysis steps.
AnalyzeDataFlow(allocate);
cumulative_values_ += number_of_values_;
ASSERT(allocate->HasNoUses());
ASSERT(!allocate->IsLinked());
}
}
} } // namespace v8::internal
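
Editor's note: in source terms, the effect of the scalar replacement pass above is to dissolve a non-escaping allocation into one tracked value per in-object field. A C++ analogy of the transformation (a sketch only, not V8 code):

struct Point { int x; int y; };

int Before() {
  Point* p = new Point{1, 2};  // captured allocation
  int result = p->x + p->y;    // loads from the object
  delete p;
  return result;
}

int After() {
  int p_x = 1;                 // each in-object field becomes a scalar value
  int p_y = 2;
  return p_x + p_y;            // loads replaced by the values themselves
}

int main() { return Before() == After() ? 0 : 1; }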

35
deps/v8/src/hydrogen-escape-analysis.h

@@ -38,17 +38,48 @@ namespace internal {
class HEscapeAnalysisPhase : public HPhase {
public:
explicit HEscapeAnalysisPhase(HGraph* graph)
: HPhase("H_Escape analysis", graph), captured_(0, zone()) { }
: HPhase("H_Escape analysis", graph),
captured_(0, zone()),
number_of_values_(0),
cumulative_values_(0),
block_states_(graph->blocks()->length(), zone()) { }
void Run() {
CollectCapturedValues();
PerformScalarReplacement();
}
private:
void CollectCapturedValues();
void CollectIfNoEscapingUses(HInstruction* instr);
void PerformScalarReplacement();
void AnalyzeDataFlow(HInstruction* instr);
ZoneList<HValue*> captured_;
HCapturedObject* NewState(HInstruction* prev);
HCapturedObject* NewStateForAllocation(HInstruction* prev);
HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
HCapturedObject* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
block_states_.Set(block->block_id(), state);
}
// List of allocations captured during collection phase.
ZoneList<HInstruction*> captured_;
// Number of scalar values tracked during scalar replacement phase.
int number_of_values_;
int cumulative_values_;
// Map of block IDs to the data-flow state at block entry during the
// scalar replacement phase.
ZoneList<HCapturedObject*> block_states_;
};

2
deps/v8/src/hydrogen-gvn.h

@@ -48,7 +48,7 @@ class HGlobalValueNumberingPhase : public HPhase {
// instructions during the first pass.
if (FLAG_smi_only_arrays && removed_side_effects_) {
Analyze();
ASSERT(!removed_side_effects_);
// TODO(danno): Turn this into a fixpoint iteration.
}
}

222
deps/v8/src/hydrogen-instructions.cc

@@ -388,7 +388,7 @@ HUseListNode* HUseListNode::tail() {
}
bool HValue::CheckUsesForFlag(Flag f) {
bool HValue::CheckUsesForFlag(Flag f) const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
@@ -397,7 +397,7 @@ bool HValue::CheckUsesForFlag(Flag f) {
}
bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) {
bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
bool return_value = false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
@@ -1285,14 +1285,26 @@ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
if (op() == kMathRound || op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
// If the input is integer32 then we replace the floor instruction
// with its input.
if (val->representation().IsSmiOrInteger32()) return val;
// If the input is smi or integer32 then we replace the instruction with its
// input.
if (val->representation().IsSmiOrInteger32()) {
if (!val->representation().Equals(representation())) {
HChange* result = new(block()->zone()) HChange(
val, representation(), false, false);
result->InsertBefore(this);
return result;
}
return val;
}
}
if (op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
if (val->IsDiv() && (val->UseCount() == 1)) {
HDiv* hdiv = HDiv::cast(val);
HValue* left = hdiv->left();
@@ -1302,7 +1314,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (new_left == NULL &&
hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
new_left = new(block()->zone()) HChange(
left, Representation::Integer32(), false, false, false);
left, Representation::Integer32(), false, false);
HChange::cast(new_left)->InsertBefore(this);
}
HValue* new_right =
@@ -1313,7 +1325,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
#endif
hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
new_right = new(block()->zone()) HChange(
right, Representation::Integer32(), false, false, false);
right, Representation::Integer32(), false, false);
HChange::cast(new_right)->InsertBefore(this);
}
@@ -2773,6 +2785,18 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
}
void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
void HCompareHoleAndBranch::InferRepresentation(
HInferRepresentationPhase* h_infer) {
ChangeRepresentation(object()->representation());
}
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -2832,119 +2856,6 @@ void HLoadNamedField::PrintDataTo(StringStream* stream) {
}
// Returns true if an instance of this map can never find a property with this
// name in its prototype chain. This means all prototypes up to the top are
// fast and don't have the name in them. It would be good if we could optimize
// polymorphic loads where the property is sometimes found in the prototype
// chain.
static bool PrototypeChainCanNeverResolve(
Handle<Map> map, Handle<String> name) {
Isolate* isolate = map->GetIsolate();
Object* current = map->prototype();
while (current != isolate->heap()->null_value()) {
if (current->IsJSGlobalProxy() ||
current->IsGlobalObject() ||
!current->IsJSObject() ||
JSObject::cast(current)->map()->has_named_interceptor() ||
JSObject::cast(current)->IsAccessCheckNeeded() ||
!JSObject::cast(current)->HasFastProperties()) {
return false;
}
LookupResult lookup(isolate);
Map* map = JSObject::cast(current)->map();
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) return false;
if (!lookup.IsCacheable()) return false;
current = JSObject::cast(current)->GetPrototype();
}
return true;
}
HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
Handle<String> name,
Zone* zone)
: types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
types_unique_ids_(0, zone),
name_unique_id_(),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
SmallMapList negative_lookups;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
// Deprecated maps are updated to the current map in the type oracle.
ASSERT(!map->is_deprecated());
LookupResult lookup(map->GetIsolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case FIELD: {
int index = lookup.GetLocalFieldIndexFromMap(*map);
if (index < 0) {
SetGVNFlag(kDependsOnInobjectFields);
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
if (FLAG_track_double_fields &&
lookup.representation().IsDouble()) {
// Since the value needs to be boxed, use a generic handler for
// loading doubles.
continue;
}
types_.Add(types->at(i), zone);
break;
}
case CONSTANT:
types_.Add(types->at(i), zone);
break;
case CALLBACKS:
break;
case TRANSITION:
case INTERCEPTOR:
case NONEXISTENT:
case NORMAL:
case HANDLER:
UNREACHABLE();
break;
}
} else if (lookup.IsCacheable() &&
// For dicts the lookup on the map will fail, but the object may
// contain the property so we cannot generate a negative lookup
// (which would just be a map check and return undefined).
!map->is_dictionary_map() &&
!map->has_named_interceptor() &&
PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
}
bool need_generic =
(types->length() != negative_lookups.length() + types_.length());
if (!need_generic && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
for (int i = 0; i < negative_lookups.length(); i++) {
types_.Add(negative_lookups.at(i), zone);
}
} else {
// We don't have an easy way to handle both a call (to the generic stub) and
// a deopt in the same hydrogen instruction, so in this case we don't add
// the negative lookups which can deopt - just let the generic stub handle
// them.
SetAllSideEffects();
need_generic_ = true;
}
}
HCheckMaps* HCheckMaps::New(Zone* zone,
HValue* context,
HValue* value,
@@ -2952,8 +2863,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
CompilationInfo* info,
HValue* typecheck) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
check_map->map_set_.Add(map, zone);
check_map->has_migration_target_ = map->is_migration_target();
check_map->Add(map, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->InstanceOf(map)) {
@@ -2973,46 +2883,6 @@ void HCheckMaps::FinalizeUniqueValueId() {
}
void HLoadNamedFieldPolymorphic::FinalizeUniqueValueId() {
if (!types_unique_ids_.is_empty()) return;
Zone* zone = block()->zone();
types_unique_ids_.Initialize(types_.length(), zone);
for (int i = 0; i < types_.length(); i++) {
types_unique_ids_.Add(UniqueValueId(types_.at(i)), zone);
}
name_unique_id_ = UniqueValueId(name_);
}
bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
ASSERT_EQ(types_.length(), types_unique_ids_.length());
HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
if (name_unique_id_ != other->name_unique_id_) return false;
if (types_unique_ids_.length() != other->types_unique_ids_.length()) {
return false;
}
if (need_generic_ != other->need_generic_) return false;
for (int i = 0; i < types_unique_ids_.length(); i++) {
bool found = false;
for (int j = 0; j < types_unique_ids_.length(); j++) {
if (types_unique_ids_.at(j) == other->types_unique_ids_.at(i)) {
found = true;
break;
}
}
if (!found) return false;
}
return true;
}
void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
}
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -3085,18 +2955,8 @@ bool HLoadKeyed::UsesMustHandleHole() const {
bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
if (!IsFastDoubleElementsKind(elements_kind())) {
return false;
}
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
return false;
}
}
return true;
return IsFastDoubleElementsKind(elements_kind()) &&
CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
}
@@ -3313,7 +3173,9 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
// If any of the actual input representation is more general than what we
// have so far but not Tagged, use that representation instead.
Representation input_rep = value()->representation();
if (!input_rep.IsTagged()) rep = rep.generalize(input_rep);
if (!input_rep.IsTagged()) {
rep = rep.generalize(input_rep);
}
return rep;
}
@@ -3869,10 +3731,10 @@ void HPhi::SimplifyConstantInputs() {
DoubleToInt32(operand->DoubleValue()));
integer_input->InsertAfter(operand);
SetOperandAt(i, integer_input);
} else if (operand == graph->GetConstantTrue()) {
SetOperandAt(i, graph->GetConstant1());
} else {
// This catches |false|, |undefined|, strings and objects.
} else if (operand->HasBooleanValue()) {
SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
: graph->GetConstant0());
} else if (operand->ImmortalImmovable()) {
SetOperandAt(i, graph->GetConstant0());
}
}

197
deps/v8/src/hydrogen-instructions.h

@@ -86,6 +86,7 @@ class LChunkBuilder;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CapturedObject) \
V(Change) \
V(CheckFunction) \
V(CheckHeapObject) \
@@ -96,6 +97,7 @@ class LChunkBuilder;
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CompareHoleAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
@@ -141,7 +143,6 @@ class LChunkBuilder;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
@@ -801,10 +802,10 @@ class HValue: public ZoneObject {
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f);
bool CheckUsesForFlag(Flag f) const;
// Returns true if the flag specified is set for all uses, and this set
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f);
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
GVNFlagSet gvn_flags() const { return gvn_flags_; }
void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
@@ -1247,19 +1248,23 @@ class HDummyUse: public HTemplateInstruction<1> {
class HDeoptimize: public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HDeoptimize, Deoptimizer::BailoutType);
DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*,
Deoptimizer::BailoutType);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
const char* reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
explicit HDeoptimize(Deoptimizer::BailoutType type) : type_(type) {}
explicit HDeoptimize(const char* reason, Deoptimizer::BailoutType type)
: reason_(reason), type_(type) {}
const char* reason_;
Deoptimizer::BailoutType type_;
};
@@ -1517,15 +1522,13 @@ class HChange: public HUnaryOperation {
HChange(HValue* value,
Representation to,
bool is_truncating_to_smi,
bool is_truncating_to_int32,
bool allow_undefined_as_nan)
bool is_truncating_to_int32)
: HUnaryOperation(value) {
ASSERT(!value->representation().IsNone());
ASSERT(!to.IsNone());
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
if (allow_undefined_as_nan) SetFlag(kAllowUndefinedAsNaN);
if (is_truncating_to_smi) SetFlag(kTruncatingToSmi);
if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
@@ -1536,15 +1539,16 @@ class HChange: public HUnaryOperation {
}
}
bool can_convert_undefined_to_nan() {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
bool allow_undefined_as_nan() const {
return CheckFlag(kAllowUndefinedAsNaN);
}
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
@@ -2453,7 +2457,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
// TODO(verwaest): Set representation to flexible int starting as smi.
set_representation(Representation::Integer32());
break;
case kMathAbs:
@@ -2531,8 +2534,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
HValue *typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
check_map->map_set_.Add(maps->at(i), zone);
check_map->has_migration_target_ |= maps->at(i)->is_migration_target();
check_map->Add(maps->at(i), zone);
}
check_map->map_set_.Sort();
return check_map;
@@ -2576,6 +2578,14 @@ class HCheckMaps: public HTemplateInstruction<2> {
}
private:
void Add(Handle<Map> map, Zone* zone) {
map_set_.Add(map, zone);
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
SetGVNFlag(kChangesNewSpacePromotion);
}
}
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
@@ -2760,6 +2770,7 @@ class HCheckHeapObject: public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
virtual bool HasEscapingOperandAt(int index) { return false; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3018,7 +3029,7 @@ class HPhi: public HValue {
non_phi_uses_[i] = 0;
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
ASSERT(merged_index >= 0 || merged_index == kInvalidMergedIndex);
SetFlag(kFlexibleRepresentation);
SetFlag(kAllowUndefinedAsNaN);
}
@@ -3041,6 +3052,7 @@ class HPhi: public HValue {
bool HasRealUses();
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
int merged_index() const { return merged_index_; }
@@ -3103,6 +3115,9 @@ class HPhi: public HValue {
void SimplifyConstantInputs();
// Marker value representing an invalid merge index.
static const int kInvalidMergedIndex = -1;
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -3123,21 +3138,10 @@
};
class HArgumentsObject: public HTemplateInstruction<0> {
// Common base class for HArgumentsObject and HCapturedObject.
class HDematerializedObject: public HTemplateInstruction<0> {
public:
static HArgumentsObject* New(Zone* zone,
HValue* context,
int count) {
return new(zone) HArgumentsObject(count, zone);
}
const ZoneList<HValue*>* arguments_values() const { return &values_; }
int arguments_count() const { return values_.length(); }
void AddArgument(HValue* argument, Zone* zone) {
values_.Add(NULL, zone); // Resize list.
SetOperandAt(values_.length() - 1, argument);
}
HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) const { return values_[index]; }
@@ -3147,22 +3151,61 @@ class HArgumentsObject: public HTemplateInstruction<0> {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
values_[index] = value;
}
// List of values tracked by this marker.
ZoneList<HValue*> values_;
private:
virtual bool IsDeletable() const { return true; }
};
class HArgumentsObject: public HDematerializedObject {
public:
static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
return new(zone) HArgumentsObject(count, zone);
}
// The values contain a list of all elements in the arguments object
// including the receiver object, which is skipped when materializing.
const ZoneList<HValue*>* arguments_values() const { return &values_; }
int arguments_count() const { return values_.length(); }
void AddArgument(HValue* argument, Zone* zone) {
values_.Add(NULL, zone); // Resize list.
SetOperandAt(values_.length() - 1, argument);
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
private:
HArgumentsObject(int count, Zone* zone) : values_(count, zone) {
HArgumentsObject(int count, Zone* zone)
: HDematerializedObject(count, zone) {
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
};
virtual bool IsDeletable() const { return true; }
ZoneList<HValue*> values_;
class HCapturedObject: public HDematerializedObject {
public:
HCapturedObject(int length, Zone* zone)
: HDematerializedObject(length, zone) {
set_representation(Representation::Tagged());
values_.AddBlock(NULL, length, zone); // Resize list.
}
// The values contain a list of all in-object properties inside the
// captured object, indexed by field index. Properties in the
// properties or elements backing store are not tracked here.
const ZoneList<HValue*>* values() const { return &values_; }
int length() const { return values_.length(); }
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
};
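Both marker classes are now thin: HDematerializedObject owns the ZoneList and the operand plumbing (OperandCount, OperandAt, InternalSetOperandAt, the None input representation, and deletability), while the subclasses only say what the tracked values mean — call arguments including the receiver for HArgumentsObject, in-object fields for HCapturedObject. A rough sketch of the shape of this refactoring, with simplified stand-in types:

#include <vector>

// Base class: owns the value list and the generic operand accessors.
class DematerializedObject {
 public:
  explicit DematerializedObject(int count) { values_.reserve(count); }
  int OperandCount() const { return static_cast<int>(values_.size()); }
 protected:
  std::vector<int> values_;  // stand-in for ZoneList<HValue*>
};

// Subclasses only define the meaning of the tracked values.
class ArgumentsObject : public DematerializedObject {
 public:
  using DematerializedObject::DematerializedObject;
  // values_[0] is the receiver and is skipped when materializing.
};

class CapturedObject : public DematerializedObject {
 public:
  using DematerializedObject::DematerializedObject;
  // values_[i] is the in-object property at field index i.
};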
@ -3187,8 +3230,9 @@ class HConstant: public HTemplateInstruction<0> {
}
bool InstanceOf(Handle<Map> map) {
return handle_->IsJSObject() &&
Handle<JSObject>::cast(handle_)->map() == *map;
Handle<Object> constant_object = handle();
return constant_object->IsJSObject() &&
Handle<JSObject>::cast(constant_object)->map() == *map;
}
bool IsSpecialDouble() const {
@ -3299,6 +3343,7 @@ class HConstant: public HTemplateInstruction<0> {
return external_reference_value_;
}
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
virtual intptr_t Hashcode() {
@ -3915,6 +3960,32 @@ class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
};
class HCompareHoleAndBranch: public HTemplateControlInstruction<2, 1> {
public:
// TODO(danno): make this private when the IfBuilder properly constructs
// control flow instructions.
explicit HCompareHoleAndBranch(HValue* object) {
SetFlag(kFlexibleRepresentation);
SetFlag(kAllowUndefinedAsNaN);
SetOperandAt(0, object);
}
DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
HValue* object() { return OperandAt(0); }
virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
};
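HCompareHoleAndBranch is needed because the hole has two encodings: in tagged arrays it is a distinguished heap sentinel that can be compared by pointer, while in unboxed double arrays it is a reserved NaN bit pattern that ordinary floating-point comparison cannot detect (NaN compares unequal to everything, itself included). A sketch of the double-side test; the sentinel constant here is illustrative, not V8's actual value:

#include <cstdint>
#include <cstring>

constexpr uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFull;  // assumed sentinel

bool IsTheHole(double d) {
  // Compare bit patterns: d == hole_nan would be false for any NaN.
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits == kHoleNanBits;
}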
class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
public:
// TODO(danno): make this private when the IfBuilder properly constructs
@ -4298,6 +4369,11 @@ class HAdd: public HArithmeticBinaryOperation {
}
}
virtual void RepresentationChanged(Representation to) {
if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN);
HArithmeticBinaryOperation::RepresentationChanged(to);
}
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
@ -5512,6 +5588,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
}
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
void ClearTypeCheck() { SetOperandAt(1, object()); }
HObjectAccess access() const { return access_; }
Representation field_representation() const {
return access_.representation();
@ -5569,45 +5646,6 @@ class HLoadNamedField: public HTemplateInstruction<2> {
};
class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
public:
HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
Handle<String> name,
Zone* zone);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
SmallMapList* types() { return &types_; }
Handle<String> name() { return name_; }
bool need_generic() { return need_generic_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
static const int kMaxLoadPolymorphism = 4;
virtual void FinalizeUniqueValueId();
protected:
virtual bool DataEquals(HValue* value);
private:
SmallMapList types_;
Handle<String> name_;
ZoneList<UniqueValueId> types_unique_ids_;
UniqueValueId name_unique_id_;
bool need_generic_;
};
class HLoadNamedGeneric: public HTemplateInstruction<2> {
public:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
@ -6007,7 +6045,6 @@ class HStoreKeyed
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind);
virtual bool HasEscapingOperandAt(int index) { return index != 0; }
virtual Representation RequiredInputRepresentation(int index) {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
@ -6526,8 +6563,11 @@ class HToFastProperties: public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
set_representation(Representation::Tagged());
SetGVNFlag(kChangesNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
// object literals via a runtime call.
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
@ -6535,7 +6575,6 @@ class HToFastProperties: public HUnaryOperation {
ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
function->function_id == Runtime::kCreateObjectLiteralShallow);
#endif
set_representation(Representation::Tagged());
}
virtual bool IsDeletable() const { return true; }

27 deps/v8/src/hydrogen-mark-deoptimize.cc

@ -34,14 +34,9 @@ void HMarkDeoptimizeOnUndefinedPhase::Run() {
const ZoneList<HPhi*>* phi_list = graph()->phi_list();
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
if (!use_value->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
!phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
ProcessPhi(phi);
break;
}
}
}
}
}
@ -68,4 +63,22 @@ void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
}
}
void HComputeChangeUndefinedToNaN::Run() {
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
for (int i = 0; i < blocks->length(); ++i) {
const HBasicBlock* block(blocks->at(i));
for (HInstruction* current = block->first(); current != NULL; ) {
HInstruction* next = current->next();
if (current->IsChange()) {
if (HChange::cast(current)->can_convert_undefined_to_nan()) {
current->SetFlag(HValue::kAllowUndefinedAsNaN);
}
}
current = next;
}
}
}
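Note the iteration discipline in the new phase: next is captured before current is processed — the standard defensive idiom for passes that may unlink the current instruction mid-walk. The same pattern in miniature:

// Mutation-safe traversal of a singly linked list.
struct Node { Node* next; };

void Walk(Node* first) {
  for (Node* current = first; current != NULL; ) {
    Node* next = current->next;  // capture before current may be unlinked
    // ... visit current, possibly removing it ...
    current = next;
  }
}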
} } // namespace v8::internal

12 deps/v8/src/hydrogen-mark-deoptimize.h

@ -58,6 +58,18 @@ class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
};
class HComputeChangeUndefinedToNaN : public HPhase {
public:
explicit HComputeChangeUndefinedToNaN(HGraph* graph)
: HPhase("H_Compute change undefined to nan", graph) {}
void Run();
private:
DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
};
} } // namespace v8::internal
#endif // V8_HYDROGEN_MARK_DEOPTIMIZE_H_

1 deps/v8/src/hydrogen-osr.cc

@ -117,6 +117,7 @@ void HOsrBuilder::FinishOsrValues() {
const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
ASSERT(phi->HasMergedIndex());
osr_values_->at(phi->merged_index())->set_incoming_value(phi);
}
}

10 deps/v8/src/hydrogen-representation-changes.cc

@ -47,8 +47,6 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
HInstruction* new_value = NULL;
bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
bool allow_undefined_as_nan =
use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
@ -61,10 +59,8 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
}
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(value, to,
is_truncating_to_smi,
is_truncating_to_int,
allow_undefined_as_nan);
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
}
new_value->InsertBefore(next);
@ -127,7 +123,7 @@ void HRepresentationChangesPhase::Run() {
!(input_representation.IsInteger32() &&
use->CheckFlag(HValue::kTruncatingToInt32))) ||
(phi->representation().IsSmi() &&
!(input_representation.IsSmi() ||
!(input_representation.IsSmi() &&
use->CheckFlag(HValue::kTruncatingToSmi)))) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",

279 deps/v8/src/hydrogen.cc

@ -148,6 +148,16 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
}
HPhi* HBasicBlock::AddNewPhi(int merged_index) {
if (graph()->IsInsideNoSideEffectsScope()) {
merged_index = HPhi::kInvalidMergedIndex;
}
HPhi* phi = new(zone()) HPhi(merged_index, zone());
AddPhi(phi);
return phi;
}
HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
RemovableSimulate removable) {
ASSERT(HasEnvironment());
@ -203,7 +213,7 @@ void HBasicBlock::Goto(HBasicBlock* block,
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
if (add_simulate) AddSimulate(BailoutId::None());
if (add_simulate) AddNewSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(block);
Finish(instr);
}
@ -219,7 +229,7 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value,
AddInstruction(new(zone()) HLeaveInlined());
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
AddSimulate(BailoutId::None());
AddNewSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(target);
Finish(instr);
}
@ -824,14 +834,14 @@ void HGraphBuilder::IfBuilder::Else() {
}
void HGraphBuilder::IfBuilder::Deopt() {
void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
ASSERT(did_then_);
if (did_else_) {
deopt_else_ = true;
} else {
deopt_then_ = true;
}
builder_->Add<HDeoptimize>(Deoptimizer::EAGER);
builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
}
@ -904,8 +914,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
HValue* terminating,
Token::Value token) {
HEnvironment* env = builder_->environment();
phi_ = new(zone()) HPhi(env->values()->length(), zone());
header_block_->AddPhi(phi_);
phi_ = header_block_->AddNewPhi(env->values()->length());
phi_->AddInput(initial);
env->Push(initial);
builder_->current_block()->GotoNoSimulate(header_block_);
@ -982,7 +991,7 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
if (no_side_effects_scope_count_ > 0) {
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
return instr;
@ -1006,8 +1015,8 @@ void HGraphBuilder::AddIncrementCounter(StatsCounter* counter,
void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(no_side_effects_scope_count_ == 0);
current_block()->AddSimulate(id, removable);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
current_block()->AddNewSimulate(id, removable);
}
@ -1034,10 +1043,10 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
void HGraphBuilder::FinishExitWithHardDeoptimization(
HBasicBlock* continuation) {
const char* reason, HBasicBlock* continuation) {
PadEnvironmentForContinuation(current_block(), continuation);
Add<HDeoptimize>(Deoptimizer::EAGER);
if (no_side_effects_scope_count_ > 0) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
if (graph()->IsInsideNoSideEffectsScope()) {
current_block()->GotoNoSimulate(continuation);
} else {
current_block()->Goto(continuation);
@ -1108,7 +1117,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
key_checker.ElseDeopt();
key_checker.ElseDeopt("Key out of capacity range");
key_checker.End();
HValue* new_capacity = BuildNewElementsCapacity(key);
@ -1182,7 +1191,7 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
HInstruction* elements = AddLoadElements(object);
HInstruction* elements = AddLoadElements(object, NULL);
HInstruction* empty_fixed_array = Add<HConstant>(
isolate()->factory()->empty_fixed_array());
@ -1264,7 +1273,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
negative_checker.Then();
HInstruction* result = AddExternalArrayElementAccess(
external_elements, key, val, bounds_check, elements_kind, is_store);
negative_checker.ElseDeopt();
negative_checker.ElseDeopt("Negative key encountered");
length_checker.End();
return result;
} else {
@ -1626,13 +1635,26 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
from_elements_kind,
ALLOW_RETURN_HOLE);
ElementsKind holey_kind = IsFastSmiElementsKind(to_elements_kind)
ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
IsFastSmiElementsKind(to_elements_kind))
? FAST_HOLEY_ELEMENTS : to_elements_kind;
HInstruction* holey_store = Add<HStoreKeyed>(to_elements, key,
element, holey_kind);
// Allow NaN hole values to be converted to their tagged counterparts.
if (IsFastHoleyElementsKind(to_elements_kind)) {
holey_store->SetFlag(HValue::kAllowUndefinedAsNaN);
if (IsHoleyElementsKind(from_elements_kind) &&
from_elements_kind != to_elements_kind) {
IfBuilder if_hole(this);
if_hole.If<HCompareHoleAndBranch>(element);
if_hole.Then();
HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
: graph()->GetConstantHole();
Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
if_hole.Else();
HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
if_hole.End();
} else {
HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
builder.EndBody();
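Where the old code stored everything through a holey elements kind and relied on the undefined-as-NaN flag alone, the new loop makes hole transfer explicit: when copying from a holey kind into a different kind, each element loaded with ALLOW_RETURN_HOLE is tested with HCompareHoleAndBranch, and holes are rewritten into whatever encoding the destination expects. Scalar pseudocode for one iteration, with hypothetical helper names:

// One iteration of the copy loop, scalarized.
if (IsTheHole(element)) {
  // Re-encode the hole for the destination kind.
  to[key] = to_kind_is_double ? hole_nan_as_double : the_hole_sentinel;
} else {
  to[key] = element;  // store still tolerates undefined-as-NaN conversion
}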
@ -1691,7 +1713,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
if (length > 0) {
// Get hold of the elements array of the boilerplate and set up the
// elements pointer in the resulting object.
HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* boilerplate_elements = AddLoadElements(boilerplate, NULL);
HValue* object_elements = Add<HInnerAllocatedObject>(object, elems_offset);
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
object_elements);
@ -1751,7 +1773,7 @@ void HGraphBuilder::BuildCompareNil(
// emitted below is the actual monomorphic map.
BuildCheckMap(value, type->Classes().Current());
} else {
if_nil.Deopt();
if_nil.Deopt("Too many undetectable types");
}
}
@ -1824,7 +1846,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
builder()->BuildLoadNamedField(constructor_function_, access));
builder()->BuildLoadNamedField(constructor_function_, access, NULL));
}
HInstruction* native_context = builder()->BuildGetNativeContext();
@ -1845,7 +1867,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
builder()->BuildLoadNamedField(constructor_function_, access));
builder()->BuildLoadNamedField(constructor_function_, access, NULL));
}
@ -2049,7 +2071,8 @@ HGraph::HGraph(CompilationInfo* info)
has_soft_deoptimize_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0) {
maximum_environment_size_(0),
no_side_effects_scope_count_(0) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@ -2916,6 +2939,9 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
// Remove dead code and phis
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@ -2939,12 +2965,11 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
if (FLAG_use_range) Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
@ -3346,7 +3371,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
if (!clause->compare_type()->Is(Type::Smi())) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Non-smi switch type", Deoptimizer::SOFT);
}
HCompareNumericAndBranch* compare_ =
@ -4287,6 +4312,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int data_size = 0;
int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
HCheckMaps* type_check = NULL;
if (IsFastLiteral(original_boilerplate_object,
kMaxFastLiteralDepth,
&max_properties,
@ -4324,7 +4350,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
isolate());
Add<HCheckMaps>(literal, map, top_info());
type_check = Add<HCheckMaps>(literal, map, top_info());
}
// The array is expected in the bailout environment during computation
@ -4345,7 +4371,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
elements = AddLoadElements(literal);
elements = AddLoadElements(literal, type_check);
HValue* key = Add<HConstant>(i);
@ -4399,9 +4425,10 @@ static bool ComputeLoadStoreField(Handle<Map> type,
}
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
Add<HCheckMaps>(object, map, top_info());
return Add<HCheckMaps>(object, map, top_info());
}
@ -4567,8 +4594,8 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (count == types->length()) {
// Everything matched; can use monomorphic load.
BuildCheckHeapObject(object);
Add<HCheckMaps>(object, types);
return BuildLoadNamedField(object, access);
HCheckMaps* type_check = Add<HCheckMaps>(object, types);
return BuildLoadNamedField(object, access, type_check);
}
if (count != 0) return NULL;
@ -4589,14 +4616,44 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (!lookup.IsField()) return NULL;
BuildCheckHeapObject(object);
Add<HCheckMaps>(object, types);
HCheckMaps* type_check = Add<HCheckMaps>(object, types);
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
BuildCheckPrototypeMaps(Handle<JSObject>::cast(prototype), holder);
HValue* holder_value = Add<HConstant>(holder);
return BuildLoadNamedField(holder_value,
HObjectAccess::ForField(holder_map, &lookup, name));
HObjectAccess::ForField(holder_map, &lookup, name), type_check);
}
// Returns true if an instance of this map can never find a property with this
// name in its prototype chain. This means all prototypes up to the top are
// fast and don't have the name in them. It would be good if we could optimize
// polymorphic loads where the property is sometimes found in the prototype
// chain.
static bool PrototypeChainCanNeverResolve(
Handle<Map> map, Handle<String> name) {
Isolate* isolate = map->GetIsolate();
Object* current = map->prototype();
while (current != isolate->heap()->null_value()) {
if (current->IsJSGlobalProxy() ||
current->IsGlobalObject() ||
!current->IsJSObject() ||
JSObject::cast(current)->map()->has_named_interceptor() ||
JSObject::cast(current)->IsAccessCheckNeeded() ||
!JSObject::cast(current)->HasFastProperties()) {
return false;
}
LookupResult lookup(isolate);
Map* map = JSObject::cast(current)->map();
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) return false;
if (!lookup.IsCacheable()) return false;
current = JSObject::cast(current)->GetPrototype();
}
return true;
}
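The predicate is deliberately conservative: it reports "can never resolve" only when every object on the chain, all the way to null, is a plain fast-mode JSObject with no named interceptor or access checks and a cacheable negative own lookup; a global proxy, global object, or dictionary-mode map anywhere makes it return false. In outline, with hypothetical helper names:

// Conservative walk: any doubt means the chain might resolve the name.
bool ChainCanNeverResolve(Object* start_prototype, String* name) {
  for (Object* cur = start_prototype; !IsNull(cur); cur = PrototypeOf(cur)) {
    if (!IsPlainFastJSObject(cur)) return false;  // interceptors, proxies, ...
    if (OwnLookupMightFind(cur, name)) return false;
  }
  return true;  // no prototype can ever supply 'name'
}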
@ -4607,16 +4664,90 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
Handle<String> name) {
HInstruction* instr = TryLoadPolymorphicAsMonomorphic(
expr, object, types, name);
if (instr == NULL) {
if (instr != NULL) {
instr->set_position(expr->position());
return ast_context()->ReturnInstruction(instr, expr->id());
}
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, false) ||
(lookup.IsCacheable() &&
!map->is_dictionary_map() &&
!map->has_named_interceptor() &&
(lookup.IsConstant() ||
(!lookup.IsFound() &&
PrototypeChainCanNeverResolve(map, name))))) {
if (count == 0) {
BuildCheckHeapObject(object);
HValue* context = environment()->context();
instr = new(zone()) HLoadNamedFieldPolymorphic(
context, object, types, name, zone());
join = graph()->CreateBasicBlock();
}
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HCompareMap* compare =
new(zone()) HCompareMap(object, map, if_true, if_false);
current_block()->Finish(compare);
instr->set_position(expr->position());
return ast_context()->ReturnInstruction(instr, expr->id());
set_current_block(if_true);
// TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
if (lookup.IsField()) {
HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
HLoadNamedField* load = BuildLoadNamedField(object, access, compare);
load->set_position(expr->position());
AddInstruction(load);
if (!ast_context()->IsEffect()) Push(load);
} else if (lookup.IsConstant()) {
Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
HConstant* hconstant = Add<HConstant>(constant);
if (!ast_context()->IsEffect()) Push(hconstant);
} else {
ASSERT(!lookup.IsFound());
if (map->prototype()->IsJSObject()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder = prototype;
while (holder->map()->prototype()->IsJSObject()) {
holder = handle(JSObject::cast(holder->map()->prototype()));
}
BuildCheckPrototypeMaps(prototype, holder);
}
if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
}
current_block()->Goto(join);
set_current_block(if_false);
}
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
HInstruction* load = BuildLoadNamedGeneric(object, name, expr);
load->set_position(expr->position());
AddInstruction(load);
if (!ast_context()->IsEffect()) Push(load);
if (join != NULL) {
current_block()->Goto(join);
} else {
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
return;
}
}
ASSERT(join != NULL);
join->SetJoinId(expr->id());
set_current_block(join);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
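The emitted graph is a chain of map dispatches converging on a single join block; in outline, for receiver maps m1, m2, ... the builder produces control flow equivalent to:

// Pseudocode of the generated control flow, not builder code.
//
//   if (map(object) == m1)       value = inlined field load for m1;  goto join
//   else if (map(object) == m2)  value = constant from m2;           goto join
//   ...
//   else if all known maps were handled && FLAG_deoptimize_uncommon_cases
//     deoptimize("Unknown map in polymorphic load")
//   else                         value = generic IC load;            goto join
//   join:  result = value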
@ -4735,7 +4866,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization(join);
FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
instr->set_position(position);
@ -4783,7 +4914,10 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
HValue* value = environment()->ExpressionStackAt(0);
HValue* object = environment()->ExpressionStackAt(1);
if (expr->IsUninitialized()) Add<HDeoptimize>(Deoptimizer::SOFT);
if (expr->IsUninitialized()) {
Add<HDeoptimize>("Insufficient type feedback for property assignment",
Deoptimizer::SOFT);
}
return BuildStoreNamed(expr, expr->id(), expr->position(),
expr->AssignmentId(), prop, object, value, value);
} else {
@ -4829,7 +4963,8 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
Add<HDeoptimize>(Deoptimizer::EAGER);
Add<HDeoptimize>("Constant global variable assignment",
Deoptimizer::EAGER);
builder.End();
}
HInstruction* instr =
@ -5038,7 +5173,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
}
return BuildStoreNamed(prop, expr->id(), expr->position(),
return BuildStoreNamed(expr, expr->id(), expr->position(),
expr->AssignmentId(), prop, object, instr, instr);
} else {
// Keyed property.
@ -5270,7 +5405,8 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Handle<String> name,
Property* expr) {
if (expr->IsUninitialized()) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Insufficient feedback for generic named load",
Deoptimizer::SOFT);
}
HValue* context = environment()->context();
return new(zone()) HLoadNamedGeneric(context, object, name);
@ -5299,18 +5435,18 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
// Handle access to various length properties
if (name->Equals(isolate()->heap()->length_string())) {
if (map->instance_type() == JS_ARRAY_TYPE) {
AddCheckMap(object, map);
HCheckMaps* type_check = AddCheckMap(object, map);
return New<HLoadNamedField>(object,
HObjectAccess::ForArrayLength(map->elements_kind()));
HObjectAccess::ForArrayLength(map->elements_kind()), type_check);
}
}
LookupResult lookup(isolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsField()) {
AddCheckMap(object, map);
HCheckMaps* type_check = AddCheckMap(object, map);
return BuildLoadNamedField(object,
HObjectAccess::ForField(map, &lookup, name));
HObjectAccess::ForField(map, &lookup, name), type_check);
}
// Handle a load of a constant known function.
@ -5326,11 +5462,11 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
HCheckMaps* type_check = AddCheckMap(object, map);
BuildCheckPrototypeMaps(prototype, holder);
HValue* holder_value = Add<HConstant>(holder);
return BuildLoadNamedField(holder_value,
HObjectAccess::ForField(holder_map, &lookup, name));
HObjectAccess::ForField(holder_map, &lookup, name), type_check);
}
// Handle a load of a constant function somewhere in the prototype chain.
@ -5581,13 +5717,15 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
if (!is_store) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
current_block()->GotoNoSimulate(join);
set_current_block(other_map);
}
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
FinishExitWithHardDeoptimization(join);
FinishExitWithHardDeoptimization("Unknown type in polymorphic element access",
join);
set_current_block(join);
return is_store ? NULL : Pop();
}
@ -5623,12 +5761,14 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
} else {
if (is_store) {
if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Insufficient feedback for keyed store",
Deoptimizer::SOFT);
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->IsUninitialized()) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Insufficient feedback for keyed load",
Deoptimizer::SOFT);
}
instr = BuildLoadKeyedGeneric(obj, key);
}
@ -6075,7 +6215,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// that the environment stack matches the depth on deopt that it otherwise
// would have had after a successful call.
Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
FinishExitWithHardDeoptimization(join);
FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
HValue* context = environment()->context();
HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
@ -7473,7 +7613,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
after = BuildIncrement(returns_original_input, expr);
HValue* result = returns_original_input ? Pop() : after;
return BuildStoreNamed(prop, expr->id(), expr->position(),
return BuildStoreNamed(expr, expr->id(), expr->position(),
expr->AssignmentId(), prop, object, after, result);
} else {
// Keyed property.
@ -7635,12 +7775,14 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
}
if (left_type->Is(Type::None())) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Insufficient type feedback for left side",
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous defaults.
left_type = handle(Type::Any(), isolate());
}
if (right_type->Is(Type::None())) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("Insufficient type feedback for right side",
Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
}
HInstruction* instr = NULL;
@ -7990,7 +8132,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
Add<HDeoptimize>(Deoptimizer::SOFT);
Add<HDeoptimize>("insufficient type feedback for combined type",
Deoptimizer::SOFT);
combined_type = left_type = right_type = handle(Type::Any(), isolate());
}
@ -8040,10 +8183,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
// TODO(verwaest): Remove once Representation::FromType properly
// returns Smi when the IC measures Smi.
if (left_type->Is(Type::Smi())) left_rep = Representation::Smi();
if (right_type->Is(Type::Smi())) right_rep = Representation::Smi();
HCompareNumericAndBranch* result =
new(zone()) HCompareNumericAndBranch(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
@ -9214,20 +9353,19 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
// There is already a phi for the i'th value.
HPhi* phi = HPhi::cast(value);
// Assert index is correct and that we haven't missed an incoming edge.
ASSERT(phi->merged_index() == i);
ASSERT(phi->merged_index() == i || !phi->HasMergedIndex());
ASSERT(phi->OperandCount() == block->predecessors()->length());
phi->AddInput(other->values_[i]);
} else if (values_[i] != other->values_[i]) {
// There is a fresh value on the incoming edge, a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
HPhi* phi = new(zone()) HPhi(i, zone());
HPhi* phi = block->AddNewPhi(i);
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
}
phi->AddInput(other->values_[i]);
this->values_[i] = phi;
block->AddPhi(phi);
}
}
}
@ -9288,10 +9426,9 @@ HEnvironment* HEnvironment::CopyWithoutHistory() const {
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
HPhi* phi = new(zone()) HPhi(i, zone());
HPhi* phi = loop_header->AddNewPhi(i);
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
loop_header->AddPhi(phi);
}
new_env->ClearHistory();
return new_env;

77 deps/v8/src/hydrogen.h

@ -142,7 +142,8 @@ class HBasicBlock: public ZoneObject {
}
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HSimulate* AddSimulate(BailoutId ast_id,
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr);
@ -453,6 +454,10 @@ class HGraph: public ZoneObject {
uint32_instructions_->Add(instr, zone());
}
void IncrementInNoSideEffectsScope() { no_side_effects_scope_count_++; }
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
private:
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
@ -498,6 +503,7 @@ class HGraph: public ZoneObject {
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
int maximum_environment_size_;
int no_side_effects_scope_count_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@ -970,8 +976,7 @@ class HGraphBuilder {
explicit HGraphBuilder(CompilationInfo* info)
: info_(info),
graph_(NULL),
current_block_(NULL),
no_side_effects_scope_count_(0) {}
current_block_(NULL) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@ -1186,16 +1191,7 @@ class HGraphBuilder {
AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8)));
}
void AddSimulate(BailoutId id,
RemovableSimulate removable = FIXED_SIMULATE);
void IncrementInNoSideEffectsScope() {
no_side_effects_scope_count_++;
}
void DecrementInNoSideEffectsScope() {
no_side_effects_scope_count_--;
}
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
protected:
virtual bool BuildGraph() = 0;
@ -1258,10 +1254,10 @@ class HGraphBuilder {
HLoadNamedField* BuildLoadNamedField(
HValue* object,
HObjectAccess access,
HValue* typecheck = NULL);
HInstruction* BuildLoadStringLength(HValue* object, HValue* typecheck = NULL);
HValue* typecheck);
HInstruction* BuildLoadStringLength(HValue* object, HValue* typecheck);
HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
@ -1270,7 +1266,8 @@ class HGraphBuilder {
void PushAndAdd(HInstruction* instr);
void FinishExitWithHardDeoptimization(HBasicBlock* continuation);
void FinishExitWithHardDeoptimization(const char* reason,
HBasicBlock* continuation);
void AddIncrementCounter(StatsCounter* counter,
HValue* context);
@ -1374,10 +1371,10 @@ class HGraphBuilder {
void Else();
void End();
void Deopt();
void ElseDeopt() {
void Deopt(const char* reason);
void ElseDeopt(const char* reason) {
Else();
Deopt();
Deopt(reason);
}
void Return(HValue* value);
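Deopt and ElseDeopt now force every call site to say why it bails out; these strings are what populate the reason arguments threaded through the rest of this diff. Usage mirrors the capacity check in hydrogen.cc above:

IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
// ... fast path for in-range keys ...
key_checker.ElseDeopt("Key out of capacity range");
key_checker.End();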
@ -1441,20 +1438,6 @@ class HGraphBuilder {
bool finished_;
};
class NoObservableSideEffectsScope {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
builder_->IncrementInNoSideEffectsScope();
}
~NoObservableSideEffectsScope() {
builder_->DecrementInNoSideEffectsScope();
}
private:
HGraphBuilder* builder_;
};
HValue* BuildNewElementsCapacity(HValue* old_capacity);
void BuildNewSpaceArrayCheck(HValue* length,
@ -1576,19 +1559,18 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
int no_side_effects_scope_count_;
};
template<>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
Deoptimizer::BailoutType type) {
const char* reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
}
if (current_block()->IsDeoptimizing()) return NULL;
HDeoptimize* instr = New<HDeoptimize>(type);
HDeoptimize* instr = New<HDeoptimize>(reason, type);
AddInstruction(instr);
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_inserted()->Increment();
@ -1601,8 +1583,8 @@ inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
template<>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
Deoptimizer::BailoutType type) {
return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(type));
const char* reason, Deoptimizer::BailoutType type) {
return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(reason, type));
}
@ -2054,7 +2036,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Property* expr,
Handle<Map> map);
void AddCheckMap(HValue* object, Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
void BuildStoreNamed(Expression* expression,
BailoutId id,
@ -2329,6 +2311,21 @@ class HTracer: public Malloced {
};
class NoObservableSideEffectsScope {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
builder_->graph()->IncrementInNoSideEffectsScope();
}
~NoObservableSideEffectsScope() {
builder_->graph()->DecrementInNoSideEffectsScope();
}
private:
HGraphBuilder* builder_;
};
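Moving the counter from the builder onto the HGraph lets code that only sees the graph consult it — AddInstruction flags new instructions as having no observable side effects, AddNewPhi downgrades the merged index to kInvalidMergedIndex, and AddSimulate asserts it is never called inside such a scope. Typical usage, as in HandlePolymorphicElementAccess earlier in this diff:

{
  NoObservableSideEffectsScope scope(this);  // 'this' is an HGraphBuilder
  // Instructions added here get kHasNoObservableSideEffects; simulates
  // are forbidden, so exits must use GotoNoSimulate.
  FinishExitWithHardDeoptimization(
      "Unknown type in polymorphic element access", join);
}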
} } // namespace v8::internal
#endif // V8_HYDROGEN_H_

685 deps/v8/src/i18n.cc

@ -29,33 +29,85 @@
#include "i18n.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/curramt.h"
#include "unicode/dcfmtsym.h"
#include "unicode/decimfmt.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
#include "unicode/ucol.h"
#include "unicode/ucurr.h"
#include "unicode/unum.h"
#include "unicode/uversion.h"
namespace v8 {
namespace internal {
namespace {
bool ExtractStringSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
icu::UnicodeString* setting) {
Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
MaybeObject* maybe_object = options->GetProperty(*str);
Object* object;
if (maybe_object->ToObject(&object) && object->IsString()) {
v8::String::Utf8Value utf8_string(
v8::Utils::ToLocal(Handle<String>(String::cast(object))));
*setting = icu::UnicodeString::fromUTF8(*utf8_string);
return true;
}
return false;
}
bool ExtractIntegerSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
int32_t* value) {
Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
MaybeObject* maybe_object = options->GetProperty(*str);
Object* object;
if (maybe_object->ToObject(&object) && object->IsNumber()) {
object->ToInt32(value);
return true;
}
return false;
}
bool ExtractBooleanSetting(Isolate* isolate,
Handle<JSObject> options,
const char* key,
bool* value) {
Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
MaybeObject* maybe_object = options->GetProperty(*str);
Object* object;
if (maybe_object->ToObject(&object) && object->IsBoolean()) {
*value = object->BooleanValue();
return true;
}
return false;
}
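These three helpers collapse the GetProperty/ToObject/type-check dance that every option read used to spell out inline (compare the old timeZone lookup just below) into one call whose boolean result says whether the key was present with the expected type. Typical use, as in CreateICUNumberFormat later in this file:

int32_t digits;
if (ExtractIntegerSetting(
        isolate, options, "minimumFractionDigits", &digits)) {
  number_format->setMinimumFractionDigits(digits);
}
// A missing or non-numeric property simply leaves the ICU default in place.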
icu::SimpleDateFormat* CreateICUDateFormat(
Isolate* isolate,
const icu::Locale& icu_locale,
Handle<Object> options) {
Handle<JSObject> options) {
// Create the time zone as specified by the user. We have to re-create the
// time zone since the calendar takes ownership.
icu::TimeZone* tz = NULL;
MaybeObject* maybe_object = options->GetProperty(
*isolate->factory()->NewStringFromAscii(CStrVector("timeZone")));
Object* timezone;
if (maybe_object->ToObject(&timezone) && timezone->IsString()) {
v8::String::Utf8Value utf8_timezone(
v8::Utils::ToLocal(Handle<String>(String::cast(timezone))));
icu::UnicodeString u_timezone(icu::UnicodeString::fromUTF8(*utf8_timezone));
tz = icu::TimeZone::createTimeZone(u_timezone);
icu::UnicodeString timezone;
if (ExtractStringSetting(isolate, options, "timeZone", &timezone)) {
tz = icu::TimeZone::createTimeZone(timezone);
} else {
tz = icu::TimeZone::createDefault();
}
@ -68,18 +120,13 @@ icu::SimpleDateFormat* CreateICUDateFormat(
// Make formatter from skeleton. Calendar and numbering system are added
// to the locale as Unicode extension (if they were specified at all).
icu::SimpleDateFormat* date_format = NULL;
Object* skeleton;
maybe_object = options->GetProperty(
*isolate->factory()->NewStringFromAscii(CStrVector("skeleton")));
if (maybe_object->ToObject(&skeleton) && skeleton->IsString()) {
v8::String::Utf8Value utf8_skeleton(
v8::Utils::ToLocal(Handle<String>(String::cast(skeleton))));
icu::UnicodeString u_skeleton(icu::UnicodeString::fromUTF8(*utf8_skeleton));
icu::UnicodeString skeleton;
if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
icu::DateTimePatternGenerator* generator =
icu::DateTimePatternGenerator::createInstance(icu_locale, status);
icu::UnicodeString pattern;
if (U_SUCCESS(status)) {
pattern = generator->getBestPattern(u_skeleton, status);
pattern = generator->getBestPattern(skeleton, status);
delete generator;
}
@ -99,7 +146,7 @@ icu::SimpleDateFormat* CreateICUDateFormat(
}
void SetResolvedSettings(Isolate* isolate,
void SetResolvedDateSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::SimpleDateFormat* date_format,
Handle<JSObject> resolved) {
@ -217,6 +264,473 @@ Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
field));
}
icu::DecimalFormat* CreateICUNumberFormat(
Isolate* isolate,
const icu::Locale& icu_locale,
Handle<JSObject> options) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
icu::DecimalFormat* number_format = NULL;
icu::UnicodeString style;
icu::UnicodeString currency;
if (ExtractStringSetting(isolate, options, "style", &style)) {
if (style == UNICODE_STRING_SIMPLE("currency")) {
icu::UnicodeString display;
ExtractStringSetting(isolate, options, "currency", &currency);
ExtractStringSetting(isolate, options, "currencyDisplay", &display);
#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
icu::NumberFormat::EStyles format_style;
if (display == UNICODE_STRING_SIMPLE("code")) {
format_style = icu::NumberFormat::kIsoCurrencyStyle;
} else if (display == UNICODE_STRING_SIMPLE("name")) {
format_style = icu::NumberFormat::kPluralCurrencyStyle;
} else {
format_style = icu::NumberFormat::kCurrencyStyle;
}
#else // ICU version is 4.8 or above (we ignore versions below 4.0).
UNumberFormatStyle format_style;
if (display == UNICODE_STRING_SIMPLE("code")) {
format_style = UNUM_CURRENCY_ISO;
} else if (display == UNICODE_STRING_SIMPLE("name")) {
format_style = UNUM_CURRENCY_PLURAL;
} else {
format_style = UNUM_CURRENCY;
}
#endif
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createInstance(icu_locale, format_style, status));
} else if (style == UNICODE_STRING_SIMPLE("percent")) {
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createPercentInstance(icu_locale, status));
if (U_FAILURE(status)) {
delete number_format;
return NULL;
}
// Make sure 1.1% doesn't go into 2%.
number_format->setMinimumFractionDigits(1);
} else {
// Make a decimal instance by default.
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createInstance(icu_locale, status));
}
}
if (U_FAILURE(status)) {
delete number_format;
return NULL;
}
// Set all options.
if (!currency.isEmpty()) {
number_format->setCurrency(currency.getBuffer(), status);
}
int32_t digits;
if (ExtractIntegerSetting(
isolate, options, "minimumIntegerDigits", &digits)) {
number_format->setMinimumIntegerDigits(digits);
}
if (ExtractIntegerSetting(
isolate, options, "minimumFractionDigits", &digits)) {
number_format->setMinimumFractionDigits(digits);
}
if (ExtractIntegerSetting(
isolate, options, "maximumFractionDigits", &digits)) {
number_format->setMaximumFractionDigits(digits);
}
bool significant_digits_used = false;
if (ExtractIntegerSetting(
isolate, options, "minimumSignificantDigits", &digits)) {
number_format->setMinimumSignificantDigits(digits);
significant_digits_used = true;
}
if (ExtractIntegerSetting(
isolate, options, "maximumSignificantDigits", &digits)) {
number_format->setMaximumSignificantDigits(digits);
significant_digits_used = true;
}
number_format->setSignificantDigitsUsed(significant_digits_used);
bool grouping;
if (ExtractBooleanSetting(isolate, options, "useGrouping", &grouping)) {
number_format->setGroupingUsed(grouping);
}
// Set rounding mode.
number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
return number_format;
}
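In summary, CreateICUNumberFormat maps the three Intl styles onto ICU factory calls and then layers digit limits, grouping, and half-up rounding on top. The decision table, as implemented above (ICU 4.8+ enum names shown):

// style       currencyDisplay   ICU construction
// ---------   ---------------   -----------------------------------------
// "currency"  "code"            createInstance(locale, UNUM_CURRENCY_ISO)
// "currency"  "name"            createInstance(locale, UNUM_CURRENCY_PLURAL)
// "currency"  (default)         createInstance(locale, UNUM_CURRENCY)
// "percent"                     createPercentInstance(locale), then
//                               setMinimumFractionDigits(1) so 1.1% != 2%
// (other)                       createInstance(locale)  // plain decimal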
void SetResolvedNumberSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::DecimalFormat* number_format,
Handle<JSObject> resolved) {
icu::UnicodeString pattern;
number_format->toPattern(pattern);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
isolate->factory()->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length())),
NONE,
kNonStrictMode);
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("currency")),
isolate->factory()->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
currency.length())),
NONE,
kNonStrictMode);
}
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
// to assume that for a given locale the NumberingSystem constructor produces
// the same digits as NumberFormat/Calendar would.
// same digits as NumberFormat/Calendar would.
UErrorCode status = U_ZERO_ERROR;
icu::NumberingSystem* numbering_system =
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->NewStringFromAscii(CStrVector(ns)),
NONE,
kNonStrictMode);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->undefined_value(),
NONE,
kNonStrictMode);
}
delete numbering_system;
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
CStrVector("minimumIntegerDigits")),
isolate->factory()->NewNumberFromInt(
number_format->getMinimumIntegerDigits()),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
CStrVector("minimumFractionDigits")),
isolate->factory()->NewNumberFromInt(
number_format->getMinimumFractionDigits()),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
CStrVector("maximumFractionDigits")),
isolate->factory()->NewNumberFromInt(
number_format->getMaximumFractionDigits()),
NONE,
kNonStrictMode);
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
if (resolved->HasLocalProperty(*key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits")),
isolate->factory()->NewNumberFromInt(
number_format->getMinimumSignificantDigits()),
NONE,
kNonStrictMode);
}
key = isolate->factory()->NewStringFromAscii(
CStrVector("maximumSignificantDigits"));
if (resolved->HasLocalProperty(*key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
CStrVector("maximumSignificantDigits")),
isolate->factory()->NewNumberFromInt(
number_format->getMaximumSignificantDigits()),
NONE,
kNonStrictMode);
}
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
kNonStrictMode);
} else {
// This should never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
kNonStrictMode);
}
}
icu::Collator* CreateICUCollator(
Isolate* isolate,
const icu::Locale& icu_locale,
Handle<JSObject> options) {
// Make collator from options.
icu::Collator* collator = NULL;
UErrorCode status = U_ZERO_ERROR;
collator = icu::Collator::createInstance(icu_locale, status);
if (U_FAILURE(status)) {
delete collator;
return NULL;
}
// Set flags first, and then override them with sensitivity if necessary.
bool numeric;
if (ExtractBooleanSetting(isolate, options, "numeric", &numeric)) {
collator->setAttribute(
UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF, status);
}
// Normalization is always on, by the spec. We are free to optimize
// if the strings are already normalized (but we don't have a way to tell
// that right now).
collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
icu::UnicodeString case_first;
if (ExtractStringSetting(isolate, options, "caseFirst", &case_first)) {
if (case_first == UNICODE_STRING_SIMPLE("upper")) {
collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
} else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
} else {
// Default (false/off).
collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
}
}
icu::UnicodeString sensitivity;
if (ExtractStringSetting(isolate, options, "sensitivity", &sensitivity)) {
if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
collator->setStrength(icu::Collator::PRIMARY);
} else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
collator->setStrength(icu::Collator::SECONDARY);
} else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
collator->setStrength(icu::Collator::PRIMARY);
collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
} else {
// variant (default)
collator->setStrength(icu::Collator::TERTIARY);
}
}
bool ignore;
if (ExtractBooleanSetting(isolate, options, "ignorePunctuation", &ignore)) {
if (ignore) {
collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
}
}
return collator;
}
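The sensitivity mapping follows the ECMA-402 table: base and accent are ICU's PRIMARY and SECONDARY strengths, case is PRIMARY plus the separate case-level attribute, and variant is TERTIARY. Assuming standard UCA behavior, the observable effect is roughly:

// sensitivity   ICU setting                      "a" vs "A"   "a" vs "á"
// -----------   ------------------------------   ----------   ----------
// "base"        PRIMARY                          equal        equal
// "accent"      SECONDARY                        equal        different
// "case"        PRIMARY + UCOL_CASE_LEVEL on     different    equal
// "variant"     TERTIARY (the default)           different    different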
void SetResolvedCollatorSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::Collator* collator,
Handle<JSObject> resolved) {
UErrorCode status = U_ZERO_ERROR;
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numeric")),
isolate->factory()->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
NONE,
kNonStrictMode);
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("lower")),
NONE,
kNonStrictMode);
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("upper")),
NONE,
kNonStrictMode);
break;
default:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("false")),
NONE,
kNonStrictMode);
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("primary")),
NONE,
kNonStrictMode);
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("case")),
NONE,
kNonStrictMode);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("base")),
NONE,
kNonStrictMode);
}
break;
}
case UCOL_SECONDARY:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("accent")),
NONE,
kNonStrictMode);
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
kNonStrictMode);
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary or identical from ICU, but if we do,
// put them into variant.
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
kNonStrictMode);
break;
default:
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("identical")),
NONE,
kNonStrictMode);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
kNonStrictMode);
}
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("ignorePunctuation")),
isolate->factory()->ToBoolean(collator->getAttribute(
UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
NONE,
kNonStrictMode);
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
uloc_toLanguageTag(
icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
kNonStrictMode);
} else {
// This should never happen, since we got the locale from ICU.
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
kNonStrictMode);
}
}
} // namespace
@ -261,9 +775,10 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
date_format = CreateICUDateFormat(isolate, no_extension_locale, options);
// Set resolved settings (pattern, numbering system, calendar).
SetResolvedSettings(isolate, no_extension_locale, date_format, resolved);
SetResolvedDateSettings(
isolate, no_extension_locale, date_format, resolved);
} else {
SetResolvedSettings(isolate, icu_locale, date_format, resolved);
SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
}
return date_format;
@ -273,8 +788,9 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Isolate* isolate,
Handle<JSObject> obj) {
if (obj->HasLocalProperty(
*isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")))) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
if (obj->HasLocalProperty(*key)) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@ -294,4 +810,129 @@ void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
object->Dispose(isolate);
}
icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
Isolate* isolate,
Handle<String> locale,
Handle<JSObject> options,
Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
if (bcp47_locale.length() != 0) {
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
return NULL;
}
icu_locale = icu::Locale(icu_result);
}
icu::DecimalFormat* number_format =
CreateICUNumberFormat(isolate, icu_locale, options);
if (!number_format) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
number_format = CreateICUNumberFormat(
isolate, no_extension_locale, options);
// Set resolved settings (pattern, numbering system).
SetResolvedNumberSettings(
isolate, no_extension_locale, number_format, resolved);
} else {
SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
}
return number_format;
}
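InitializeNumberFormat, InitializeCollator below, and the date-format initializer above all share the same shape: create with the full locale, and on failure strip the extensions via getBaseName() and retry. A sketch of the common pattern with the per-type factory and resolver passed in; this template is illustrative and not part of the patch:

    // Illustrative only: Create and Resolve stand in for e.g.
    // CreateICUNumberFormat and SetResolvedNumberSettings.
    template <typename T, typename Create, typename Resolve>
    static T* InitializeWithFallback(Isolate* isolate,
                                     const icu::Locale& icu_locale,
                                     Handle<JSObject> options,
                                     Handle<JSObject> resolved,
                                     Create create,
                                     Resolve resolve) {
      T* result = create(isolate, icu_locale, options);
      if (!result) {
        // Remove extensions and try again.
        icu::Locale no_extension_locale(icu_locale.getBaseName());
        result = create(isolate, no_extension_locale, options);
        resolve(isolate, no_extension_locale, result, resolved);
      } else {
        resolve(isolate, icu_locale, result, resolved);
      }
      return result;
    }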
icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
if (obj->HasLocalProperty(*key)) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
return NULL;
}
void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
Persistent<v8::Object>* object,
void* param) {
// First delete the hidden C++ object.
delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
object->Dispose(isolate);
}
icu::Collator* Collator::InitializeCollator(
Isolate* isolate,
Handle<String> locale,
Handle<JSObject> options,
Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
if (bcp47_locale.length() != 0) {
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
return NULL;
}
icu_locale = icu::Locale(icu_result);
}
icu::Collator* collator = CreateICUCollator(isolate, icu_locale, options);
if (!collator) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
collator = CreateICUCollator(isolate, no_extension_locale, options);
// Set resolved settings (pattern, numbering system).
SetResolvedCollatorSettings(
isolate, no_extension_locale, collator, resolved);
} else {
SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
}
return collator;
}
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("collator"));
if (obj->HasLocalProperty(*key)) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
return NULL;
}
void Collator::DeleteCollator(v8::Isolate* isolate,
Persistent<v8::Object>* object,
void* param) {
// First delete the hidden C++ object.
delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
object->Dispose(isolate);
}
} } // namespace v8::internal

50
deps/v8/src/i18n.h

@ -33,6 +33,8 @@
#include "v8.h"
namespace U_ICU_NAMESPACE {
class Collator;
class DecimalFormat;
class SimpleDateFormat;
}
@ -51,6 +53,7 @@ class I18N {
I18N();
};
class DateFormat {
public:
// Create a formatter for the specified locale and options. Returns the
@ -74,6 +77,53 @@ class DateFormat {
DateFormat();
};
class NumberFormat {
public:
// Create a formatter for the specified locale and options. Returns the
// resolved settings for the locale / options.
static icu::DecimalFormat* InitializeNumberFormat(
Isolate* isolate,
Handle<String> locale,
Handle<JSObject> options,
Handle<JSObject> resolved);
// Unpacks number format object from corresponding JavaScript object.
static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
Handle<JSObject> obj);
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
static void DeleteNumberFormat(v8::Isolate* isolate,
Persistent<v8::Object>* object,
void* param);
private:
NumberFormat();
};
class Collator {
public:
// Create a collator for the specified locale and options. Returns the
// resolved settings for the locale / options.
static icu::Collator* InitializeCollator(
Isolate* isolate,
Handle<String> locale,
Handle<JSObject> options,
Handle<JSObject> resolved);
// Unpacks collator object from corresponding JavaScript object.
static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
// Release memory we allocated for the Collator once the JS object that holds
// the pointer gets garbage collected.
static void DeleteCollator(v8::Isolate* isolate,
Persistent<v8::Object>* object,
void* param);
private:
Collator();
};
} } // namespace v8::internal
#endif // V8_I18N_H_
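Taken together, the three classes share one lifecycle: Initialize* allocates the ICU object, the caller stashes the raw pointer in internal field 0 of a JS wrapper and registers Delete* as the weak callback, and Unpack* recovers the pointer on later calls. A rough sketch of the wiring for NumberFormat; the wrapper creation and weak-handle plumbing here are paraphrased assumptions, only the entry points come from this header:

    // Paraphrased glue code, not from this diff.
    icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
        isolate, locale, options, resolved);
    if (!number_format) return isolate->ThrowIllegalOperation();
    // Hide the ICU object in the wrapper and free it when the wrapper dies.
    local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
    // A weak persistent handle pointing at *local_object would then be
    // registered with NumberFormat::DeleteNumberFormat as its callback.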

55
deps/v8/src/ia32/code-stubs-ia32.cc

@ -3956,11 +3956,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
__ mov(number_string_cache,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
__ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
@ -5010,9 +5006,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@ -5032,18 +5025,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
Label miss;
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function, Operand::StaticArray(scratch,
times_pointer_size,
roots_array_start));
__ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(
scratch, times_pointer_size, roots_array_start));
__ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(
scratch, times_pointer_size, roots_array_start));
__ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
@ -5058,12 +5044,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
function);
__ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
__ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
} else {
// The constants for the code patching are based on no push instructions
// at the call site.
@ -5097,10 +5079,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
__ Set(eax, Immediate(0));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch,
times_pointer_size, roots_array_start), eax);
__ mov(eax, Immediate(0));
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@ -5119,10 +5099,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(
scratch, times_pointer_size, roots_array_start), eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@ -5875,11 +5853,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
// Load the string table.
Register string_table = c2;
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kStringTableRootIndex));
__ mov(string_table,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
__ LoadRoot(string_table, Heap::kStringTableRootIndex);
// Calculate capacity mask from the string table capacity.
Register mask = scratch2;
@ -5967,12 +5941,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
if (Serializer::enabled()) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
__ mov(scratch, Operand::StaticArray(scratch,
times_pointer_size,
roots_array_start));
__ LoadRoot(scratch, Heap::kHashSeedRootIndex);
__ SmiUntag(scratch);
__ add(scratch, character);
__ mov(hash, scratch);

342
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -355,6 +355,8 @@ bool LCodeGen::GenerateBody() {
if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
RecordAndUpdatePosition(instr->position());
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
@ -422,6 +424,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
int pos = instructions_->at(code->instruction_index())->position();
RecordAndUpdatePosition(pos);
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@ -763,37 +769,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
UNREACHABLE();
}
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// TODO(mstarzinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
if (value == NULL) {
int arguments_count = environment->values()->length() - translation_size;
translation->BeginArgumentsObject(arguments_count);
for (int i = 0; i < arguments_count; ++i) {
LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(translation_size + i),
environment->HasUint32ValueAt(translation_size + i));
}
continue;
}
AddToTranslation(translation,
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32) {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer) {
if (op == LEnvironment::materialization_marker()) {
int object_index = (*object_index_pointer)++;
if (environment->ObjectIsDuplicateAt(object_index)) {
int dupe_of = environment->ObjectDuplicateOfAt(object_index);
translation->DuplicateObject(dupe_of);
return;
}
int object_length = environment->ObjectLengthAt(object_index);
if (environment->ObjectIsArgumentsAt(object_index)) {
translation->BeginArgumentsObject(object_length);
} else {
translation->BeginCapturedObject(object_length);
}
int dematerialized_index = *dematerialized_index_pointer;
int env_offset = environment->translation_size() + dematerialized_index;
*dematerialized_index_pointer += object_length;
for (int i = 0; i < object_length; ++i) {
LOperand* value = environment->values()->at(env_offset + i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(env_offset + i),
environment->HasUint32ValueAt(env_offset + i),
object_index_pointer,
dematerialized_index_pointer);
}
return;
}
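To make the two counters concrete: the environment's value list holds translation_size ordinary slots followed by the flattened fields of every dematerialized object, so env_offset indexes into that tail region. A sketch of the layout for one captured object with two fields:

    // values(): | slot 0 ... slot ts-1 | field 0 | field 1 | ...
    //                                  ^ env_offset = translation_size + 0
    // object_index counts materialization markers in visit order;
    // dematerialized_index advances by object_length per object, so a nested
    // marker among the fields recurses into a later slice of the tail.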
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@ -984,7 +1010,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&done);
}
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
@ -1168,6 +1194,14 @@ void LCodeGen::RecordPosition(int position) {
}
void LCodeGen::RecordAndUpdatePosition(int position) {
if (position >= 0 && position != old_position_) {
masm()->positions_recorder()->RecordPosition(position);
old_position_ = position;
}
}
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@ -2199,6 +2233,17 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
if (cc == no_condition) {
__ jmp(chunk_->GetAssemblyLabel(false_block));
} else {
__ j(cc, chunk_->GetAssemblyLabel(false_block));
}
}
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32() || r.IsDouble()) {
@ -2449,6 +2494,51 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (instr->hydrogen()->representation().IsTagged()) {
Register input_reg = ToRegister(instr->object());
__ cmp(input_reg, factory()->the_hole_value());
EmitBranch(instr, equal);
return;
}
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
} else {
// Put the value on top of the stack.
X87Register src = ToX87Register(instr->object());
X87LoadForUsage(src);
__ fld(0);
__ fld(0);
__ FCmp();
Label ok;
__ j(parity_even, &ok);
__ fstp(0);
EmitFalseBranch(instr, no_condition);
__ bind(&ok);
}
__ sub(esp, Immediate(kDoubleSize));
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
__ movdbl(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
__ add(esp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
EmitBranch(instr, equal);
}
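The comparison above works because the hole is encoded as a NaN with a distinguished upper word: ucomisd (or FCmp) first filters out non-NaN doubles via the parity flag, and only then are the spilled upper 32 bits checked against kHoleNanUpper32. The same test in plain C++, as a sketch (IsHoleNan is our name, not V8's; kHoleNanUpper32 comes from the V8 headers):

    #include <cstdint>
    #include <cstring>

    static bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit-exact view of the double
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }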
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@ -3088,47 +3178,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
void LCodeGen::EmitLoadFieldOrConstant(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
__ mov(result, FieldOperand(object, offset + type->instance_size()));
} else {
// Non-negative property indices are in the properties array.
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsConstant()) {
Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
__ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
Heap* heap = type->GetHeap();
while (*current != heap->null_value()) {
__ LoadHeapObject(result, current);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Handle<Map>(current->map()));
DeoptimizeIf(not_equal, env);
current =
Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
}
__ mov(result, factory()->undefined_value());
}
}
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@ -3147,68 +3196,6 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
// prototype chain, which causes unbounded code generation.
static bool CompactEmit(SmallMapList* list,
Handle<String> name,
int i,
Isolate* isolate) {
Handle<Map> map = list->at(i);
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
return lookup.IsField() || lookup.IsConstant();
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(no_condition, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
bool all_are_compact = true;
for (int i = 0; i < map_count; ++i) {
if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
all_are_compact = false;
break;
}
}
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(object, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
} else {
Label next;
bool compact = all_are_compact ? true :
CompactEmit(instr->hydrogen()->types(), name, i, isolate());
__ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
__ bind(&check_passed);
EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
__ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
if (need_generic) {
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@ -4161,6 +4148,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
// Set the context register to a GC-safe fake value. Clobbering it is
// OK because this instruction is marked as a call.
__ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@ -4169,6 +4159,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
// Set the context register to a GC-safe fake value. Clobbering it is
// OK because this instruction is marked as a call.
__ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@ -4177,6 +4170,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
// Set the context register to a GC-safe fake value. Clobbering it is
// OK because this instruction is marked as a call.
__ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@ -5049,13 +5045,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (!use_sse2) {
// Put the value on top of the stack.
@ -5063,54 +5052,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
X87LoadForUsage(src);
}
Label no_special_nan_handling;
Label done;
if (convert_hole) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
} else {
__ fld(0);
__ fld(0);
__ FCmp();
}
__ j(parity_odd, &no_special_nan_handling);
__ sub(esp, Immediate(kDoubleSize));
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(MemOperand(esp, 0), input_reg);
} else {
__ fld(0);
__ fstp_d(MemOperand(esp, 0));
}
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ add(esp, Immediate(kDoubleSize));
__ mov(reg, factory()->the_hole_value());
if (!use_sse2) {
__ fstp(0);
}
__ jmp(&done);
__ bind(&canonicalize);
__ add(esp, Immediate(kDoubleSize));
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(input_reg, Operand::StaticVariable(nan));
} else {
__ fstp(0);
__ fld_d(Operand::StaticVariable(nan));
}
}
__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
@ -5126,7 +5067,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}
@ -5176,23 +5116,21 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
X87Register res_reg,
bool allow_undefined_as_nan,
bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
X87PrepareToWrite(res_reg);
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!allow_undefined_as_nan) {
if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@ -5200,10 +5138,6 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
__ j(equal, &convert, Label::kNear);
__ cmp(input_reg, factory()->the_hole_value());
}
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@ -5250,22 +5184,20 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
bool allow_undefined_as_nan,
bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!allow_undefined_as_nan) {
if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@ -5273,10 +5205,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
__ j(equal, &convert, Label::kNear);
__ cmp(input_reg, factory()->the_hole_value());
}
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@ -5601,16 +5529,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->representation().IsSmi()) {
mode = NUMBER_CANDIDATE_IS_SMI;
} else if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
}
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
@ -5618,7 +5539,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->can_convert_undefined_to_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@ -5626,7 +5547,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
ToX87Register(instr->result()),
instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->can_convert_undefined_to_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@ -6356,6 +6277,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(no_condition, instr->environment(), type);
}

22
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -71,7 +71,8 @@ class LCodeGen BASE_EMBEDDED {
x87_stack_depth_(0),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@ -281,10 +282,13 @@ class LCodeGen BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32);
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@ -319,10 +323,14 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
Register temp,
@ -369,12 +377,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
void EmitLoadFieldOrConstant(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@ -448,6 +450,8 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)

122
deps/v8/src/ia32/lithium-ia32.cc

@ -645,8 +645,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
&argument_index_accumulator,
&objects_to_materialize));
return instr;
}
@ -868,7 +870,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
if (phi->merged_index() < last_environment->length()) {
if (phi->HasMergedIndex()) {
last_environment->SetValueAt(phi->merged_index(), phi);
}
}
@ -938,6 +940,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@ -953,11 +956,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
argument_index_accumulator,
objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@ -972,16 +977,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
needs_arguments_object_materialization = true;
op = NULL;
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@ -992,15 +997,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
if (needs_arguments_object_materialization) {
HArgumentsObject* arguments = hydrogen_env->entry() == NULL
? graph()->GetArgumentsObject()
: hydrogen_env->entry()->arguments_object();
ASSERT(arguments->IsLinked());
for (int i = 1; i < arguments->arguments_count(); ++i) {
HValue* value = arguments->arguments_values()->at(i);
ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
LOperand* op = UseAny(value);
for (int i = object_index; i < objects_to_materialize->length(); ++i) {
HValue* object_to_materialize = objects_to_materialize->at(i);
int previously_materialized_object = -1;
for (int prev = 0; prev < i; ++prev) {
if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
previously_materialized_object = prev;
break;
}
}
int length = object_to_materialize->OperandCount();
bool is_arguments = object_to_materialize->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
continue;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* value = object_to_materialize->OperandAt(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else {
ASSERT(!value->IsPushArgument());
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
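The linear scan over earlier entries means an object captured twice is emitted once in full and afterwards only as a back-reference. For example:

    // objects_to_materialize: [A, B, A]
    //   A -> AddNewObject(A->OperandCount(), A->IsArgumentsObject())
    //   B -> AddNewObject(B->OperandCount(), B->IsArgumentsObject())
    //   A -> AddDuplicateObject(0)   // points back at the first entry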
@ -1392,9 +1415,8 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->right()->representation().Equals(
instr->left()->representation()));
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@ -1502,8 +1524,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(left->representation().IsSmiOrInteger32());
ASSERT(right->representation().Equals(left->representation()));
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
@ -1579,9 +1601,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->right()->representation().Equals(
instr->left()->representation()));
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@ -1601,9 +1622,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->right()->representation().Equals(
instr->left()->representation()));
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
@ -1636,9 +1656,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->right()->representation().Equals(
instr->left()->representation()));
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@ -1693,9 +1712,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(
instr->right()->representation()));
ASSERT(instr->left()->representation().Equals(r));
ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@ -1725,6 +1743,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return new(zone()) LCmpHoleAndBranch(object);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@ -2182,25 +2207,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* obj = UseFixed(instr->object(), edx);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
@ -2567,6 +2573,12 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
// There are no real uses of a captured object.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());

63
deps/v8/src/ia32/lithium-ia32.h

@ -75,6 +75,7 @@ class LCodeGen;
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@ -127,7 +128,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@ -209,7 +209,10 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false) { }
bit_field_(IsCallBits::encode(false)) {
set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@ -248,19 +251,28 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
// The 31-bit PositionBits field stores the int position value, which may be
// RelocInfo::kNoPosition (-1). The accessors add or subtract 1 so that the
// value encoded in bit_field_ is always >= 0 and fits into the 31-bit field.
void set_position(int pos) {
bit_field_ = PositionBits::update(bit_field_, pos + 1);
}
int position() { return PositionBits::decode(bit_field_) - 1; }
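A quick illustration of the +1 bias, assuming any LInstruction* instr:

    instr->set_position(RelocInfo::kNoPosition);  // -1, stored as 0 in bit_field_
    ASSERT(instr->position() == RelocInfo::kNoPosition);
    instr->set_position(42);                      // stored as 43
    ASSERT(instr->position() == 42);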
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
void MarkAsCall() { is_call_ = true; }
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
virtual bool ClobbersDoubleRegisters() const {
return is_call_ ||
return IsCall() ||
(!CpuFeatures::IsSupported(SSE2) &&
// We only have rudimentary X87Stack tracking, thus in general
// cannot handle deoptimization nor phi-nodes.
@ -293,10 +305,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
bool is_call_;
int bit_field_;
};
@ -849,8 +864,20 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
};
class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
};
@ -1509,21 +1536,6 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
inputs_[0] = context;
inputs_[1] = object;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
@ -2857,7 +2869,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);

69
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -54,6 +54,60 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
mov(destination, value);
return;
}
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(destination, Immediate(index));
mov(destination, Operand::StaticArray(destination,
times_pointer_size,
roots_array_start));
}
void MacroAssembler::StoreRoot(Register source,
Register scratch,
Heap::RootListIndex index) {
ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(index));
mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
source);
}
void MacroAssembler::CompareRoot(Register with,
Register scratch,
Heap::RootListIndex index) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(index));
cmp(with, Operand::StaticArray(scratch,
times_pointer_size,
roots_array_start));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
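With these helpers, the call sites in code-stubs-ia32.cc above collapse from three instructions to one when the root is a constant; otherwise LoadRoot still materializes the index and goes through the roots array. For example:

    __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
    // replaces the old sequence:
    //   mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
    //   mov(number_string_cache,
    //       Operand::StaticArray(scratch, times_pointer_size, roots_array_start));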
void MacroAssembler::InNewSpace(
Register object,
Register scratch,
@ -432,21 +486,6 @@ void MacroAssembler::SafePush(const Immediate& x) {
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
// see ROOT_ACCESSOR macro in factory.h
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
// see ROOT_ACCESSOR macro in factory.h
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {

13
deps/v8/src/ia32/macro-assembler-ia32.h

@ -61,6 +61,15 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
@ -362,10 +371,6 @@ class MacroAssembler: public Assembler {
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
// Compare against a known root, e.g. undefined, null, true, ...
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);

2
deps/v8/src/ia32/stub-cache-ia32.cc

@ -2479,6 +2479,8 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(eax, &not_smi);
// Branchless abs implementation; see:
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);

1
deps/v8/src/isolate.cc

@ -1783,7 +1783,6 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),

17
deps/v8/src/isolate.h

@ -661,9 +661,9 @@ class Isolate {
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
// Bottom JS entry (see StackTracer::Trace in sampler.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
// Bottom JS entry.
Address js_entry_sp() {
return thread_local_top_.js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
@ -1062,13 +1062,6 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
bool context_exit_happened() {
return context_exit_happened_;
}
void set_context_exit_happened(bool context_exit_happened) {
context_exit_happened_ = context_exit_happened;
}
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
@ -1317,10 +1310,6 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
bool context_exit_happened_;
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;

1
deps/v8/src/json-stringifier.h

@ -601,6 +601,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
for (int i = 0; i < length; i++) {
if (i > 0) Append(',');
Handle<Object> element = Object::GetElement(object, i);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION);
if (element->IsUndefined()) {
AppendAscii("null");
} else {

48
deps/v8/src/lithium.h

@ -533,6 +533,7 @@ class LEnvironment: public ZoneObject {
values_(value_count, zone),
is_tagged_(value_count, zone),
is_uint32_(value_count, zone),
object_mapping_(0, zone),
outer_(outer),
entry_(entry),
zone_(zone) { }
@ -573,6 +574,38 @@ class LEnvironment: public ZoneObject {
return is_uint32_.Contains(index);
}
void AddNewObject(int length, bool is_arguments) {
uint32_t encoded = LengthOrDupeField::encode(length) |
IsArgumentsField::encode(is_arguments) |
IsDuplicateField::encode(false);
object_mapping_.Add(encoded, zone());
}
void AddDuplicateObject(int dupe_of) {
uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
IsDuplicateField::encode(true);
object_mapping_.Add(encoded, zone());
}
int ObjectDuplicateOfAt(int index) {
ASSERT(ObjectIsDuplicateAt(index));
return LengthOrDupeField::decode(object_mapping_[index]);
}
int ObjectLengthAt(int index) {
ASSERT(!ObjectIsDuplicateAt(index));
return LengthOrDupeField::decode(object_mapping_[index]);
}
bool ObjectIsArgumentsAt(int index) {
ASSERT(!ObjectIsDuplicateAt(index));
return IsArgumentsField::decode(object_mapping_[index]);
}
bool ObjectIsDuplicateAt(int index) {
return IsDuplicateField::decode(object_mapping_[index]);
}
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
@ -587,6 +620,14 @@ class LEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
// Marker value indicating a de-materialized object.
static LOperand* materialization_marker() { return NULL; }
// Encoding used for the object_mapping map below.
class LengthOrDupeField : public BitField<int, 0, 30> { };
class IsArgumentsField : public BitField<bool, 30, 1> { };
class IsDuplicateField : public BitField<bool, 31, 1> { };
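Each object_mapping entry therefore packs a 30-bit length or back-reference plus two flags into one uint32_t. A worked example for a plain captured object with three fields:

    uint32_t encoded = LengthOrDupeField::encode(3) |
                       IsArgumentsField::encode(false) |
                       IsDuplicateField::encode(false);
    ASSERT(LengthOrDupeField::decode(encoded) == 3);  // ObjectLengthAt
    ASSERT(!IsDuplicateField::decode(encoded));       // ObjectIsDuplicateAt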
private:
Handle<JSFunction> closure_;
FrameType frame_type_;
@ -603,6 +644,10 @@ class LEnvironment: public ZoneObject {
ZoneList<LOperand*> values_;
GrowableBitVector is_tagged_;
GrowableBitVector is_uint32_;
// Map with encoded information about materialization_marker operands.
ZoneList<uint32_t> object_mapping_;
LEnvironment* outer_;
HEnterInlined* entry_;
Zone* zone_;
@ -754,8 +799,7 @@ int StackSlotOffset(int index);
enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
NUMBER_CANDIDATE_IS_ANY_TAGGED,
NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE
NUMBER_CANDIDATE_IS_ANY_TAGGED
};

1
deps/v8/src/liveedit.cc

@ -1290,6 +1290,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
shared_info->DisableOptimization(kLiveEdit);
}
if (shared_info->debug_info()->IsDebugInfo()) {

2
deps/v8/src/mips/codegen-mips.cc

@ -205,7 +205,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
__ sll(scratch, t1, 2);
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
__ Allocate(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
__ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
// t2: destination FixedDoubleArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.

180
deps/v8/src/mips/lithium-codegen-mips.cc

@ -268,6 +268,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
RecordAndUpdatePosition(instr->position());
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
@ -281,6 +283,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
int pos = instructions_->at(code->instruction_index())->position();
RecordAndUpdatePosition(pos);
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@ -591,37 +597,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// TODO(mstarzinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
if (value == NULL) {
int arguments_count = environment->values()->length() - translation_size;
translation->BeginArgumentsObject(arguments_count);
for (int i = 0; i < arguments_count; ++i) {
LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(translation_size + i),
environment->HasUint32ValueAt(translation_size + i));
}
continue;
}
AddToTranslation(translation,
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32) {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer) {
if (op == LEnvironment::materialization_marker()) {
int object_index = (*object_index_pointer)++;
if (environment->ObjectIsDuplicateAt(object_index)) {
int dupe_of = environment->ObjectDuplicateOfAt(object_index);
translation->DuplicateObject(dupe_of);
return;
}
int object_length = environment->ObjectLengthAt(object_index);
if (environment->ObjectIsArgumentsAt(object_index)) {
translation->BeginArgumentsObject(object_length);
} else {
translation->BeginCapturedObject(object_length);
}
int dematerialized_index = *dematerialized_index_pointer;
int env_offset = environment->translation_size() + dematerialized_index;
*dematerialized_index_pointer += object_length;
for (int i = 0; i < object_length; ++i) {
LOperand* value = environment->values()->at(env_offset + i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(env_offset + i),
environment->HasUint32ValueAt(env_offset + i),
object_index_pointer,
dematerialized_index_pointer);
}
return;
}
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@ -761,7 +787,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
if (info()->ShouldTrapOnDeopt()) {
Label skip;
if (cc != al) {
__ Branch(&skip, NegateCondition(cc), src1, src2);
@ -960,6 +986,14 @@ void LCodeGen::RecordPosition(int position) {
}
void LCodeGen::RecordAndUpdatePosition(int position) {
if (position >= 0 && position != old_position_) {
masm()->positions_recorder()->RecordPosition(position);
old_position_ = position;
}
}
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@ -1154,7 +1188,6 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
Register scratch,
LEnvironment* environment) {
ASSERT(!AreAliased(dividend, scratch, at, no_reg));
ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
uint32_t divisor_abs = abs(divisor);
@ -2888,90 +2921,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
__ lw(result, FieldMemOperand(object, offset + type->instance_size()));
} else {
// Non-negative property indices are in the properties array.
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsConstant()) {
Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
__ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
Heap* heap = type->GetHeap();
while (*current != heap->null_value()) {
__ LoadHeapObject(result, current);
__ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
current =
Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
__ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMapAndBranch(object_map, map, &check_passed, eq, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(al, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
__ Branch(&next);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ Branch(&done);
__ bind(&next);
}
}
if (need_generic) {
__ li(a2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@ -4456,12 +4405,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
} else {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ mov(a0, object_reg);
__ li(a1, Operand(to_map));
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
@ -4767,7 +4717,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Move(reg, scratch0(), input_reg);
Label canonicalize;
__ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
__ li(reg, factory()->the_hole_value());
__ li(reg, factory()->undefined_value());
__ Branch(&done);
__ bind(&canonicalize);
__ Move(input_reg,
@ -5172,7 +5122,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
Handle<Cell> cell = isolate()->factory()->NewCell(target);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
@ -5234,11 +5184,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
Handle<Map> map = map_set->last();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
if (instr->hydrogen()->has_migration_target()) {
__ Branch(deferred->entry());
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
DeoptimizeIf(al, instr->environment());
DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
}
__ bind(&success);
@ -5675,6 +5625,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}

19
deps/v8/src/mips/lithium-codegen-mips.h

@ -65,7 +65,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@ -290,10 +291,13 @@ class LCodeGen BASE_EMBEDDED {
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(Translation* translation,
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32);
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@ -319,6 +323,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@ -373,12 +378,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@ -435,6 +434,8 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,

91
deps/v8/src/mips/lithium-mips.cc

@ -598,8 +598,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
&argument_index_accumulator,
&objects_to_materialize));
return instr;
}
@ -818,7 +820,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
if (phi->merged_index() < last_environment->length()) {
if (phi->HasMergedIndex()) {
last_environment->SetValueAt(phi->merged_index(), phi);
}
}
@ -888,6 +890,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@ -903,11 +906,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
argument_index_accumulator,
objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@ -922,16 +927,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
needs_arguments_object_materialization = true;
op = NULL;
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@ -942,15 +947,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
if (needs_arguments_object_materialization) {
HArgumentsObject* arguments = hydrogen_env->entry() == NULL
? graph()->GetArgumentsObject()
: hydrogen_env->entry()->arguments_object();
ASSERT(arguments->IsLinked());
for (int i = 1; i < arguments->arguments_count(); ++i) {
HValue* value = arguments->arguments_values()->at(i);
ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
LOperand* op = UseAny(value);
for (int i = object_index; i < objects_to_materialize->length(); ++i) {
HValue* object_to_materialize = objects_to_materialize->at(i);
int previously_materialized_object = -1;
for (int prev = 0; prev < i; ++prev) {
if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
previously_materialized_object = prev;
break;
}
}
int length = object_to_materialize->OperandCount();
bool is_arguments = object_to_materialize->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
continue;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* value = object_to_materialize->OperandAt(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else {
ASSERT(!value->IsPushArgument());
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
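The loop above iterates over a list that grows while it is being walked: any nested captured object found among an object's operands is appended to objects_to_materialize, and a linear scan over earlier entries detects duplicates. A generic sketch of that worklist idiom, with a hypothetical Obj type standing in for HValue:

```cpp
#include <cstdio>
#include <vector>

struct Obj { std::vector<Obj*> fields; };

void Materialize(std::vector<Obj*>* worklist) {
  for (size_t i = 0; i < worklist->size(); ++i) {  // list may grow in-loop
    Obj* o = (*worklist)[i];
    int dupe = -1;
    for (size_t prev = 0; prev < i; ++prev)        // linear duplicate scan
      if ((*worklist)[prev] == o) { dupe = static_cast<int>(prev); break; }
    if (dupe >= 0) { std::printf("dup of %d\n", dupe); continue; }
    std::printf("object %zu, %zu fields\n", i, o->fields.size());
    for (Obj* f : o->fields) worklist->push_back(f);  // enqueue nested objects
  }
}

int main() {
  Obj inner, outer{{&inner, &inner}};
  std::vector<Obj*> worklist{&outer};
  Materialize(&worklist);  // prints outer, then inner, then "dup of 1"
}
```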
@ -1389,8 +1412,6 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegisterOrConstant(right);
LOperand* remainder = TempRegister();
ASSERT(right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value());
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
@ -1607,9 +1628,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(
instr->right()->representation()));
ASSERT(instr->left()->representation().Equals(r));
ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@ -2046,23 +2066,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), a0);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, v0), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), a0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
@ -2360,6 +2363,12 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
// There are no real uses of a captured object.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());

47
deps/v8/src/mips/lithium-mips.h

@ -126,7 +126,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@ -207,7 +206,10 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false) { }
bit_field_(IsCallBits::encode(false)) {
set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@ -246,20 +248,30 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
// The 31-bit PositionBits field stores the int position value, which may be
// RelocInfo::kNoPosition (-1). The accessors add or subtract 1 so that the
// encoded position in bit_field_ is always >= 0 and fits into the 31-bit
// PositionBits field.
void set_position(int pos) {
bit_field_ = PositionBits::update(bit_field_, pos + 1);
}
int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
void MarkAsCall() { is_call_ = true; }
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@ -283,10 +295,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
bool is_call_;
int bit_field_;
};
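This hunk packs the former bool is_call_ and a 31-bit position into one int via BitField. A self-contained sketch of the encoding, reimplementing a minimal BitField rather than using V8's template:

```cpp
#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
};

using IsCallBits = BitField<bool, 0, 1>;     // bit 0: is-call flag
using PositionBits = BitField<int, 1, 31>;   // bits 1..31: position + 1

int main() {
  const int kNoPosition = -1;
  uint32_t bits = IsCallBits::encode(false);
  bits = PositionBits::update(bits, kNoPosition + 1);  // store position + 1
  assert(PositionBits::decode(bits) - 1 == kNoPosition);
  bits = IsCallBits::update(bits, true);               // flag and position coexist
  assert(IsCallBits::decode(bits));
  assert(PositionBits::decode(bits) - 1 == kNoPosition);
}
```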
@ -1489,19 +1504,6 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedFieldPolymorphic(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@ -2690,7 +2692,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);

32
deps/v8/src/mips/macro-assembler-mips.cc

@ -2923,9 +2923,7 @@ void MacroAssembler::Allocate(int object_size,
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
li(topaddr, Operand(allocation_top));
li(obj_size_reg, Operand(object_size));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@ -2944,9 +2942,23 @@ void MacroAssembler::Allocate(int object_size,
lw(t9, MemOperand(topaddr, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
And(scratch2, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, scratch2, Operand(zero_reg));
li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(scratch2, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
Addu(scratch2, result, Operand(obj_size_reg));
Addu(scratch2, result, Operand(object_size));
Branch(gc_required, Ugreater, scratch2, Operand(t9));
sw(scratch2, MemOperand(topaddr));
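The new DOUBLE_ALIGNMENT block writes a one-word filler when the allocation top is only pointer-aligned, then bumps the top by half a double so subsequent doubles land on 8-byte boundaries. The arithmetic as plain C++, with constants assumed to match MIPS32:

```cpp
#include <cassert>
#include <cstdint>

const uintptr_t kPointerSize = 4;  // MIPS32, as in this file
const uintptr_t kDoubleSize = 8;
const uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;

uintptr_t AlignForDouble(uintptr_t top, bool* wrote_filler) {
  *wrote_filler = false;
  if ((top & kDoubleAlignmentMask) != 0) {  // misaligned by one word
    // The real code stores the one-pointer filler map at 'top' here.
    *wrote_filler = true;
    top += kDoubleSize / 2;                 // skip one pointer-sized word
  }
  return top;
}

int main() {
  bool filler;
  assert(AlignForDouble(0x1000, &filler) == 0x1000 && !filler);
  assert(AlignForDouble(0x1004, &filler) == 0x1008 && filler);
}
```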
@ -3014,6 +3026,20 @@ void MacroAssembler::Allocate(Register object_size,
lw(t9, MemOperand(topaddr, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
And(scratch2, result, Operand(kDoubleAlignmentMask));
Label aligned;
Branch(&aligned, eq, scratch2, Operand(zero_reg));
li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
sw(scratch2, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.

14
deps/v8/src/mips/macro-assembler-mips.h

@ -51,20 +51,6 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated memory already tagged as a heap object.
TAG_OBJECT = 1 << 0,
// The result register already contains the allocation top in new space.
RESULT_CONTAINS_TOP = 1 << 1,
// The requested size of the space to allocate is given in words instead of
// bytes.
SIZE_IN_WORDS = 1 << 2
};
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.

84
deps/v8/src/objects-printer.cc

@ -37,9 +37,6 @@ namespace internal {
#ifdef OBJECT_PRINT
static const char* TypeToString(InstanceType type);
void MaybeObject::Print() {
Print(stdout);
}
@ -509,83 +506,12 @@ void JSModule::JSModulePrint(FILE* out) {
static const char* TypeToString(InstanceType type) {
switch (type) {
case INVALID_TYPE: return "INVALID";
case MAP_TYPE: return "MAP";
case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
case SYMBOL_TYPE: return "SYMBOL";
case STRING_TYPE: return "TWO_BYTE_STRING";
case ASCII_STRING_TYPE: return "ASCII_STRING";
case CONS_STRING_TYPE:
case CONS_ASCII_STRING_TYPE:
return "CONS_STRING";
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ASCII_STRING_TYPE:
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return "EXTERNAL_STRING";
case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ASCII_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return "SHORT_EXTERNAL_STRING";
case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING";
case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING";
case CONS_INTERNALIZED_STRING_TYPE: return "CONS_INTERNALIZED_STRING";
case CONS_ASCII_INTERNALIZED_STRING_TYPE:
return "CONS_ASCII_INTERNALIZED_STRING";
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
return "EXTERNAL_INTERNALIZED_STRING";
case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
return "SHORT_EXTERNAL_INTERNALIZED_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
case FREE_SPACE_TYPE: return "FREE_SPACE";
case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_INT_ARRAY";
case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
case FILLER_TYPE: return "FILLER";
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
case ODDBALL_TYPE: return "ODDBALL";
case CELL_TYPE: return "CELL";
case PROPERTY_CELL_TYPE: return "PROPERTY_CELL";
case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
case JS_GENERATOR_OBJECT_TYPE: return "JS_GENERATOR_OBJECT";
case JS_MODULE_TYPE: return "JS_MODULE";
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
case JS_ARRAY_TYPE: return "JS_ARRAY";
case JS_PROXY_TYPE: return "JS_PROXY";
case JS_SET_TYPE: return "JS_SET";
case JS_MAP_TYPE: return "JS_MAP";
case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
case JS_WEAK_SET_TYPE: return "JS_WEAK_SET";
case JS_REGEXP_TYPE: return "JS_REGEXP";
case JS_VALUE_TYPE: return "JS_VALUE";
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
case JS_DATA_VIEW_TYPE: return "JS_DATA_VIEW";
case FOREIGN_TYPE: return "FOREIGN";
case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
default: return "UNKNOWN";
#define TYPE_TO_STRING(TYPE) case TYPE: return #TYPE;
INSTANCE_TYPE_LIST(TYPE_TO_STRING)
#undef TYPE_TO_STRING
}
UNREACHABLE();
return "UNKNOWN"; // Keep the compiler happy.
}
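The TypeToString rewrite replaces dozens of hand-written cases with a single X-macro expansion over INSTANCE_TYPE_LIST, so the function can never fall out of sync with the enum. The pattern in miniature, with a hypothetical COLOR_LIST:

```cpp
#include <cstdio>

#define COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)

enum Color {
#define DECLARE_ENUM(NAME) NAME,
  COLOR_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

static const char* ColorToString(Color c) {
  switch (c) {
#define TYPE_TO_STRING(NAME) case NAME: return #NAME;
    COLOR_LIST(TYPE_TO_STRING)
#undef TYPE_TO_STRING
  }
  return "UNKNOWN";  // keep the compiler happy
}

int main() { std::printf("%s\n", ColorToString(GREEN)); }
```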

17
deps/v8/src/objects.cc

@ -9222,6 +9222,7 @@ void JSFunction::MarkForLazyRecompilation() {
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
// No write barrier required, since the builtin is part of the root set.
@ -9232,10 +9233,8 @@ void JSFunction::MarkForParallelRecompilation() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
if (!FLAG_parallel_recompilation) {
JSFunction::MarkForLazyRecompilation();
return;
}
ASSERT(!shared()->is_generator());
ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Marking ");
PrintName();
@ -10637,7 +10636,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
case Translation::ARGUMENTS_OBJECT: {
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator.Next();
PrintF(out, "{object_index=%d}", object_index);
break;
}
case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
int args_length = iterator.Next();
PrintF(out, "{length=%d}", args_length);
break;
@ -11348,6 +11354,7 @@ bool DependentCode::Contains(DependencyGroup group, Code* code) {
void DependentCode::DeoptimizeDependentCodeGroup(
Isolate* isolate,
DependentCode::DependencyGroup group) {
ASSERT(AllowCodeDependencyChange::IsAllowed());
DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);

6
deps/v8/src/objects.h

@ -333,6 +333,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(CONS_STRING_TYPE) \
V(CONS_ASCII_STRING_TYPE) \
V(SLICED_STRING_TYPE) \
V(SLICED_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
@ -416,6 +417,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
@ -783,7 +786,6 @@ enum InstanceType {
// Pseudo-types
FIRST_TYPE = 0x0,
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NAME_TYPE = FIRST_TYPE,
LAST_NAME_TYPE = SYMBOL_TYPE,
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
@ -1074,7 +1076,7 @@ class MaybeObject BASE_EMBEDDED {
"bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
"bad value context for arguments value") \
V(kBailedOutDueToDependentMap, "bailed out due to dependent map") \
V(kBailedOutDueToDependencyChange, "bailed out due to dependency change") \
V(kBailoutWasNotPrepared, "bailout was not prepared") \
V(kBinaryStubGenerateFloatingPointCode, \
"BinaryStub_GenerateFloatingPointCode") \

74
deps/v8/src/optimizing-compiler-thread.cc

@ -60,12 +60,25 @@ void OptimizingCompilerThread::Run() {
OS::Sleep(FLAG_parallel_recompilation_delay);
}
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_->Signal();
switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
case CONTINUE:
break;
case STOP:
if (FLAG_trace_parallel_recompilation) {
time_spent_total_ = OS::Ticks() - epoch;
}
stop_semaphore_->Signal();
return;
case FLUSH:
// The main thread is blocked, waiting for the stop semaphore.
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
Release_Store(&queue_length_, static_cast<AtomicWord>(0));
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
stop_semaphore_->Signal();
// Return to start of consumer loop.
continue;
}
int64_t compiling_start = 0;
@ -82,7 +95,9 @@ void OptimizingCompilerThread::Run() {
void OptimizingCompilerThread::CompileNext() {
OptimizingCompiler* optimizing_compiler = NULL;
input_queue_.Dequeue(&optimizing_compiler);
bool result = input_queue_.Dequeue(&optimizing_compiler);
USE(result);
ASSERT(result);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
// The function may have already been optimized by OSR. Simply continue.
@ -102,26 +117,61 @@ void OptimizingCompilerThread::CompileNext() {
}
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
while (input_queue_.Dequeue(&optimizing_compiler)) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_->Wait();
CompilationInfo* info = optimizing_compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
function->ReplaceCode(function->shared()->code());
}
delete info;
}
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
while (output_queue_.Dequeue(&optimizing_compiler)) {
CompilationInfo* info = optimizing_compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
function->ReplaceCode(function->shared()->code());
}
delete info;
}
}
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
FlushOutputQueue(true);
}
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
if (FLAG_parallel_recompilation_delay != 0) {
// A barrier when loading the queue length is not necessary, since the
// write happens in CompileNext on the same thread.
// This is used only for testing.
while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
InstallOptimizedFunctions();
} else {
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
while (input_queue_.Dequeue(&optimizing_compiler)) {
delete optimizing_compiler->info();
}
while (output_queue_.Dequeue(&optimizing_compiler)) {
delete optimizing_compiler->info();
}
FlushInputQueue(false);
FlushOutputQueue(false);
}
if (FLAG_trace_parallel_recompilation) {

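The new Flush() path turns stop_thread_ into a three-state flag: the main thread stores FLUSH and blocks on the stop semaphore, while the compiler thread drains its input queue, resets the flag to CONTINUE, signals, and resumes. A simplified model of that handshake using std:: primitives instead of V8's platform layer (CompilerLoop would run on its own thread):

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>

enum StopFlag { CONTINUE, STOP, FLUSH };

struct Semaphore {
  std::mutex m; std::condition_variable cv; int count = 0;
  void Signal() { std::lock_guard<std::mutex> l(m); ++count; cv.notify_one(); }
  void Wait() {
    std::unique_lock<std::mutex> l(m);
    cv.wait(l, [&] { return count > 0; });
    --count;
  }
};

std::atomic<int> stop_thread{CONTINUE};
Semaphore input_sem, stop_sem;
std::queue<int> input_queue;
std::mutex queue_mutex;

void CompilerLoop() {
  for (;;) {
    input_sem.Wait();  // one signal per queued job or per command
    switch (stop_thread.load()) {
      case CONTINUE: break;
      case STOP: stop_sem.Signal(); return;
      case FLUSH: {
        std::lock_guard<std::mutex> l(queue_mutex);
        while (!input_queue.empty()) input_queue.pop();  // drop pending jobs
        stop_thread.store(CONTINUE);
        stop_sem.Signal();
        continue;  // back to the start of the consumer loop
      }
    }
    std::lock_guard<std::mutex> l(queue_mutex);
    if (!input_queue.empty()) input_queue.pop();  // "compile" one job
  }
}

void Flush() {            // called from the main thread
  stop_thread.store(FLUSH);
  input_sem.Signal();
  stop_sem.Wait();        // blocks until the queue is drained
}
```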
11
deps/v8/src/optimizing-compiler-thread.h

@ -54,13 +54,13 @@ class OptimizingCompilerThread : public Thread {
install_mutex_(OS::CreateMutex()),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
void Run();
void Stop();
void CompileNext();
void Flush();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
@ -92,6 +92,13 @@ class OptimizingCompilerThread : public Thread {
}
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
void FlushInputQueue(bool restore_function_code);
void FlushOutputQueue(bool restore_function_code);
void CompileNext();
#ifdef DEBUG
int thread_id_;
Mutex* thread_id_mutex_;

2
deps/v8/src/profile-generator-inl.h

@ -92,6 +92,8 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
case OTHER:
case EXTERNAL:
return program_entry_;
case IDLE:
return idle_entry_;
default: return NULL;
}
}

6
deps/v8/src/profile-generator.cc

@ -220,7 +220,7 @@ double ProfileNode::GetTotalMillis() const {
void ProfileNode::Print(int indent) {
OS::Print("%5u %5u %*c %s%s #%d %d",
OS::Print("%5u %5u %*c %s%s %d #%d",
total_ticks_, self_ticks_,
indent, ' ',
entry_->name_prefix(),
@ -614,6 +614,8 @@ const char* const ProfileGenerator::kAnonymousFunctionName =
"(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
"(program)";
const char* const ProfileGenerator::kIdleEntryName =
"(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
"(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
@ -624,6 +626,8 @@ ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles),
program_entry_(
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
idle_entry_(
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
gc_entry_(
profiles->NewCodeEntry(Logger::BUILTIN_TAG,
kGarbageCollectorEntryName)),

2
deps/v8/src/profile-generator.h

@ -342,6 +342,7 @@ class ProfileGenerator {
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
static const char* const kIdleEntryName;
static const char* const kGarbageCollectorEntryName;
// Used to represent frames for which we have no reliable way to
// detect function.
@ -353,6 +354,7 @@ class ProfileGenerator {
CpuProfilesCollection* profiles_;
CodeMap code_map_;
CodeEntry* program_entry_;
CodeEntry* idle_entry_;
CodeEntry* gc_entry_;
CodeEntry* unresolved_entry_;

219
deps/v8/src/runtime.cc

@ -71,7 +71,10 @@
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/curramt.h"
#include "unicode/datefmt.h"
#include "unicode/dcfmtsym.h"
#include "unicode/decimfmt.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
#include "unicode/locid.h"
@ -79,7 +82,11 @@
#include "unicode/numsys.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
#include "unicode/ucol.h"
#include "unicode/ucurr.h"
#include "unicode/uloc.h"
#include "unicode/unum.h"
#include "unicode/uversion.h"
#endif
@ -3013,6 +3020,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
ASSERT(frame->function()->is_compiled());
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
@ -8487,8 +8495,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
while (function->IsMarkedForParallelRecompilation() ||
function->IsInRecompileQueue() ||
while (function->IsInRecompileQueue() ||
function->IsMarkedForInstallingRecompiledCode()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
@ -11220,6 +11227,7 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
isolate);
ASSERT(!value->IsTheHole());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
@ -11234,12 +11242,15 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
Handle<Object> value(frame_inspector->GetExpression(i), isolate);
if (value->IsTheHole()) continue;
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(isolate,
target,
Handle<String>(scope_info->StackLocalName(i)),
Handle<Object>(frame_inspector->GetExpression(i), isolate),
value,
NONE,
kNonStrictMode),
Handle<JSObject>());
@ -11266,6 +11277,7 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
ASSERT(!frame->GetParameter(i)->IsTheHole());
HandleScope scope(isolate);
Handle<Object> value = GetProperty(
isolate, target, Handle<String>(scope_info->ParameterName(i)));
@ -11274,6 +11286,7 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
if (frame->GetExpression(i)->IsTheHole()) continue;
HandleScope scope(isolate);
Handle<Object> value = GetProperty(
isolate, target, Handle<String>(scope_info->StackLocalName(i)));
@ -12008,8 +12021,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
JavaScriptFrameIterator frame_it(isolate, id);
JavaScriptFrame* frame = frame_it.frame();
Handle<JSFunction> fun =
Handle<JSFunction>(frame->function());
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>(frame->function()->shared());
Handle<SharedFunctionInfo>(fun->shared());
if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
return isolate->heap()->undefined_value();
}
Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
int len = 0;
@ -12022,6 +12042,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
int current_statement_pos = break_location_iterator.statement_position();
while (!break_location_iterator.Done()) {
if (break_location_iterator.pc() > frame->pc()) {
if (break_location_iterator.IsStepInLocation(isolate)) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
JSObject::SetElement(array, len,
@ -12029,6 +12050,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
NONE, kNonStrictMode);
len++;
}
}
// Advance iterator.
break_location_iterator.Next();
if (current_statement_pos !=
@ -13603,9 +13625,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
v8::Utils::ToLocal(local_object));
// Make object handle weak so we can delete the data format once GC kicks in.
wrapper.MakeWeak<void>(NULL, &DateFormat::DeleteDateFormat);
Handle<Object> result = Utils::OpenPersistent(wrapper);
wrapper.ClearAndLeak();
return *result;
return *local_object;
}
@ -13665,6 +13686,192 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
}
return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<ObjectTemplateInfo> number_format_template =
I18N::GetTemplate(isolate);
// Create an empty object wrapper.
bool has_pending_exception = false;
Handle<JSObject> local_object = Execution::InstantiateObject(
number_format_template, &has_pending_exception);
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
// Set number formatter as internal field of the resulting JS object.
icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
isolate, locale, options, resolved);
if (!number_format) return isolate->ThrowIllegalOperation();
local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
RETURN_IF_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
local_object,
isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")),
isolate->factory()->NewStringFromAscii(CStrVector("valid")),
NONE));
Persistent<v8::Object> wrapper(reinterpret_cast<v8::Isolate*>(isolate),
v8::Utils::ToLocal(local_object));
// Make object handle weak so we can delete the number format once GC kicks
// in.
wrapper.MakeWeak<void>(NULL, &NumberFormat::DeleteNumberFormat);
wrapper.ClearAndLeak();
return *local_object;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
bool has_pending_exception = false;
double value = Execution::ToNumber(number, &has_pending_exception)->Number();
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
icu::DecimalFormat* number_format =
NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
if (!number_format) return isolate->ThrowIllegalOperation();
icu::UnicodeString result;
number_format->format(value, result);
return *isolate->factory()->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(result.getBuffer()),
result.length()));
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
icu::DecimalFormat* number_format =
NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
if (!number_format) return isolate->ThrowIllegalOperation();
UErrorCode status = U_ZERO_ERROR;
icu::Formattable result;
// ICU 4.6 doesn't support the parseCurrency call. We need to wait for
// ICU 49 to be part of Chrome.
// TODO(cira): Include currency parsing code using parseCurrency call.
// We need to check if the formatter parses all currencies or only the
// one it was constructed with (it will impact the API - how to return ISO
// code and the value).
number_format->parse(u_number, result, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
switch (result.getType()) {
case icu::Formattable::kDouble:
return *isolate->factory()->NewNumber(result.getDouble());
case icu::Formattable::kLong:
return *isolate->factory()->NewNumberFromInt(result.getLong());
case icu::Formattable::kInt64:
return *isolate->factory()->NewNumber(
static_cast<double>(result.getInt64()));
default:
return isolate->heap()->undefined_value();
}
}
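Stripped of the V8 wrapper objects, the two runtime functions above reduce to a plain ICU format/parse round trip. A stand-alone sketch of that usage (error handling abbreviated; the locale is chosen arbitrarily):

```cpp
#include <unicode/fmtable.h>
#include <unicode/locid.h>
#include <unicode/numfmt.h>
#include <unicode/unistr.h>
#include <cstdio>
#include <string>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::NumberFormat* nf =
      icu::NumberFormat::createInstance(icu::Locale("de_DE"), status);
  if (U_FAILURE(status)) return 1;

  icu::UnicodeString formatted;
  nf->format(1234.5, formatted);         // e.g. "1.234,5" under de_DE rules

  icu::Formattable parsed;
  nf->parse(formatted, parsed, status);  // round-trip back to a number
  if (U_SUCCESS(status) && parsed.getType() == icu::Formattable::kDouble)
    std::printf("%f\n", parsed.getDouble());

  std::string utf8;
  formatted.toUTF8String(utf8);
  std::printf("%s\n", utf8.c_str());
  delete nf;
  return 0;
}
```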
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
// Create an empty object wrapper.
bool has_pending_exception = false;
Handle<JSObject> local_object = Execution::InstantiateObject(
collator_template, &has_pending_exception);
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
// Set collator as internal field of the resulting JS object.
icu::Collator* collator = Collator::InitializeCollator(
isolate, locale, options, resolved);
if (!collator) return isolate->ThrowIllegalOperation();
local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
RETURN_IF_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
local_object,
isolate->factory()->NewStringFromAscii(CStrVector("collator")),
isolate->factory()->NewStringFromAscii(CStrVector("valid")),
NONE));
Persistent<v8::Object> wrapper(reinterpret_cast<v8::Isolate*>(isolate),
v8::Utils::ToLocal(local_object));
// Make object handle weak so we can delete the collator once GC kicks in.
wrapper.MakeWeak<void>(NULL, &Collator::DeleteCollator);
wrapper.ClearAndLeak();
return *local_object;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
if (!collator) return isolate->ThrowIllegalOperation();
v8::String::Value string_value1(v8::Utils::ToLocal(string1));
v8::String::Value string_value2(v8::Utils::ToLocal(string2));
const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
UErrorCode status = U_ZERO_ERROR;
UCollationResult result = collator->compare(u_string1,
string_value1.length(),
u_string2,
string_value2.length(),
status);
if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
return *isolate->factory()->NewNumberFromInt(result);
}
#endif // V8_I18N_SUPPORT

9
deps/v8/src/runtime.h

@ -548,6 +548,15 @@ namespace internal {
F(CreateDateTimeFormat, 3, 1) \
F(InternalDateFormat, 2, 1) \
F(InternalDateParse, 2, 1) \
\
/* Number format and parse. */ \
F(CreateNumberFormat, 3, 1) \
F(InternalNumberFormat, 2, 1) \
F(InternalNumberParse, 2, 1) \
\
/* Collator. */ \
F(CreateCollator, 3, 1) \
F(InternalCompare, 3, 1) \
#else
#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)

3
deps/v8/src/sampler.cc

@ -619,8 +619,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
// Avoid collecting traces while doing GC.
if (state == GC) return;
const Address js_entry_sp =
Isolate::js_entry_sp(isolate->thread_local_top());
Address js_entry_sp = isolate->js_entry_sp();
if (js_entry_sp == 0) {
// Not executing JS now.
return;

9
deps/v8/src/splay-tree-inl.h

@ -90,6 +90,12 @@ bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
}
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Contains(const Key& key) {
return FindInternal(key);
}
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
if (FindInternal(key)) {
@ -293,9 +299,10 @@ void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
if (root_ == NULL) return;
// Pre-allocate some space for tiny trees.
List<Node*, Allocator> nodes_to_visit(10, allocator_);
if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
nodes_to_visit.Add(root_, allocator_);
int pos = 0;
while (pos < nodes_to_visit.length()) {
Node* node = nodes_to_visit[pos++];

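The ForEachNode fix above drops a null re-check made redundant by the early return; the traversal itself is an iterative walk over a list that grows as children are appended. The same pattern over a plain binary node type, as a sketch:

```cpp
#include <cstdio>
#include <vector>

struct Node { int key; Node* left; Node* right; };

template <class Callback>
void ForEachNode(Node* root, Callback callback) {
  if (root == nullptr) return;        // early return makes re-checking moot
  std::vector<Node*> to_visit{root};  // pre-seeded worklist, no recursion
  for (size_t pos = 0; pos < to_visit.size(); ++pos) {
    Node* node = to_visit[pos];
    if (node->left) to_visit.push_back(node->left);
    if (node->right) to_visit.push_back(node->right);
    callback(node);
  }
}

int main() {
  Node l{1, nullptr, nullptr}, r{3, nullptr, nullptr}, root{2, &l, &r};
  ForEachNode(&root, [](Node* n) { std::printf("%d\n", n->key); });
}
```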
14
deps/v8/src/splay-tree.h

@ -39,9 +39,9 @@ namespace internal {
//
// typedef Key: the key type
// typedef Value: the value type
// static const kNoKey: the dummy key used when no key is set
// static const kNoValue: the dummy value used to initialize nodes
// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
// static const Key kNoKey: the dummy key used when no key is set
// static Value kNoValue(): the dummy value used to initialize nodes
// static int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
//
// The tree is also parameterized by an allocation policy
// (Allocator). The policy is used for allocating lists in the C free
@ -74,6 +74,11 @@ class SplayTree {
UNREACHABLE();
}
AllocationPolicy allocator() { return allocator_; }
// Checks if there is a mapping for the key.
bool Contains(const Key& key);
// Inserts the given key in this tree with the given value. Returns
// true if a node was inserted, otherwise false. If found the locator
// is enabled and provides access to the mapping for the key.
@ -104,6 +109,9 @@ class SplayTree {
// Remove the node with the given key from the tree.
bool Remove(const Key& key);
// Remove all keys from the tree.
void Clear() { ResetRoot(); }
bool is_empty() { return root_ == NULL; }
// Perform the splay operation for the given key. Moves the node with

5
deps/v8/src/types.h

@ -303,6 +303,11 @@ struct Bounds {
explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
// Unrestricted bounds.
static Bounds Unbounded(Isolate* isl) {
return Bounds(Type::None(), Type::Any(), isl);
}
// Meet: both b1 and b2 are known to hold.
static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
return Bounds(

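The new Bounds::Unbounded() is the identity element for Both(): meeting (None, Any) with any bounds leaves them unchanged, since Both joins the lower bounds and meets the upper bounds. A toy version over a linear chain of types, standing in for V8's handle-based type lattice:

```cpp
#include <algorithm>
#include <cassert>

enum TypeRank { kNone = 0, kSmi = 1, kNumber = 2, kAny = 3 };  // a toy chain

struct Bounds {
  TypeRank lower, upper;
  static Bounds Unbounded() { return Bounds{kNone, kAny}; }
  // Meet: both b1 and b2 are known to hold, so take the tighter bounds.
  static Bounds Both(Bounds b1, Bounds b2) {
    return Bounds{std::max(b1.lower, b2.lower), std::min(b1.upper, b2.upper)};
  }
};

int main() {
  Bounds smi_to_number{kSmi, kNumber};
  Bounds met = Bounds::Both(smi_to_number, Bounds::Unbounded());
  assert(met.lower == kSmi && met.upper == kNumber);  // identity for Both
}
```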
236
deps/v8/src/typing.cc

@ -40,7 +40,8 @@ AstTyper::AstTyper(CompilationInfo* info)
Handle<Code>(info->closure()->shared()->code()),
Handle<Context>(info->closure()->context()->native_context()),
info->isolate(),
info->zone()) {
info->zone()),
store_(info->zone()) {
InitializeAstVisitor();
}
@ -79,12 +80,16 @@ void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
RECURSE(Visit(stmt));
if (stmt->IsJump()) break;
}
}
void AstTyper::VisitBlock(Block* stmt) {
RECURSE(VisitStatements(stmt->statements()));
if (stmt->labels() != NULL) {
store_.Forget(); // Control may transfer here via 'break l'.
}
}
@ -98,30 +103,41 @@ void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
void AstTyper::VisitIfStatement(IfStatement* stmt) {
RECURSE(Visit(stmt->condition()));
RECURSE(Visit(stmt->then_statement()));
RECURSE(Visit(stmt->else_statement()));
// Collect type feedback.
if (!stmt->condition()->ToBooleanIsTrue() &&
!stmt->condition()->ToBooleanIsFalse()) {
stmt->condition()->RecordToBooleanTypeFeedback(oracle());
}
RECURSE(Visit(stmt->condition()));
Effects then_effects = EnterEffects();
RECURSE(Visit(stmt->then_statement()));
ExitEffects();
Effects else_effects = EnterEffects();
RECURSE(Visit(stmt->else_statement()));
ExitEffects();
then_effects.Alt(else_effects);
store_.Seq(then_effects);
}
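VisitIfStatement now types each branch against its own effect set, merges the two with Alt() (either branch may have executed), and sequences the merge into the store with Seq(). A toy model of those two operators, hypothetical and much simpler than V8's Effects class:

```cpp
#include <cstdio>
#include <map>
#include <string>

struct Effects {
  // variable -> typing fact; a missing entry means "unchanged".
  std::map<std::string, std::string> facts;

  // Alt: keep a fact only where both alternatives agree; a variable
  // assigned in just one branch degrades to the unknown fact "Any".
  void Alt(const Effects& other) {
    for (auto& f : facts) {
      auto o = other.facts.find(f.first);
      if (o == other.facts.end() || o->second != f.second) f.second = "Any";
    }
    for (const auto& o : other.facts)
      facts.emplace(o.first, "Any");  // assigned only in the other branch
  }

  // Seq: later facts overwrite earlier ones.
  void Seq(const Effects& later) {
    for (const auto& f : later.facts) facts[f.first] = f.second;
  }
};

int main() {
  Effects store, then_effects, else_effects;
  store.facts["x"] = "Any";
  then_effects.facts["x"] = "Smi";
  then_effects.facts["y"] = "Number";
  else_effects.facts["x"] = "Smi";  // both branches narrow x; only one sets y
  then_effects.Alt(else_effects);   // x stays Smi, y degrades to Any
  store.Seq(then_effects);
  std::printf("x = %s\n", store.facts["x"].c_str());
}
```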
void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
// TODO(rossberg): is it worth having a non-termination effect?
}
void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
// TODO(rossberg): is it worth having a non-termination effect?
}
void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
RECURSE(Visit(stmt->expression()));
// Collect type feedback.
// TODO(rossberg): we only need this for inlining into test contexts...
stmt->expression()->RecordToBooleanTypeFeedback(oracle());
RECURSE(Visit(stmt->expression()));
// TODO(rossberg): is it worth having a non-termination effect?
}
@ -133,14 +149,18 @@ void AstTyper::VisitWithStatement(WithStatement* stmt) {
void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
RECURSE(Visit(stmt->tag()));
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchStatement::SwitchType switch_type = stmt->switch_type();
Effects local_effects(zone());
bool complex_effects = false; // True for label effects or fall-through.
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
Effects clause_effects = EnterEffects();
if (!clause->is_default()) {
Expression* label = clause->label();
RECURSE(Visit(label));
SwitchStatement::SwitchType label_switch_type =
label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
@ -149,13 +169,32 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
switch_type = label_switch_type;
else if (switch_type != label_switch_type)
switch_type = SwitchStatement::GENERIC_SWITCH;
RECURSE(Visit(label));
if (!clause_effects.IsEmpty()) complex_effects = true;
}
ZoneList<Statement*>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
ExitEffects();
if (stmts->is_empty() || stmts->last()->IsJump()) {
local_effects.Alt(clause_effects);
} else {
complex_effects = true;
}
RECURSE(VisitStatements(clause->statements()));
}
if (complex_effects) {
store_.Forget(); // Reached this in unknown state.
} else {
store_.Seq(local_effects);
}
if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
switch_type = SwitchStatement::GENERIC_SWITCH;
stmt->set_switch_type(switch_type);
// Collect type feedback.
// TODO(rossberg): can we eliminate this special case and extra loop?
if (switch_type == SwitchStatement::SMI_SWITCH) {
for (int i = 0; i < clauses->length(); ++i) {
@ -168,22 +207,31 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
RECURSE(Visit(stmt->body()));
RECURSE(Visit(stmt->cond()));
// Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
}
// TODO(rossberg): refine the unconditional Forget (here and elsewhere) by
// computing the set of variables assigned in only some of the origins of the
// control transfer (such as the loop body here).
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->body()));
RECURSE(Visit(stmt->cond()));
store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
RECURSE(Visit(stmt->cond()));
RECURSE(Visit(stmt->body()));
// Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
}
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->cond()));
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via termination or 'break'.
}
@ -191,45 +239,65 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) {
RECURSE(Visit(stmt->init()));
}
store_.Forget(); // Control may transfer here via looping.
if (stmt->cond() != NULL) {
RECURSE(Visit(stmt->cond()));
// Collect type feedback.
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
RECURSE(Visit(stmt->cond()));
}
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via 'continue'.
if (stmt->next() != NULL) {
RECURSE(Visit(stmt->next()));
}
store_.Forget(); // Control may transfer here via termination or 'break'.
}
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
stmt->RecordTypeFeedback(oracle());
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->body()));
stmt->RecordTypeFeedback(oracle());
store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
RECURSE(Visit(stmt->iterable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
Effects try_effects = EnterEffects();
RECURSE(Visit(stmt->try_block()));
ExitEffects();
Effects catch_effects = EnterEffects();
store_.Forget(); // Control may transfer here via 'throw'.
RECURSE(Visit(stmt->catch_block()));
ExitEffects();
try_effects.Alt(catch_effects);
store_.Seq(try_effects);
// At this point, only variables that were reassigned in the catch block are
// still remembered.
}
void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
RECURSE(Visit(stmt->try_block()));
store_.Forget(); // Control may transfer here via 'throw'.
RECURSE(Visit(stmt->finally_block()));
}
void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
store_.Forget(); // May do whatever.
}
@ -242,11 +310,18 @@ void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
void AstTyper::VisitConditional(Conditional* expr) {
// Collect type feedback.
expr->condition()->RecordToBooleanTypeFeedback(oracle());
RECURSE(Visit(expr->condition()));
Effects then_effects = EnterEffects();
RECURSE(Visit(expr->then_expression()));
ExitEffects();
Effects else_effects = EnterEffects();
RECURSE(Visit(expr->else_expression()));
expr->condition()->RecordToBooleanTypeFeedback(oracle());
ExitEffects();
then_effects.Alt(else_effects);
store_.Seq(then_effects);
NarrowType(expr, Bounds::Either(
expr->then_expression()->bounds(),
@ -255,7 +330,10 @@ void AstTyper::VisitConditional(Conditional* expr) {
void AstTyper::VisitVariableProxy(VariableProxy* expr) {
// TODO(rossberg): typing of variables
Variable* var = expr->var();
if (var->IsStackAllocated()) {
NarrowType(expr, store_.LookupBounds(variable_index(var)));
}
}
@ -274,8 +352,8 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
for (int i = 0; i < properties->length(); ++i) {
ObjectLiteral::Property* prop = properties->at(i);
RECURSE(Visit(prop->value()));
// Collect type feedback.
if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
!CompileTimeValue::IsCompileTimeValue(prop->value())) ||
prop->kind() == ObjectLiteral::Property::COMPUTED) {
@ -283,6 +361,8 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->RecordTypeFeedback(oracle());
}
}
RECURSE(Visit(prop->value()));
}
NarrowType(expr, Bounds(Type::Object(), isolate_));
@ -303,29 +383,33 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
void AstTyper::VisitAssignment(Assignment* expr) {
// TODO(rossberg): Can we clean this up?
if (expr->is_compound()) {
RECURSE(Visit(expr->binary_operation()));
// Collect type feedback.
Expression* target = expr->target();
Property* prop = target->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
if (!prop->key()->IsPropertyName()) { // i.e., keyed
expr->RecordTypeFeedback(oracle(), zone());
}
}
RECURSE(Visit(expr->binary_operation()));
NarrowType(expr, expr->binary_operation()->bounds());
} else {
// Collect type feedback.
if (expr->target()->IsProperty()) {
expr->RecordTypeFeedback(oracle(), zone());
}
RECURSE(Visit(expr->target()));
RECURSE(Visit(expr->value()));
if (expr->target()->AsProperty()) {
expr->RecordTypeFeedback(oracle(), zone());
NarrowType(expr, expr->value()->bounds());
}
NarrowType(expr, expr->value()->bounds());
VariableProxy* proxy = expr->target()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
}
// TODO(rossberg): handle target variables
}
@ -333,35 +417,31 @@ void AstTyper::VisitYield(Yield* expr) {
RECURSE(Visit(expr->generator_object()));
RECURSE(Visit(expr->expression()));
// We don't know anything about the type.
// We don't know anything about the result type.
}
void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
// TODO(rossberg): is it worth having a non-termination effect?
NarrowType(expr, Bounds(Type::None(), isolate_));
}
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
expr->RecordTypeFeedback(oracle(), zone());
RECURSE(Visit(expr->obj()));
RECURSE(Visit(expr->key()));
expr->RecordTypeFeedback(oracle(), zone());
// We don't know anything about the type.
// We don't know anything about the result type.
}
void AstTyper::VisitCall(Call* expr) {
RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE(Visit(arg));
}
// Collect type feedback.
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
if (prop != NULL) {
@ -371,11 +451,26 @@ void AstTyper::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
}
// We don't know anything about the type.
RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE(Visit(arg));
}
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
store_.Forget(); // Eval could do whatever to local variables.
}
// We don't know anything about the result type.
}
void AstTyper::VisitCallNew(CallNew* expr) {
// Collect type feedback.
expr->RecordTypeFeedback(oracle());
RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
@ -383,9 +478,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
expr->RecordTypeFeedback(oracle());
// We don't know anything about the type.
// We don't know anything about the result type.
}
@ -396,19 +489,19 @@ void AstTyper::VisitCallRuntime(CallRuntime* expr) {
RECURSE(Visit(arg));
}
// We don't know anything about the type.
// We don't know anything about the result type.
}
void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
RECURSE(Visit(expr->expression()));
// Collect type feedback.
if (expr->op() == Token::NOT) {
// TODO(rossberg): only do in test or value context.
expr->expression()->RecordToBooleanTypeFeedback(oracle());
}
RECURSE(Visit(expr->expression()));
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
@ -427,22 +520,25 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
RECURSE(Visit(expr->expression()));
// Collect type feedback.
expr->RecordTypeFeedback(oracle(), zone());
Property* prop = expr->expression()->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
}
RECURSE(Visit(expr->expression()));
NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
}
}
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
// Collect type feedback.
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
@ -458,15 +554,29 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
NarrowType(expr, expr->right()->bounds());
break;
case Token::OR:
case Token::AND:
case Token::AND: {
Effects left_effects = EnterEffects();
RECURSE(Visit(expr->left()));
ExitEffects();
Effects right_effects = EnterEffects();
RECURSE(Visit(expr->right()));
ExitEffects();
left_effects.Alt(right_effects);
store_.Seq(left_effects);
NarrowType(expr, Bounds::Either(
expr->left()->bounds(), expr->right()->bounds(), isolate_));
break;
}
case Token::BIT_OR:
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
Type* upper = Type::Union(
expr->left()->bounds().upper, expr->right()->bounds().upper);
if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
@ -476,12 +586,18 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::BIT_XOR:
case Token::SHL:
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
@ -501,6 +617,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MUL:
case Token::DIV:
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
break;
default:
@ -510,9 +628,6 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
// Collect type feedback.
Handle<Type> left_type, right_type, combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
@ -521,6 +636,9 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
NarrowLowerType(expr->right(), right_type);
expr->set_combined_type(combined_type);
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Boolean(), isolate_));
}
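The || / && handling above leans on two combinators from the new effects module: Seq composes effects that happen in sequence, while Alt joins the effects of alternative control paths. A minimal sketch of the Alt join, with a hypothetical VarBounds type standing in for the real bounds lattice (illustration only, not the v8::internal::Effects API):

#include <algorithm>
#include <map>

// Hypothetical stand-ins; the real templates live in effects.h.
struct VarBounds { int lower, upper; };

struct SketchEffects {
  std::map<int, VarBounds> vars;  // variable index -> bounds on this path

  // Alt: control may have taken either path, so a variable's possible
  // bounds after the join are the union of the bounds from both paths.
  void Alt(const SketchEffects& other) {
    std::map<int, VarBounds>::const_iterator it;
    for (it = other.vars.begin(); it != other.vars.end(); ++it) {
      std::map<int, VarBounds>::iterator mine = vars.find(it->first);
      if (mine == vars.end()) {
        vars.insert(*it);
      } else {
        mine->second.lower = std::min(mine->second.lower, it->second.lower);
        mine->second.upper = std::max(mine->second.upper, it->second.upper);
      }
    }
  }
};

VisitBinaryOperation uses exactly this shape for || and &&: each operand is visited in its own pushed scope, the two scopes are joined with Alt, and the result is sequenced into the enclosing store with Seq.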

17
deps/v8/src/typing.h

@ -35,6 +35,7 @@
#include "compiler.h"
#include "type-info.h"
#include "types.h"
#include "effects.h"
#include "zone.h"
#include "scopes.h"
@ -57,8 +58,13 @@ class AstTyper: public AstVisitor {
private:
explicit AstTyper(CompilationInfo* info);
static const int kNoVar = INT_MIN;
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
CompilationInfo* info_;
TypeFeedbackOracle oracle_;
Store store_;
TypeFeedbackOracle* oracle() { return &oracle_; }
Zone* zone() const { return info_->zone(); }
@ -70,6 +76,17 @@ class AstTyper: public AstVisitor {
e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
}
Effects EnterEffects() {
store_ = store_.Push();
return store_.Top();
}
void ExitEffects() { store_ = store_.Pop(); }
int variable_index(Variable* var) {
return var->IsStackLocal() ? var->index() :
var->IsParameter() ? -var->index() : kNoVar;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitStatements(ZoneList<Statement*>* statements);

3
deps/v8/src/v8globals.h

@ -363,7 +363,8 @@ enum StateTag {
GC,
COMPILER,
OTHER,
EXTERNAL
EXTERNAL,
IDLE
};

4
deps/v8/src/version.cc

@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 20
#define BUILD_NUMBER 14
#define PATCH_LEVEL 1
#define BUILD_NUMBER 17
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0

285
deps/v8/src/x64/lithium-codegen-x64.cc

@ -278,6 +278,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
RecordAndUpdatePosition(instr->position());
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
@ -331,6 +333,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
int pos = instructions_->at(code->instruction_index())->position();
RecordAndUpdatePosition(pos);
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@ -497,37 +503,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// TODO(mstarzinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
if (value == NULL) {
int arguments_count = environment->values()->length() - translation_size;
translation->BeginArgumentsObject(arguments_count);
for (int i = 0; i < arguments_count; ++i) {
LOperand* value = environment->values()->at(translation_size + i);
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(translation_size + i),
environment->HasUint32ValueAt(translation_size + i));
}
continue;
}
AddToTranslation(translation,
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
}
}
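The materialization markers consumed here are expanded recursively by AddToTranslation below; the important detail is that a captured object appearing more than once in an environment is described in full only on its first encounter and becomes a back-reference afterwards. A sketch of that duplicate check, with a generic pointer standing in for the environment's object descriptors (hypothetical helper, not V8 code):

#include <cstddef>
#include <vector>

// Returns the index of an earlier occurrence of 'obj', or -1 if this is
// the first time it is materialized. This mirrors the linear scan that
// LChunkBuilder::CreateEnvironment performs over objects_to_materialize.
int FindDuplicateObject(const std::vector<const void*>& seen,
                        const void* obj) {
  for (size_t i = 0; i < seen.size(); ++i) {
    if (seen[i] == obj) return static_cast<int>(i);
  }
  return -1;
}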
void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32) {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer) {
if (op == LEnvironment::materialization_marker()) {
int object_index = (*object_index_pointer)++;
if (environment->ObjectIsDuplicateAt(object_index)) {
int dupe_of = environment->ObjectDuplicateOfAt(object_index);
translation->DuplicateObject(dupe_of);
return;
}
int object_length = environment->ObjectLengthAt(object_index);
if (environment->ObjectIsArgumentsAt(object_index)) {
translation->BeginArgumentsObject(object_length);
} else {
translation->BeginCapturedObject(object_length);
}
int dematerialized_index = *dematerialized_index_pointer;
int env_offset = environment->translation_size() + dematerialized_index;
*dematerialized_index_pointer += object_length;
for (int i = 0; i < object_length; ++i) {
LOperand* value = environment->values()->at(env_offset + i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(env_offset + i),
environment->HasUint32ValueAt(env_offset + i),
object_index_pointer,
dematerialized_index_pointer);
}
return;
}
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@ -667,7 +693,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
@ -859,6 +885,14 @@ void LCodeGen::RecordPosition(int position) {
}
void LCodeGen::RecordAndUpdatePosition(int position) {
if (position >= 0 && position != old_position_) {
masm()->positions_recorder()->RecordPosition(position);
old_position_ = position;
}
}
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@ -1866,6 +1900,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
__ j(cc, chunk_->GetAssemblyLabel(false_block));
}
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ int3();
}
@ -1996,6 +2037,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@ -2133,6 +2180,28 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (instr->hydrogen()->representation().IsTagged()) {
Register input_reg = ToRegister(instr->object());
__ Cmp(input_reg, factory()->the_hole_value());
EmitBranch(instr, equal);
return;
}
XMMRegister input_reg = ToDoubleRegister(instr->object());
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
__ addq(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
EmitBranch(instr, equal);
}
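The double path above relies on two facts: ucomisd of a register against itself raises the parity flag only for NaN, and the hole is a particular NaN whose upper 32 bits equal kHoleNanUpper32. The same check in portable C++, as a sketch (IsHoleNan is a hypothetical helper, not part of V8):

#include <stdint.h>
#include <string.h>

// A value is the hole iff it is a NaN carrying the hole bit pattern in
// its upper 32 bits; ordinary NaNs fail the second comparison.
bool IsHoleNan(double value, uint32_t hole_nan_upper32) {
  if (value == value) return false;  // not NaN: the parity_odd case above
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}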
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@ -2724,111 +2793,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
__ movq(result, FieldOperand(object, offset + type->instance_size()));
} else {
// Non-negative property indices are in the properties array.
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsConstant()) {
Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
__ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
Heap* heap = type->GetHeap();
while (*current != heap->null_value()) {
__ LoadHeapObject(result, current);
__ Cmp(FieldOperand(result, HeapObject::kMapOffset),
Handle<Map>(current->map()));
DeoptimizeIf(not_equal, env);
current =
Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
// prototype chain, which causes unbounded code generation.
static bool CompactEmit(SmallMapList* list,
Handle<String> name,
int i,
Isolate* isolate) {
Handle<Map> map = list->at(i);
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
return lookup.IsField() || lookup.IsConstant();
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(no_condition, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
Label done;
bool all_are_compact = true;
for (int i = 0; i < map_count; ++i) {
if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
all_are_compact = false;
break;
}
}
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(object, map, &check_passed);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
bool compact = all_are_compact ? true :
CompactEmit(instr->hydrogen()->types(), name, i, isolate());
__ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
if (need_generic) {
__ Move(rcx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
@ -3423,7 +3387,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
}
void LCodeGen::EmitInteger64MathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testq(input_reg, input_reg);
Label is_positive;
@ -3460,16 +3424,14 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
EmitInteger64MathAbs(instr);
EmitSmiMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
EmitIntegerMathAbs(instr);
__ Integer32ToSmi(input_reg, input_reg);
EmitSmiMathAbs(instr);
__ bind(deferred->exit());
}
}
@ -4583,36 +4545,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}
Label no_special_nan_handling;
Label done;
if (convert_hole) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
__ j(parity_odd, &no_special_nan_handling);
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
__ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ addq(rsp, Immediate(kDoubleSize));
__ Move(reg, factory()->the_hole_value());
__ jmp(&done);
__ bind(&canonicalize);
__ addq(rsp, Immediate(kDoubleSize));
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(input_reg, kScratchRegister);
}
__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
@ -4621,8 +4553,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
__ bind(&done);
}
@ -4666,22 +4596,20 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool allow_undefined_as_nan,
bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
if (!allow_undefined_as_nan) {
if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@ -4689,10 +4617,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
__ j(equal, &convert, Label::kNear);
__ CompareRoot(input_reg, Heap::kTheHoleValueRootIndex);
}
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@ -4805,19 +4729,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
mode = NUMBER_CANDIDATE_IS_SMI;
} else if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
}
}
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->allow_undefined_as_nan(),
instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@ -5421,6 +5338,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(no_condition, instr->environment(), type);
}

25
deps/v8/src/x64/lithium-codegen-x64.h

@ -67,7 +67,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@ -235,7 +236,6 @@ class LCodeGen BASE_EMBEDDED {
CallKind call_kind,
RDIState rdi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
int argc);
@ -246,10 +246,14 @@ class LCodeGen BASE_EMBEDDED {
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32);
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@ -266,7 +270,7 @@ class LCodeGen BASE_EMBEDDED {
uint32_t additional_index = 0);
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitInteger64MathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@ -279,11 +283,14 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
XMMRegister result,
@ -319,12 +326,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name,
LEnvironment* env);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
@ -381,6 +382,8 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)

96
deps/v8/src/x64/lithium-x64.cc

@ -601,8 +601,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
&argument_index_accumulator,
&objects_to_materialize));
return instr;
}
@ -812,7 +814,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
if (phi->merged_index() < last_environment->length()) {
if (phi->HasMergedIndex()) {
last_environment->SetValueAt(phi->merged_index(), phi);
}
}
@ -882,6 +884,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@ -897,11 +900,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
argument_index_accumulator,
objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@ -916,16 +921,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
needs_arguments_object_materialization = true;
op = NULL;
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@ -936,15 +941,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
if (needs_arguments_object_materialization) {
HArgumentsObject* arguments = hydrogen_env->entry() == NULL
? graph()->GetArgumentsObject()
: hydrogen_env->entry()->arguments_object();
ASSERT(arguments->IsLinked());
for (int i = 1; i < arguments->arguments_count(); ++i) {
HValue* value = arguments->arguments_values()->at(i);
ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
LOperand* op = UseAny(value);
for (int i = object_index; i < objects_to_materialize->length(); ++i) {
HValue* object_to_materialize = objects_to_materialize->at(i);
int previously_materialized_object = -1;
for (int prev = 0; prev < i; ++prev) {
if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
previously_materialized_object = prev;
break;
}
}
int length = object_to_materialize->OperandCount();
bool is_arguments = object_to_materialize->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
continue;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* value = object_to_materialize->OperandAt(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
objects_to_materialize->Add(value, zone());
op = LEnvironment::materialization_marker();
} else {
ASSERT(!value->IsPushArgument());
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
@ -1589,9 +1612,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(
instr->right()->representation()));
ASSERT(instr->left()->representation().Equals(r));
ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@ -1621,6 +1643,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return new(zone()) LCmpHoleAndBranch(object);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
@ -2036,23 +2065,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), rax);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, rax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
@ -2366,6 +2378,12 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
// There are no real uses of a captured object.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());

63
deps/v8/src/x64/lithium-x64.h

@ -74,6 +74,7 @@ class LCodeGen;
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@ -126,7 +127,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@ -207,7 +207,9 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false) { }
bit_field_(IsCallBits::encode(false)) {
set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() { }
@ -247,20 +249,30 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
// The 31-bit PositionBits field stores the int position value, which may
// be RelocInfo::kNoPosition (-1). The accessors add and subtract 1 so that
// the value encoded in bit_field_ is always >= 0 and fits into the 31-bit
// PositionBits field.
void set_position(int pos) {
bit_field_ = PositionBits::update(bit_field_, pos + 1);
}
int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
void MarkAsCall() { is_call_ = true; }
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@ -284,10 +296,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
bool is_call_;
int bit_field_;
};
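Folding the position into bit_field_ next to the is-call bit works because of the +1 bias described in the comment: kNoPosition (-1) encodes to 0, so every stored value is non-negative and fits the 31-bit field. A standalone sketch of the encode/decode pair (hypothetical free functions mirroring the accessors):

#include <assert.h>
#include <stdint.h>

static const int kNoPosition = -1;  // stands in for RelocInfo::kNoPosition

uint32_t EncodePosition(int pos) {
  assert(pos >= kNoPosition);             // bias keeps the result >= 0
  return static_cast<uint32_t>(pos + 1);  // fits PositionBits' 31 bits
}

int DecodePosition(uint32_t encoded) {
  return static_cast<int>(encoded) - 1;   // 0 decodes back to kNoPosition
}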
@ -823,8 +838,20 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
};
class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
};
@ -1451,19 +1478,6 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedFieldPolymorphic(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
LOperand* object() { return inputs_[0]; }
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@ -2627,7 +2641,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);

13
deps/v8/src/x64/stub-cache-x64.cc

@ -2246,26 +2246,25 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(rax, &not_smi);
__ SmiToInteger32(rax, rax);
// Branchless abs implementation; see:
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ movl(rbx, rax);
__ sarl(rbx, Immediate(kBitsPerInt - 1));
__ movq(rbx, rax);
__ sar(rbx, Immediate(kBitsPerPointer - 1));
// Do bitwise not or do nothing depending on rbx.
__ xorl(rax, rbx);
__ xor_(rax, rbx);
// Add 1 or do nothing depending on rbx.
__ subl(rax, rbx);
__ subq(rax, rbx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
// Smi case done.
__ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its value.
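The bit trick referenced above, written out in C++ for a 64-bit value (a sketch of the technique, not the stub itself):

#include <stdint.h>

// mask is all ones (-1) for negative inputs and all zeros otherwise, so
// xor-then-subtract negates without a branch. It still wraps for the most
// negative value, which is exactly why the stub keeps its slow path.
int64_t BranchlessAbs(int64_t v) {
  int64_t mask = v >> 63;  // arithmetic shift replicates the sign bit
  return (v ^ mask) - mask;
}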

6
deps/v8/src/zone-inl.h

@ -109,6 +109,12 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
template <typename T>
void* ZoneSplayTree<T>::operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
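The overload above enables the usual zone placement-new idiom for splay trees, along the lines of new(zone) ZoneSplayTree<Config>(zone). A minimal sketch of the idiom with hypothetical stand-in types (MiniZone is not V8's Zone):

#include <stddef.h>
#include <vector>

// Objects draw memory from the zone and are never deleted individually;
// everything is reclaimed at once when the zone is destroyed.
class MiniZone {
 public:
  ~MiniZone() {
    for (size_t i = 0; i < chunks_.size(); ++i) delete[] chunks_[i];
  }
  void* New(size_t size) {
    char* p = new char[size];
    chunks_.push_back(p);
    return p;
  }
 private:
  std::vector<char*> chunks_;
};

// Mirrors the operator new overload added for ZoneSplayTree above.
struct Node {
  static void* operator new(size_t size, MiniZone* zone) {
    return zone->New(size);
  }
};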

6
deps/v8/src/zone.h

@ -177,6 +177,7 @@ struct ZoneAllocationPolicy {
explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
INLINE(void* New(size_t size));
INLINE(static void Delete(void *pointer)) { }
Zone* zone() { return zone_; }
private:
Zone* zone_;
@ -201,7 +202,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
ZoneList(const ZoneList<T>& other, Zone* zone)
: List<T, ZoneAllocationPolicy>(other.length(),
ZoneAllocationPolicy(zone)) {
AddAll(other, ZoneAllocationPolicy(zone));
AddAll(other, zone);
}
// We add some convenience wrappers so that we can pass in a Zone
@ -209,8 +210,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
INLINE(void Add(const T& element, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
}
INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
Zone* zone)) {
INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone)) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {

53
deps/v8/test/cctest/cctest.h

@ -300,57 +300,4 @@ static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
}
// Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry
class RandomNumberGenerator {
public:
RandomNumberGenerator() {
init();
}
void init(uint32_t seed = 0x5688c73e) {
static const uint32_t phi = 0x9e3779b9;
c = 362436;
i = kQSize-1;
Q[0] = seed;
Q[1] = seed + phi;
Q[2] = seed + phi + phi;
for (unsigned j = 3; j < kQSize; j++) {
Q[j] = Q[j - 3] ^ Q[j - 2] ^ phi ^ j;
}
}
uint32_t next() {
uint64_t a = 18782;
uint32_t r = 0xfffffffe;
i = (i + 1) & (kQSize-1);
uint64_t t = a * Q[i] + c;
c = (t >> 32);
uint32_t x = static_cast<uint32_t>(t + c);
if (x < c) {
x++;
c++;
}
return (Q[i] = r - x);
}
uint32_t next(int max) {
return next() % max;
}
bool next(double threshold) {
ASSERT(threshold >= 0.0 && threshold <= 1.0);
if (threshold == 1.0) return true;
if (threshold == 0.0) return false;
uint32_t value = next() % 100000;
return threshold > static_cast<double>(value)/100000.0;
}
private:
static const uint32_t kQSize = 4096;
uint32_t Q[kQSize];
uint32_t c;
uint32_t i;
};
#endif // ifndef CCTEST_H_

14
deps/v8/test/cctest/test-api.cc

@ -19915,16 +19915,26 @@ THREADED_TEST(Regress260106) {
}
THREADED_TEST(JSONParse) {
THREADED_TEST(JSONParseObject) {
LocalContext context;
HandleScope scope(context->GetIsolate());
Local<Object> obj = v8::JSON::Parse(v8_str("{\"x\":42}"));
Local<Value> obj = v8::JSON::Parse(v8_str("{\"x\":42}"));
Handle<Object> global = context->Global();
global->Set(v8_str("obj"), obj);
ExpectString("JSON.stringify(obj)", "{\"x\":42}");
}
THREADED_TEST(JSONParseNumber) {
LocalContext context;
HandleScope scope(context->GetIsolate());
Local<Value> obj = v8::JSON::Parse(v8_str("42"));
Handle<Object> global = context->Global();
global->Set(v8_str("obj"), obj);
ExpectString("JSON.stringify(obj)", "42");
}
#ifndef WIN32
class ThreadInterruptTest {
public:

55
deps/v8/test/cctest/test-cpu-profiler.cc

@ -1324,3 +1324,58 @@ TEST(JsNative1JsNative2JsSample) {
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
// [Top down]:
// 6 0 (root) #0 1
// 3 3 (program) #0 2
// 3 3 (idle) #0 3
TEST(IdleTime) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8::String::New("my_profile");
cpu_profiler->StartCpuProfiling(profile_name);
i::Isolate* isolate = i::Isolate::Current();
i::ProfilerEventsProcessor* processor = isolate->cpu_profiler()->processor();
processor->AddCurrentStack(isolate);
cpu_profiler->SetIdle(true);
for (int i = 0; i < 3; i++) {
processor->AddCurrentStack(isolate);
}
cpu_profiler->SetIdle(false);
processor->AddCurrentStack(isolate);
const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
CHECK_NE(NULL, profile);
// Dump the collected profile for better diagnostics in case of failure.
reinterpret_cast<i::CpuProfile*>(
const_cast<v8::CpuProfile*>(profile))->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
names[2] = v8::String::New(ProfileGenerator::kIdleEntryName);
CheckChildrenNames(root, names);
const v8::CpuProfileNode* programNode =
GetChild(root, ProfileGenerator::kProgramEntryName);
CHECK_EQ(0, programNode->GetChildrenCount());
CHECK_GE(programNode->GetSelfSamplesCount(), 3);
CHECK_GE(programNode->GetHitCount(), 3);
const v8::CpuProfileNode* idleNode =
GetChild(root, ProfileGenerator::kIdleEntryName);
CHECK_EQ(0, idleNode->GetChildrenCount());
CHECK_GE(idleNode->GetSelfSamplesCount(), 3);
CHECK_GE(idleNode->GetHitCount(), 3);
cpu_profiler->DeleteAllCpuProfiles();
}
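SetIdle is the API this test exercises: an embedder toggles it around the waits in its event loop so samples taken while blocked are attributed to the new (idle) node rather than to (program). A minimal embedder-side sketch (WaitForWork is a hypothetical stand-in for the host loop's wait):

#include <v8.h>
#include <v8-profiler.h>

void WaitForWork();  // hypothetical: blocks until the embedder has work

void MarkIdleWhileWaiting(v8::Isolate* isolate) {
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  profiler->SetIdle(true);   // samples from here on land in (idle)
  WaitForWork();
  profiler->SetIdle(false);  // attribution reverts to the VM's real state
}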

22
deps/v8/test/cctest/test-deoptimization.cc

@ -77,23 +77,27 @@ class AlwaysOptimizeAllowNativesSyntaxNoInlining {
// Utility class to set --allow-natives-syntax, --nouse-inlining, and
// --noparallel-recompilation when constructed, and to restore the previous
// flag values when destroyed.
class AllowNativesSyntaxNoInlining {
class AllowNativesSyntaxNoInliningNoParallel {
public:
AllowNativesSyntaxNoInlining()
AllowNativesSyntaxNoInliningNoParallel()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
use_inlining_(i::FLAG_use_inlining) {
use_inlining_(i::FLAG_use_inlining),
parallel_recompilation_(i::FLAG_parallel_recompilation) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
i::FLAG_parallel_recompilation = false;
}
~AllowNativesSyntaxNoInlining() {
~AllowNativesSyntaxNoInliningNoParallel() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
i::FLAG_parallel_recompilation = parallel_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
bool parallel_recompilation_;
};
@ -343,7 +347,7 @@ TEST(DeoptimizeBinaryOperationADDString) {
const char* f_source = "function f(x, y) { return x + y; };";
{
AllowNativesSyntaxNoInlining options;
AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@ -401,7 +405,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
binary_op);
char* f_source = f_source_buffer.start();
AllowNativesSyntaxNoInlining options;
AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@ -493,7 +497,7 @@ TEST(DeoptimizeCompare) {
const char* f_source = "function f(x, y) { return x < y; };";
{
AllowNativesSyntaxNoInlining options;
AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a compare ic
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@ -540,7 +544,7 @@ TEST(DeoptimizeLoadICStoreIC) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
AllowNativesSyntaxNoInlining options;
AllowNativesSyntaxNoInliningNoParallel options;
// Compile functions and collect type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
@ -620,7 +624,7 @@ TEST(DeoptimizeLoadICStoreICNested) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
AllowNativesSyntaxNoInlining options;
AllowNativesSyntaxNoInliningNoParallel options;
// Compile functions and collect type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;

155
deps/v8/test/cctest/test-global-handles.cc

@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <map>
#include <vector>
#include "global-handles.h"
#include "cctest.h"
@ -320,157 +317,6 @@ TEST(ImplicitReferences) {
}
static const int kBlockSize = 256;
TEST(BlockCollection) {
v8::V8::Initialize();
Isolate* isolate = Isolate::Current();
GlobalHandles* global_handles = isolate->global_handles();
CHECK_EQ(0, global_handles->block_count());
CHECK_EQ(0, global_handles->global_handles_count());
Object* object = isolate->heap()->undefined_value();
const int kNumberOfBlocks = 5;
typedef Handle<Object> Block[kBlockSize];
for (int round = 0; round < 3; round++) {
Block blocks[kNumberOfBlocks];
for (int i = 0; i < kNumberOfBlocks; i++) {
for (int j = 0; j < kBlockSize; j++) {
blocks[i][j] = global_handles->Create(object);
}
}
CHECK_EQ(kNumberOfBlocks, global_handles->block_count());
for (int i = 0; i < kNumberOfBlocks; i++) {
for (int j = 0; j < kBlockSize; j++) {
global_handles->Destroy(blocks[i][j].location());
}
}
isolate->heap()->CollectAllAvailableGarbage("BlockCollection");
CHECK_EQ(0, global_handles->global_handles_count());
CHECK_EQ(1, global_handles->block_count());
}
}
class RandomMutationData {
public:
explicit RandomMutationData(Isolate* isolate)
: isolate_(isolate), weak_offset_(0) {}
void Mutate(double strong_growth_tendency,
double weak_growth_tendency = 0.05) {
for (int i = 0; i < kBlockSize * 100; i++) {
if (rng_.next(strong_growth_tendency)) {
AddStrong();
} else if (strong_nodes_.size() != 0) {
size_t to_remove = rng_.next(static_cast<int>(strong_nodes_.size()));
RemoveStrong(to_remove);
}
if (rng_.next(weak_growth_tendency)) AddWeak();
if (rng_.next(0.05)) {
#ifdef DEBUG
isolate_->global_handles()->VerifyBlockInvariants();
#endif
}
if (rng_.next(0.0001)) {
isolate_->heap()->PerformScavenge();
} else if (rng_.next(0.00003)) {
isolate_->heap()->CollectAllAvailableGarbage();
}
CheckSizes();
}
}
void RemoveAll() {
while (strong_nodes_.size() != 0) {
RemoveStrong(strong_nodes_.size() - 1);
}
isolate_->heap()->PerformScavenge();
isolate_->heap()->CollectAllAvailableGarbage();
CheckSizes();
}
private:
typedef std::vector<Object**> NodeVector;
typedef std::map<int32_t, Object**> NodeMap;
void CheckSizes() {
int stored_sizes =
static_cast<int>(strong_nodes_.size() + weak_nodes_.size());
CHECK_EQ(isolate_->global_handles()->global_handles_count(), stored_sizes);
}
void AddStrong() {
Object* object = isolate_->heap()->undefined_value();
Object** location = isolate_->global_handles()->Create(object).location();
strong_nodes_.push_back(location);
}
void RemoveStrong(size_t offset) {
isolate_->global_handles()->Destroy(strong_nodes_.at(offset));
strong_nodes_.erase(strong_nodes_.begin() + offset);
}
void AddWeak() {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
v8::HandleScope scope(isolate);
v8::Local<v8::Object> object = v8::Object::New();
int32_t offset = ++weak_offset_;
object->Set(7, v8::Integer::New(offset, isolate));
v8::Persistent<v8::Object> persistent(isolate, object);
persistent.MakeWeak(isolate, this, WeakCallback);
persistent.MarkIndependent();
Object** location = v8::Utils::OpenPersistent(persistent).location();
bool inserted =
weak_nodes_.insert(std::make_pair(offset, location)).second;
CHECK(inserted);
}
static void WeakCallback(v8::Isolate* isolate,
v8::Persistent<v8::Object>* persistent,
RandomMutationData* data) {
v8::Local<v8::Object> object =
v8::Local<v8::Object>::New(isolate, *persistent);
int32_t offset =
v8::Local<v8::Integer>::Cast(object->Get(7))->Int32Value();
Object** location = v8::Utils::OpenPersistent(persistent).location();
NodeMap& weak_nodes = data->weak_nodes_;
NodeMap::iterator it = weak_nodes.find(offset);
CHECK(it != weak_nodes.end());
CHECK(it->second == location);
weak_nodes.erase(it);
persistent->Dispose();
}
Isolate* isolate_;
RandomNumberGenerator rng_;
NodeVector strong_nodes_;
NodeMap weak_nodes_;
int32_t weak_offset_;
};
TEST(RandomMutation) {
v8::V8::Initialize();
Isolate* isolate = Isolate::Current();
CHECK_EQ(0, isolate->global_handles()->block_count());
HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(
v8::Context::New(reinterpret_cast<v8::Isolate*>(isolate)));
RandomMutationData data(isolate);
// grow some
data.Mutate(0.65);
data.Mutate(0.55);
// balanced mutation
for (int i = 0; i < 3; i++) data.Mutate(0.50);
// shrink some
data.Mutate(0.45);
data.Mutate(0.35);
// clear everything
data.RemoveAll();
}
TEST(EternalHandles) {
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
@ -518,4 +364,3 @@ TEST(EternalHandles) {
CHECK_EQ(kArrayLength, eternals->NumberOfHandles());
}

1
deps/v8/test/cctest/test-heap.cc

@ -2826,6 +2826,7 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
FLAG_use_ic = false; // ICs retain objects.
FLAG_parallel_recompilation = false;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
