From 4df999f85f2cb7ac687d3281af012d9df9699657 Mon Sep 17 00:00:00 2001
From: Ryan Dahl

Furthermore, the benchmark runner was changed to run the benchmarks for at
least a few times to stabilize the reported numbers on slower machines.

diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index 36d2ad511b..05bfffee02 100644
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -114,7 +114,7 @@ higher scores means better performance: Bigger is better!

diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -956,7 +956,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
slow));
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1022,7 +1022,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, mode);
+ __ Call(ic, mode);
}
@@ -1041,7 +1041,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ ldr(r0, CodeGenerator::GlobalObject());
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
Apply(context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1100,7 +1100,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
Apply(context, r0);
}
}
@@ -1189,7 +1189,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
break;
}
// Fall through.
@@ -1409,7 +1409,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
}
@@ -1417,7 +1417,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
}
@@ -1475,7 +1475,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
@@ -1486,7 +1486,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(r2);
__ pop(r0); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
break;
}
}
@@ -1509,7 +1509,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(r2, Operand(var->name()));
__ ldr(r1, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1598,7 +1598,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1642,7 +1642,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1691,7 +1691,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ __ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Apply(context_, r0);
@@ -1715,7 +1715,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
- EmitCallIC(ic, mode);
+ __ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Apply(context_, r0);
@@ -1854,7 +1854,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ pop(r1); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -2769,7 +2769,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ mov(r2, Operand(expr->name()));
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3065,7 +3065,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
@@ -3079,7 +3079,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
@@ -3102,7 +3102,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ Call(ic, RelocInfo::CODE_TARGET);
if (where == kStack) __ push(r0);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
@@ -3365,21 +3365,10 @@ void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
}
-Register FullCodeGenerator::result_register() {
- return r0;
-}
+Register FullCodeGenerator::result_register() { return r0; }
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- __ Call(ic, mode);
-}
+Register FullCodeGenerator::context_register() { return cp; }
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
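
The hunks above replace calls to the EmitCallIC helper with direct __ Call invocations; the removed helper (shown near the end of the section) only asserted the relocation mode before forwarding. A minimal standalone sketch of that wrapper-versus-direct-call pattern follows; the names (Mode, CallSite, the integer stub id) are hypothetical, not V8's macro assembler:

    // Sketch only: a thin helper that just checks an argument and forwards.
    #include <cassert>
    #include <cstdio>

    enum Mode { CODE_TARGET, CODE_TARGET_CONTEXT, DEBUG_BREAK };

    struct CallSite {
      void Call(int stub_id, Mode mode) {        // stands in for __ Call(ic, mode)
        std::printf("call stub %d, mode %d\n", stub_id, mode);
      }
      void EmitCallIC(int stub_id, Mode mode) {  // stands in for the removed helper
        assert(mode == CODE_TARGET || mode == CODE_TARGET_CONTEXT);
        Call(stub_id, mode);
      }
    };

    int main() {
      CallSite masm;
      masm.EmitCallIC(1, CODE_TARGET);  // via the wrapper
      masm.Call(1, CODE_TARGET);        // direct call, as the patch rewrites it
      return 0;
    }
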
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index d5a700cd9f..1a76db2ce3 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -967,14 +967,6 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell) {
- // TODO(): implement this.
- return false;
-}
-
-
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// Find the end of the inlined code for the store if there is an
// inlined version of the store.
@@ -1244,6 +1236,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- r1 : receiver
// -----------------------------------
Label miss;
+ Label index_out_of_range;
Register receiver = r1;
Register index = r0;
@@ -1258,7 +1251,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &miss, // When index out of range.
+ &index_out_of_range,
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1266,6 +1259,10 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
__ bind(&miss);
GenerateMiss(masm);
}
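
The added index_out_of_range label makes an out-of-range keyed string index load undefined directly instead of falling through to the generic miss handler. A small self-contained sketch of the assumed semantics, with std::optional standing in for undefined; this is not the generated stub:

    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<char> KeyedStringLoad(const std::string& s, int index) {
      if (index < 0 || index >= static_cast<int>(s.size())) {
        return std::nullopt;  // corresponds to LoadRoot(r0, kUndefinedValueRootIndex)
      }
      return s[index];        // fast path: the character at 'index'
    }

    int main() {
      std::cout << *KeyedStringLoad("abc", 1) << "\n";             // b
      std::cout << KeyedStringLoad("abc", 7).has_value() << "\n";  // 0 (undefined)
      return 0;
    }
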
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 070e352e47..0da5f64696 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -266,12 +266,7 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- // Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, Top::global());
- __ cmp(prototype, ip);
- __ b(ne, miss);
+ MacroAssembler* masm, int index, Register prototype) {
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
@@ -1439,8 +1434,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
+ r0);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
r1, r3, r4, name, &miss);
@@ -1511,8 +1505,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
+ r0);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
r1, r3, r4, name, &miss);
@@ -1633,16 +1626,6 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
}
-Object* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // TODO(872): implement this.
- return Heap::undefined_value();
-}
-
-
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1722,7 +1705,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -1742,7 +1725,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -1765,7 +1748,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
r1, r4, name, &miss);
}
@@ -2229,11 +2212,11 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
__ mov(r0, r4);
- __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 1f12502684..6e6c2c639c 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1344,41 +1344,33 @@ bool Genesis::InstallNatives() {
}
-static Handle<JSObject> ResolveCustomCallGeneratorHolder(
- Handle<Context> global_context,
- const char* holder_expr) {
- Handle<GlobalObject> global(global_context->global());
- const char* period_pos = strchr(holder_expr, '.');
- if (period_pos == NULL) {
- return Handle<JSObject>::cast(
- GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
+static void InstallCustomCallGenerator(
+ Handle<JSFunction> holder_function,
+ CallStubCompiler::CustomGeneratorOwner owner_flag,
+ const char* function_name,
+ int id) {
+ Handle<JSObject> owner;
+ if (owner_flag == CallStubCompiler::FUNCTION) {
+ owner = Handle<JSObject>::cast(holder_function);
+ } else {
+ ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
+ owner = Handle<JSObject>(
+ JSObject::cast(holder_function->instance_prototype()));
}
- ASSERT_EQ(".prototype", period_pos);
- Vector<const char> property(holder_expr,
- static_cast<int>(period_pos - holder_expr));
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, Factory::LookupSymbol(property)));
- return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
-static void InstallCustomCallGenerator(Handle<JSObject> holder,
- const char* function_name,
- int id) {
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
- Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
+ Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
function->shared()->set_function_data(Smi::FromInt(id));
}
void Genesis::InstallCustomCallGenerators() {
HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
- { \
- Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
- global_context(), #holder_expr); \
- const int id = CallStubCompiler::k##name##CallGenerator; \
- InstallCustomCallGenerator(holder, #fun_name, id); \
+#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name) \
+ { \
+ Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
+ const int id = CallStubCompiler::k##name##CallGenerator; \
+ InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag, \
+ #fun_name, id); \
}
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
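
The removed ResolveCustomCallGeneratorHolder decided where a custom call generator lives by parsing a "Constructor.prototype"-style holder expression with strchr; the restored code passes an explicit owner flag instead. A standalone sketch of that parsing idea, with hypothetical names and plain printf output rather than V8 handles:

    #include <cstdio>
    #include <cstring>

    void ResolveHolder(const char* holder_expr) {
      const char* period_pos = std::strchr(holder_expr, '.');
      if (period_pos == NULL) {
        std::printf("%s: install on the function object itself\n", holder_expr);
        return;
      }
      // Everything before the '.' names the constructor; ".prototype" selects
      // its instance prototype as the owner of the custom call generator.
      std::printf("%.*s: install on the instance prototype\n",
                  static_cast<int>(period_pos - holder_expr), holder_expr);
    }

    int main() {
      ResolveHolder("Math");              // e.g. Math.abs lives on Math itself
      ResolveHolder("String.prototype");  // e.g. String.prototype.charAt
      return 0;
    }
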
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index f15a804ef8..90cdc773ea 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -956,9 +956,8 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
char* DoubleToExponentialCString(double value, int f) {
- const int kMaxDigitsAfterPoint = 20;
// f might be -1 to signal that f was undefined in JavaScript.
- ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
+ ASSERT(f >= -1 && f <= 20);
bool negative = false;
if (value < 0) {
@@ -970,60 +969,29 @@ char* DoubleToExponentialCString(double value, int f) {
int decimal_point;
int sign;
char* decimal_rep = NULL;
- bool used_gay_dtoa = false;
- // f corresponds to the digits after the point. There is always one digit
- // before the point. The number of requested_digits equals hence f + 1.
- // And we have to add one character for the null-terminator.
- const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
- // Make sure that the buffer is big enough, even if we fall back to the
- // shortest representation (which happens when f equals -1).
- ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
- char v8_dtoa_buffer[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
if (f == -1) {
- if (DoubleToAscii(value, DTOA_SHORTEST, 0,
- Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point)) {
- f = decimal_rep_length - 1;
- decimal_rep = v8_dtoa_buffer;
- } else {
- decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
- decimal_rep_length = StrLength(decimal_rep);
- f = decimal_rep_length - 1;
- used_gay_dtoa = true;
- }
+ decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+ f = StrLength(decimal_rep) - 1;
} else {
- if (DoubleToAscii(value, DTOA_PRECISION, f + 1,
- Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point)) {
- decimal_rep = v8_dtoa_buffer;
- } else {
- decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
- decimal_rep_length = StrLength(decimal_rep);
- used_gay_dtoa = true;
- }
+ decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
}
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length > 0);
ASSERT(decimal_rep_length <= f + 1);
+ USE(decimal_rep_length);
int exponent = decimal_point - 1;
char* result =
CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
- if (used_gay_dtoa) {
- freedtoa(decimal_rep);
- }
+ freedtoa(decimal_rep);
return result;
}
char* DoubleToPrecisionCString(double value, int p) {
- const int kMinimalDigits = 1;
- const int kMaximalDigits = 21;
- ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
- USE(kMinimalDigits);
+ ASSERT(p >= 1 && p <= 21);
bool negative = false;
if (value < 0) {
@@ -1034,22 +1002,8 @@ char* DoubleToPrecisionCString(double value, int p) {
// Find a sufficiently precise decimal representation of n.
int decimal_point;
int sign;
- char* decimal_rep = NULL;
- bool used_gay_dtoa = false;
- // Add one for the terminating null character.
- const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
- char v8_dtoa_buffer[kV8DtoaBufferCapacity];
- int decimal_rep_length;
-
- if (DoubleToAscii(value, DTOA_PRECISION, p,
- Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point)) {
- decimal_rep = v8_dtoa_buffer;
- } else {
- decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
- decimal_rep_length = StrLength(decimal_rep);
- used_gay_dtoa = true;
- }
+ char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@@ -1093,9 +1047,7 @@ char* DoubleToPrecisionCString(double value, int p) {
result = builder.Finalize();
}
- if (used_gay_dtoa) {
- freedtoa(decimal_rep);
- }
+ freedtoa(decimal_rep);
return result;
}
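
DoubleToExponentialCString is expected to produce f digits after the decimal point (f == -1 requests the shortest form); the hunk reverts it to Gay's dtoa with f + 1 requested digits. A tiny sketch of the output contract only, using the C library formatter rather than any dtoa implementation:

    #include <cstdio>

    int main() {
      double value = 123.456;
      for (int f = 0; f <= 3; f++) {
        // Number.prototype.toExponential(f) produces f digits after the point.
        std::printf("toExponential(%d) ~ %.*e\n", f, f, value);
      }
      return 0;
    }
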
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 5df5893f8a..cb7fdd8ff6 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -82,11 +82,14 @@ TickSample* ProfilerEventsProcessor::TickSampleEvent() {
bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
Logger::LogEventsAndTags tag) {
+ // In browser mode, leave only callbacks and non-native JS entries.
+ // We filter out regular expressions as currently we can't tell
+ // whether they originate from native scripts, so let's not confuse people by
+ // showing them weird regexes they didn't write.
return FLAG_prof_browser_mode
&& (tag != Logger::CALLBACK_TAG
&& tag != Logger::FUNCTION_TAG
&& tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
&& tag != Logger::SCRIPT_TAG);
}
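
A small sketch of the browser-mode filter after this hunk: with REG_EXP_TAG no longer excluded from the condition, regexp code entries are filtered out again. The enum here is hypothetical; the real tags live in Logger::LogEventsAndTags:

    #include <iostream>

    enum Tag { CALLBACK, FUNCTION, LAZY_COMPILE, REG_EXP, SCRIPT, BUILTIN };

    bool FilterOut(bool browser_mode, Tag tag) {
      return browser_mode
          && tag != CALLBACK
          && tag != FUNCTION
          && tag != LAZY_COMPILE
          && tag != SCRIPT;   // REG_EXP is absent on purpose, so it is dropped
    }

    int main() {
      std::cout << FilterOut(true, REG_EXP) << "\n";   // 1: dropped in browser mode
      std::cout << FilterOut(true, FUNCTION) << "\n";  // 0: kept
      return 0;
    }
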
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 34eb0f0ec5..0b02e2102a 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -45,7 +45,7 @@ Debug.DebugEvent = { Break: 1,
ScriptCollected: 6 };
// Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { Caught : 0,
+Debug.ExceptionBreak = { All : 0,
Uncaught: 1 };
// The different types of steps.
@@ -87,27 +87,7 @@ var debugger_flags = {
this.value = !!value;
%SetDisableBreak(!this.value);
}
- },
- breakOnCaughtException: {
- getValue: function() { return Debug.isBreakOnException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnException();
- } else {
- Debug.clearBreakOnException();
- }
- }
- },
- breakOnUncaughtException: {
- getValue: function() { return Debug.isBreakOnUncaughtException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnUncaughtException();
- } else {
- Debug.clearBreakOnUncaughtException();
- }
- }
- },
+ }
};
@@ -801,15 +781,11 @@ Debug.clearStepping = function() {
}
Debug.setBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
+ return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
};
Debug.clearBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
-};
-
-Debug.isBreakOnException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
+ return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
};
Debug.setBreakOnUncaughtException = function() {
@@ -820,10 +796,6 @@ Debug.clearBreakOnUncaughtException = function() {
return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
};
-Debug.isBreakOnUncaughtException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
-};
-
Debug.showBreakPoints = function(f, full) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
var source = full ? this.scriptSource(f) : this.source(f);
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 24b1d31097..87780d350c 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1200,15 +1200,6 @@ void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
}
-bool Debug::IsBreakOnException(ExceptionBreakType type) {
- if (type == BreakUncaughtException) {
- return break_on_uncaught_exception_;
- } else {
- return break_on_exception_;
- }
-}
-
-
void Debug::PrepareStep(StepAction step_action, int step_count) {
HandleScope scope;
ASSERT(Debug::InDebugger());
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 0d63085f15..8b3b29e636 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -236,7 +236,6 @@ class Debug {
static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
static void FloodHandlerWithOneShot();
static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- static bool IsBreakOnException(ExceptionBreakType type);
static void PrepareStep(StepAction step_action, int step_count);
static void ClearStepping();
static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
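
The deleted Debug::IsBreakOnException was a read-back accessor over the same two flags that ChangeBreakOnException sets. A self-contained sketch of that flag pair, not the real Debug class:

    #include <iostream>

    enum ExceptionBreakType { BreakException, BreakUncaughtException };

    class DebugFlags {
     public:
      void ChangeBreakOnException(ExceptionBreakType type, bool enable) {
        if (type == BreakUncaughtException) {
          break_on_uncaught_exception_ = enable;
        } else {
          break_on_exception_ = enable;
        }
      }
      // This is what the removed Debug::IsBreakOnException reported.
      bool IsBreakOnException(ExceptionBreakType type) const {
        return type == BreakUncaughtException ? break_on_uncaught_exception_
                                              : break_on_exception_;
      }
     private:
      bool break_on_exception_ = false;
      bool break_on_uncaught_exception_ = false;
    };

    int main() {
      DebugFlags debug;
      debug.ChangeBreakOnException(BreakUncaughtException, true);
      std::cout << debug.IsBreakOnException(BreakUncaughtException) << "\n";  // 1
      std::cout << debug.IsBreakOnException(BreakException) << "\n";          // 0
      return 0;
    }
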
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index f4141eb619..e3dcbf2d61 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -65,12 +65,11 @@ bool DoubleToAscii(double v, DtoaMode mode, int requested_digits,
switch (mode) {
case DTOA_SHORTEST:
- return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
+ return FastDtoa(v, buffer, length, point);
case DTOA_FIXED:
return FastFixedDtoa(v, requested_digits, buffer, length, point);
- case DTOA_PRECISION:
- return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
- buffer, length, point);
+ default:
+ break;
}
return false;
}
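
After this hunk DoubleToAscii only has fast paths for the shortest and fixed modes; any other mode falls through to return false, so callers fall back to the slower dtoa. A minimal sketch of that dispatch shape with illustrative names, not the real FastDtoa/FastFixedDtoa:

    #include <iostream>

    enum DtoaMode { DTOA_SHORTEST, DTOA_FIXED, DTOA_PRECISION };

    bool FastPath(DtoaMode mode) {
      switch (mode) {
        case DTOA_SHORTEST: return true;  // would call FastDtoa(...)
        case DTOA_FIXED:    return true;  // would call FastFixedDtoa(...)
        default:            break;        // DTOA_PRECISION no longer handled here
      }
      return false;                       // caller falls back to Gay's dtoa
    }

    int main() {
      std::cout << FastPath(DTOA_PRECISION) << "\n";  // 0: fallback path
      return 0;
    }
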
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index d2a00cc624..b4b7be053f 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -42,8 +42,8 @@ namespace internal {
//
// A different range might be chosen on a different platform, to optimize digit
// generation, but a smaller range requires more powers of ten to be cached.
-static const int kMinimalTargetExponent = -60;
-static const int kMaximalTargetExponent = -32;
+static const int minimal_target_exponent = -60;
+static const int maximal_target_exponent = -32;
// Adjusts the last digit of the generated number, and screens out generated
@@ -61,13 +61,13 @@ static const int kMaximalTargetExponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
-static bool RoundWeed(Vector<char> buffer,
- int length,
- uint64_t distance_too_high_w,
- uint64_t unsafe_interval,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit) {
+bool RoundWeed(Vector<char> buffer,
+ int length,
+ uint64_t distance_too_high_w,
+ uint64_t unsafe_interval,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
@@ -75,7 +75,7 @@ static bool RoundWeed(Vector buffer,
// Note: w_low < w < w_high
//
// The real w (* unit) must lie somewhere inside the interval
- // ]w_low; w_high[ (often written as "(w_low; w_high)")
+ // ]w_low; w_low[ (often written as "(w_low; w_low)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
@@ -122,10 +122,10 @@ static bool RoundWeed(Vector buffer,
// inside the safe interval then we simply do not know and bail out (returning
// false).
//
- // Similarly we have to take into account the imprecision of 'w' when finding
- // the closest representation of 'w'. If we have two potential
- // representations, and one is closer to both w_low and w_high, then we know
- // it is closer to the actual value v.
+ // Similarly we have to take into account the imprecision of 'w' when rounding
+ // the buffer. If we have two potential representations we need to make sure
+ // that the chosen one is closer to w_low and w_high since v can be anywhere
+ // between them.
//
// By generating the digits of too_high we got the largest (closest to
// too_high) buffer that is still in the unsafe interval. In the case where
@@ -139,9 +139,6 @@ static bool RoundWeed(Vector buffer,
// (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
- // We need to do the following tests in this order to avoid over- and
- // underflows.
- ASSERT(rest <= unsafe_interval);
while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
@@ -169,62 +166,6 @@ static bool RoundWeed(Vector buffer,
}
-// Rounds the buffer upwards if the result is closer to v by possibly adding
-// 1 to the buffer. If the precision of the calculation is not sufficient to
-// round correctly, return false.
-// The rounding might shift the whole buffer in which case the kappa is
-// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
-//
-// If 2*rest > ten_kappa then the buffer needs to be round up.
-// rest can have an error of +/- 1 unit. This function accounts for the
-// imprecision and returns false, if the rounding direction cannot be
-// unambiguously determined.
-//
-// Precondition: rest < ten_kappa.
-static bool RoundWeedCounted(Vector<char> buffer,
- int length,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit,
- int* kappa) {
- ASSERT(rest < ten_kappa);
- // The following tests are done in a specific order to avoid overflows. They
- // will work correctly with any uint64 values of rest < ten_kappa and unit.
- //
- // If the unit is too big, then we don't know which way to round. For example
- // a unit of 50 means that the real number lies within rest +/- 50. If
- // 10^kappa == 40 then there is no way to tell which way to round.
- if (unit >= ten_kappa) return false;
- // Even if unit is just half the size of 10^kappa we are already completely
- // lost. (And after the previous test we know that the expression will not
- // over/underflow.)
- if (ten_kappa - unit <= unit) return false;
- // If 2 * (rest + unit) <= 10^kappa we can safely round down.
- if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
- return true;
- }
- // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
- if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
- // Increment the last digit recursively until we find a non '9' digit.
- buffer[length - 1]++;
- for (int i = length - 1; i > 0; --i) {
- if (buffer[i] != '0' + 10) break;
- buffer[i] = '0';
- buffer[i - 1]++;
- }
- // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the
- // exception of the first digit all digits are now '0'. Simply switch the
- // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
- // the power (the kappa) is increased.
- if (buffer[0] == '0' + 10) {
- buffer[0] = '1';
- (*kappa) += 1;
- }
- return true;
- }
- return false;
-}
-
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
@@ -237,7 +178,7 @@ static const uint32_t kTen9 = 1000000000;
// number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32.
-// Precondition: number < (1 << (number_bits + 1)).
+// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)).
static void BiggestPowerTen(uint32_t number,
int number_bits,
uint32_t* power,
@@ -340,18 +281,18 @@ static void BiggestPowerTen(uint32_t number,
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
+// exponent. Its exponent is bounded by minimal_target_exponent and
+// maximal_target_exponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
-// is, their error must be less than a unit of their last digits.
+// is, their error must be less that a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+// * minimal_target_exponent <= w.e() <= maximal_target_exponent
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
@@ -380,15 +321,15 @@ static void BiggestPowerTen(uint32_t number,
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
-static bool DigitGen(DiyFp low,
- DiyFp w,
- DiyFp high,
- Vector<char> buffer,
- int* length,
- int* kappa) {
+bool DigitGen(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1);
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
// low, w and high are imprecise, but by less than one ulp (unit in the last
// place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@@ -418,23 +359,23 @@ static bool DigitGen(DiyFp low,
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
+ uint32_t divider;
+ int divider_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
+ &divider, &divider_exponent);
+ *kappa = divider_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // with the divider exponent + 1. And the divider is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
- int digit = integrals / divisor;
+ int digit = integrals / divider;
buffer[*length] = '0' + digit;
(*length)++;
- integrals %= divisor;
+ integrals %= divider;
(*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
+ // Note that kappa now equals the exponent of the divider and that the
// invariant thus holds again.
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
@@ -445,24 +386,32 @@ static bool DigitGen(DiyFp low,
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
- static_cast<uint64_t>(divisor) << -one.e(), unit);
+ static_cast<uint64_t>(divider) << -one.e(), unit);
}
- divisor /= 10;
+ divider /= 10;
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (like the interval or 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
+ // increase its (imaginary) exponent. At the same time we decrease the
+ // divider's (one's) exponent and shift its significand.
+ // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
+ // fractionals.f *= 10;
+ // fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
+ // one.f >>= 1; one.e++; // value remains unchanged.
+ // and we have again fractionals.e == one.e which allows us to divide
+ // fractionals.f() by one.f()
+ // We simply combine the *= 10 and the >>= 1.
while (true) {
- fractionals *= 10;
- unit *= 10;
- unsafe_interval.set_f(unsafe_interval.f() * 10);
+ fractionals *= 5;
+ unit *= 5;
+ unsafe_interval.set_f(unsafe_interval.f() * 5);
+ unsafe_interval.set_e(unsafe_interval.e() + 1); // Will be optimized out.
+ one.set_f(one.f() >> 1);
+ one.set_e(one.e() + 1);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
@@ -477,113 +426,6 @@ static bool DigitGen(DiyFp low,
}
-
-// Generates (at most) requested_digits of input number w.
-// w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by kMinimalTargetExponent and
-// kMaximalTargetExponent.
-// Hence -60 <= w.e() <= -32.
-//
-// Returns false if it fails, in which case the generated digits in the buffer
-// should not be used.
-// Preconditions:
-// * w is correct up to 1 ulp (unit in the last place). That
-// is, its error must be strictly less than a unit of its last digit.
-// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
-//
-// Postconditions: returns false if procedure fails.
-// otherwise:
-// * buffer is not null-terminated, but length contains the number of
-// digits.
-// * the representation in buffer is the most precise representation of
-// requested_digits digits.
-// * buffer contains at most requested_digits digits of w. If there are less
-// than requested_digits digits then some trailing '0's have been removed.
-// * kappa is such that
-// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
-//
-// Remark: This procedure takes into account the imprecision of its input
-// numbers. If the precision is not enough to guarantee all the postconditions
-// then false is returned. This usually happens rarely, but the failure-rate
-// increases with higher requested_digits.
-static bool DigitGenCounted(DiyFp w,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* kappa) {
- ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
- ASSERT(kMinimalTargetExponent >= -60);
- ASSERT(kMaximalTargetExponent <= -32);
- // w is assumed to have an error less than 1 unit. Whenever w is scaled we
- // also scale its error.
- uint64_t w_error = 1;
- // We cut the input number into two parts: the integral digits and the
- // fractional digits. We don't emit any decimal separator, but adapt kappa
- // instead. Example: instead of writing "1.2" we put "12" into the buffer and
- // increase kappa by 1.
- DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
- // Division by one is a shift.
- uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
- // Modulo by one is an and.
- uint64_t fractionals = w.f() & (one.f() - 1);
- uint32_t divisor;
- int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
- *kappa = divisor_exponent + 1;
- *length = 0;
-
- // Loop invariant: buffer = w / 10^kappa (integer division)
- // The invariant holds for the first iteration: kappa has been initialized
- // with the divisor exponent + 1. And the divisor is the biggest power of ten
- // that is smaller than 'integrals'.
- while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- integrals %= divisor;
- (*kappa)--;
- // Note that kappa now equals the exponent of the divisor and that the
- // invariant thus holds again.
- if (requested_digits == 0) break;
- divisor /= 10;
- }
-
- if (requested_digits == 0) {
- uint64_t rest =
- (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
- return RoundWeedCounted(buffer, *length, rest,
- static_cast<uint64_t>(divisor) << -one.e(), w_error,
- kappa);
- }
-
- // The integrals have been generated. We are at the point of the decimal
- // separator. In the following loop we simply multiply the remaining digits by
- // 10 and divide by one. We just need to pay attention to multiply associated
- // data (the 'unit'), too.
- // Note that the multiplication by 10 does not overflow, because w.e >= -60
- // and thus one.e >= -60.
- ASSERT(one.e() >= -60);
- ASSERT(fractionals < one.f());
- ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
- while (requested_digits > 0 && fractionals > w_error) {
- fractionals *= 10;
- w_error *= 10;
- // Integer division by one.
- int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
- requested_digits--;
- fractionals &= one.f() - 1; // Modulo by one.
- (*kappa)--;
- }
- if (requested_digits != 0) return false;
- return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
- kappa);
-}
-
-
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
@@ -595,10 +437,7 @@ static bool DigitGenCounted(DiyFp w,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-static bool Grisu3(double v,
- Vector buffer,
- int* length,
- int* decimal_exponent) {
+bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
@@ -609,12 +448,12 @@ static bool Grisu3(double v,
ASSERT(boundary_plus.e() == w.e());
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
- GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
- kMaximalTargetExponent, &mk, &ten_mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
+ GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
+ maximal_target_exponent, &mk, &ten_mk);
+ ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize &&
+ maximal_target_exponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize);
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
@@ -649,75 +488,17 @@ static bool Grisu3(double v,
}
-// The "counted" version of grisu3 (see above) only generates requested_digits
-// number of digits. This version does not generate the shortest representation,
-// and with enough requested digits 0.1 will at some point print as 0.9999999...
-// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
-// therefore the rounding strategy for halfway cases is irrelevant.
-static bool Grisu3Counted(double v,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
- DiyFp w = Double(v).AsNormalizedDiyFp();
- DiyFp ten_mk; // Cached power of ten: 10^-k
- int mk; // -k
- GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
- kMaximalTargetExponent, &mk, &ten_mk);
- ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
- // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
- // 64 bit significand and ten_mk is thus only precise up to 64 bits.
-
- // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
- // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
- // off by a small amount.
- // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
- // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
- // (f-1) * 2^e < w*10^k < (f+1) * 2^e
- DiyFp scaled_w = DiyFp::Times(w, ten_mk);
-
- // We now have (double) (scaled_w * 10^-mk).
- // DigitGen will generate the first requested_digits digits of scaled_w and
- // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
- // will not always be exactly the same since DigitGenCounted only produces a
- // limited number of digits.)
- int kappa;
- bool result = DigitGenCounted(scaled_w, requested_digits,
- buffer, length, &kappa);
- *decimal_exponent = -mk + kappa;
- return result;
-}
-
-
bool FastDtoa(double v,
- FastDtoaMode mode,
- int requested_digits,
Vector<char> buffer,
int* length,
- int* decimal_point) {
+ int* point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
- bool result = false;
- int decimal_exponent = 0;
- switch (mode) {
- case FAST_DTOA_SHORTEST:
- result = Grisu3(v, buffer, length, &decimal_exponent);
- break;
- case FAST_DTOA_PRECISION:
- result = Grisu3Counted(v, requested_digits,
- buffer, length, &decimal_exponent);
- break;
- default:
- UNREACHABLE();
- }
- if (result) {
- *decimal_point = *length + decimal_exponent;
- buffer[*length] = '\0';
- }
+ int decimal_exponent;
+ bool result = grisu3(v, buffer, length, &decimal_exponent);
+ *point = *length + decimal_exponent;
+ buffer[*length] = '\0';
return result;
}
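
The restored fractional-digit loop in DigitGen multiplies by 5 and bumps the binary exponent instead of multiplying by 10, so the represented value is unchanged while the significand grows more slowly. A standalone numeric check of the identity it relies on, f * 10 * 2^e == f * 5 * 2^(e+1):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t f = 0x1234567ULL;  // arbitrary significand
      int e = -40;                // arbitrary binary exponent
      double times_ten  = static_cast<double>(f) * 10.0 * std::ldexp(1.0, e);
      double times_five = static_cast<double>(f) * 5.0  * std::ldexp(1.0, e + 1);
      std::printf("%.17g\n%.17g\n", times_ten, times_five);  // identical values
      return 0;
    }
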
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index 94c22ecd7c..4403a75029 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -31,52 +31,27 @@
namespace v8 {
namespace internal {
-enum FastDtoaMode {
- // Computes the shortest representation of the given input. The returned
- // result will be the most accurate number of this length. Longer
- // representations might be more accurate.
- FAST_DTOA_SHORTEST,
- // Computes a representation where the precision (number of digits) is
- // given as input. The precision is independent of the decimal point.
- FAST_DTOA_PRECISION
-};
-
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
-// The result should be interpreted as buffer * 10^(point - length).
-//
-// Precondition:
-// * v must be a strictly positive finite double.
-//
+// v must be a strictly positive finite double.
// Returns true if it succeeds, otherwise the result can not be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true and mode equals
-// - FAST_DTOA_SHORTEST, then
-// the parameter requested_digits is ignored.
-// The result satisfies
-// v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible. E.g.
-// if 0.099999999999 and 0.1 represent the same double then "1" is returned
-// with point = 0.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain
-// the one closest to v.
-// - FAST_DTOA_PRECISION, then
-// the buffer contains requested_digits digits.
-// the difference v - (buffer * 10^(point-length)) is closest to zero for
-// all possible representations of requested_digits digits.
-// If there are two values that are equally close, then FastDtoa returns
-// false.
-// For both modes the buffer must be large enough to hold the result.
+// If the function returns true then
+// v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible: no
+// 0.099999999999 instead of 0.1.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain the
+// one closest to v.
+// The variable 'sign' will be '0' if the given number is positive, and '1'
+// otherwise.
bool FastDtoa(double d,
- FastDtoaMode mode,
- int requested_digits,
Vector<char> buffer,
int* length,
- int* decimal_point);
+ int* point);
} } // namespace v8::internal
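
The comment block above describes the FastDtoa result as buffer * 10^(point - length). A standalone sketch of how such a (digits, length, point) triple maps back to a value; the helper calls here are illustrative, not V8's conversion code:

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const char buffer[] = "12345";  // decimal digits, no decimal point
      int length = static_cast<int>(std::strlen(buffer));
      int point = 3;                  // decimal point sits after the 3rd digit
      double value = std::atof(buffer) * std::pow(10.0, point - length);
      std::printf("%g\n", value);     // prints 123.45
      return 0;
    }
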
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 3cdb0157e7..76a441b64d 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -143,8 +143,8 @@ void StackFrameIterator::Reset() {
state.pc_address =
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
type = StackFrame::ComputeType(&state);
+ if (SingletonFor(type) == NULL) return;
}
- if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
@@ -203,24 +203,13 @@ bool StackTraceFrameIterator::IsValidFrame() {
// -------------------------------------------------------------------------
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
- if (!validator_.IsValid(fp)) return false;
- Address sp = ExitFrame::ComputeStackPointer(fp);
- if (!validator_.IsValid(sp)) return false;
- StackFrame::State state;
- ExitFrame::FillState(fp, sp, &state);
- if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
- return false;
- }
- return *state.pc_address != NULL;
-}
-
-
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(),
- stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(low_bound, high_bound)),
+ maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
+ is_valid_top_(
+ IsWithinBounds(low_bound, high_bound,
+ Top::c_entry_fp(Top::GetCurrentThread())) &&
+ Top::handler(Top::GetCurrentThread()) != NULL),
is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
is_working_iterator_(is_valid_top_ || is_valid_fp_),
iteration_done_(!is_working_iterator_),
@@ -228,14 +217,6 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
-bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
- Address fp = Top::c_entry_fp(Top::GetCurrentThread());
- ExitFrameValidator validator(low_bound, high_bound);
- if (!validator.IsValidFP(fp)) return false;
- return Top::handler(Top::GetCurrentThread()) != NULL;
-}
-
-
void SafeStackFrameIterator::Advance() {
ASSERT(is_working_iterator_);
ASSERT(!done());
@@ -277,8 +258,9 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// sure that caller FP address is valid.
Address caller_fp = Memory::Address_at(
frame->fp() + EntryFrameConstants::kCallerFPOffset);
- ExitFrameValidator validator(stack_validator_);
- if (!validator.IsValidFP(caller_fp)) return false;
+ if (!IsValidStackAddress(caller_fp)) {
+ return false;
+ }
} else if (frame->is_arguments_adaptor()) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
// the number of arguments is stored on stack as Smi. We need to check
@@ -433,22 +415,6 @@ Address ExitFrame::GetCallerStackPointer() const {
}
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- Address sp = ComputeStackPointer(fp);
- FillState(fp, sp, state);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
-}
-
-
-void ExitFrame::FillState(Address fp, Address sp, State* state) {
- state->sp = sp;
- state->fp = fp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-}
-
-
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 2d4f338ae0..20111904f5 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -67,7 +67,7 @@ class PcToCodeCache : AllStatic {
static PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
- static const int kPcToCodeCacheSize = 1024;
+ static const int kPcToCodeCacheSize = 256;
static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
};
@@ -141,13 +141,6 @@ class StackFrame BASE_EMBEDDED {
NO_ID = 0
};
- struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
- Address sp;
- Address fp;
- Address* pc_address;
- };
-
// Copy constructor; it breaks the connection to host iterator.
StackFrame(const StackFrame& original) {
this->state_ = original.state_;
@@ -208,6 +201,12 @@ class StackFrame BASE_EMBEDDED {
int index) const { }
protected:
+ struct State {
+ Address sp;
+ Address fp;
+ Address* pc_address;
+ };
+
explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
virtual ~StackFrame() { }
@@ -319,8 +318,6 @@ class ExitFrame: public StackFrame {
// pointer. Used when constructing the first stack frame seen by an
// iterator and the frames following entry frames.
static Type GetStateForFramePointer(Address fp, State* state);
- static Address ComputeStackPointer(Address fp);
- static void FillState(Address fp, Address sp, State* state);
protected:
explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@@ -446,7 +443,6 @@ class JavaScriptFrame: public StandardFrame {
inline Object* function_slot_object() const;
friend class StackFrameIterator;
- friend class StackTracer;
};
@@ -658,36 +654,12 @@ class SafeStackFrameIterator BASE_EMBEDDED {
}
private:
- class StackAddressValidator {
- public:
- StackAddressValidator(Address low_bound, Address high_bound)
- : low_bound_(low_bound), high_bound_(high_bound) { }
- bool IsValid(Address addr) const {
- return IsWithinBounds(low_bound_, high_bound_, addr);
- }
- private:
- Address low_bound_;
- Address high_bound_;
- };
-
- class ExitFrameValidator {
- public:
- explicit ExitFrameValidator(const StackAddressValidator& validator)
- : validator_(validator) { }
- ExitFrameValidator(Address low_bound, Address high_bound)
- : validator_(low_bound, high_bound) { }
- bool IsValidFP(Address fp);
- private:
- StackAddressValidator validator_;
- };
-
bool IsValidStackAddress(Address addr) const {
- return stack_validator_.IsValid(addr);
+ return IsWithinBounds(low_bound_, high_bound_, addr);
}
bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Address low_bound, Address high_bound);
// This is a nasty hack to make sure the active count is incremented
// before the constructor for the embedded iterator is invoked. This
@@ -702,7 +674,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
ActiveCountMaintainer maintainer_;
static int active_count_;
- StackAddressValidator stack_validator_;
+ Address low_bound_;
+ Address high_bound_;
const bool is_valid_top_;
const bool is_valid_fp_;
const bool is_working_iterator_;
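
With ExitFrameValidator and StackAddressValidator removed, the safe iterator falls back to a plain low/high bounds check on stack addresses. A minimal sketch of that check, assuming an inclusive range like IsWithinBounds:

    #include <cstdint>
    #include <iostream>

    typedef uintptr_t Address;

    bool IsWithinBounds(Address low_bound, Address high_bound, Address addr) {
      return low_bound <= addr && addr <= high_bound;
    }

    int main() {
      Address low = 0x1000, high = 0x2000;  // sampled stack region
      std::cout << IsWithinBounds(low, high, 0x1800) << "\n";  // 1: plausible frame
      std::cout << IsWithinBounds(low, high, 0x2800) << "\n";  // 0: outside the stack
      return 0;
    }
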
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 2d60d5b032..9db233c229 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -509,9 +509,6 @@ class FullCodeGenerator: public AstVisitor {
static Register result_register();
static Register context_register();
- // Helper for calling an IC stub.
- void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
-
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 905d06551b..650800fa4b 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -2650,20 +2650,6 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
-static bool HasDuplicates(DescriptorArray* descriptors) {
- int count = descriptors->number_of_descriptors();
- if (count > 1) {
- String* prev_key = descriptors->GetKey(0);
- for (int i = 1; i != count; i++) {
- String* current_key = descriptors->GetKey(i);
- if (prev_key == current_key) return true;
- prev_key = current_key;
- }
- }
- return false;
-}
-
-
Object* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
@@ -2697,34 +2683,23 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
- // Inline constructor can only handle inobject properties.
- fun->shared()->ForbidInlineConstructor();
- } else {
- Object* descriptors_obj = DescriptorArray::Allocate(count);
- if (descriptors_obj->IsFailure()) return descriptors_obj;
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
- for (int i = 0; i < count; i++) {
- String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE);
- field.SetEnumerationIndex(i);
- descriptors->Set(i, &field);
- }
- descriptors->SetNextEnumerationIndex(count);
- descriptors->SortUnchecked();
-
- // The descriptors may contain duplicates because the compiler does not
- // guarantee the uniqueness of property names (it would have required
- // quadratic time). Once the descriptors are sorted we can check for
- // duplicates in linear time.
- if (HasDuplicates(descriptors)) {
- fun->shared()->ForbidInlineConstructor();
- } else {
- map->set_instance_descriptors(descriptors);
- map->set_pre_allocated_property_fields(count);
- map->set_unused_property_fields(in_object_properties - count);
- }
+ count = in_object_properties;
+ }
+ Object* descriptors_obj = DescriptorArray::Allocate(count);
+ if (descriptors_obj->IsFailure()) return descriptors_obj;
+ DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+ for (int i = 0; i < count; i++) {
+ String* name = fun->shared()->GetThisPropertyAssignmentName(i);
+ ASSERT(name->IsSymbol());
+ FieldDescriptor field(name, i, NONE);
+ field.SetEnumerationIndex(i);
+ descriptors->Set(i, &field);
}
+ descriptors->SetNextEnumerationIndex(count);
+ descriptors->Sort();
+ map->set_instance_descriptors(descriptors);
+ map->set_pre_allocated_property_fields(count);
+ map->set_unused_property_fields(in_object_properties - count);
}
return map;
}
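
The deleted HasDuplicates relied on the descriptors being sorted, so any duplicate property names are adjacent and one linear pass finds them. A standalone sketch of the same idea over plain strings:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    bool HasDuplicates(std::vector<std::string> keys) {
      std::sort(keys.begin(), keys.end());  // duplicates become adjacent
      return std::adjacent_find(keys.begin(), keys.end()) != keys.end();
    }

    int main() {
      std::cout << HasDuplicates({"x", "y", "x"}) << "\n";  // 1
      std::cout << HasDuplicates({"x", "y", "z"}) << "\n";  // 0
      return 0;
    }
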
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index e0cb8a1352..eef307d7ed 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2179,16 +2179,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2211,29 +2201,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
-void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0xC2);
- emit_sse_operand(dst, src);
- EMIT(1); // LT == 1
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x0F);
- EMIT(0x28);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2390,19 +2358,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::psllq(XMMRegister reg, int8_t imm8) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(esi, reg); // esi == 6
- EMIT(imm8);
-}
-
-
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
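
A side note on the psllq emitter removed above: it encodes the SSE2 form "66 0F 73 /6 ib", where the ModR/M reg field carries the opcode extension 6 rather than a real register, which is why the removed code passes esi (register code 6) to emit_sse_operand. A small standalone sketch of that encoding (EncodePsllqImm is a hypothetical helper; it assumes xmm0-xmm7 and no REX/VEX prefixes):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Encode "psllq xmmN, imm8" as 66 0F 73 /6 ib.
    std::vector<uint8_t> EncodePsllqImm(int xmm, uint8_t imm8) {
      uint8_t modrm = 0xC0 | (6 << 3) | (xmm & 7);  // mod=11, reg=/6, rm=xmm
      return {0x66, 0x0F, 0x73, modrm, imm8};
    }

    int main() {
      for (uint8_t b : EncodePsllqImm(1, 32)) std::printf("%02X ", b);
      std::printf(" ; psllq xmm1, 32\n");
    }
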
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 539addd086..928f172894 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -788,15 +788,9 @@ class Assembler : public Malloced {
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void andpd(XMMRegister dst, XMMRegister src);
-
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
- void cmpltsd(XMMRegister dst, XMMRegister src);
-
- void movaps(XMMRegister dst, XMMRegister src);
-
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -812,8 +806,6 @@ class Assembler : public Malloced {
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
- void psllq(XMMRegister reg, int8_t imm8);
-
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index bde2f18449..86f3877c7e 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -9144,15 +9144,9 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
public:
DeferredReferenceGetNamedValue(Register dst,
Register receiver,
- Handle<String> name,
- bool is_contextual)
- : dst_(dst),
- receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
+ Handle<String> name)
+ : dst_(dst), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
@@ -9164,7 +9158,6 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
Register dst_;
Register receiver_;
Handle<String> name_;
- bool is_contextual_;
};
@@ -9174,15 +9167,9 @@ void DeferredReferenceGetNamedValue::Generate() {
}
__ Set(ecx, Immediate(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ call(ic, mode);
- // The call must be followed by:
- // - a test eax instruction to indicate that the inobject property
- // case was inlined.
- // - a mov ecx instruction to indicate that the contextual property
- // load was inlined.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a test eax instruction to indicate
+ // that the inobject property case was inlined.
//
// Store the delta to the map check instruction here in the test
// instruction. Use masm_-> instead of the __ macro since the
@@ -9190,13 +9177,8 @@ void DeferredReferenceGetNamedValue::Generate() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
- if (is_contextual_) {
- masm_->mov(ecx, -delta_to_patch_site);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
- } else {
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
- }
+ masm_->test(eax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -9367,17 +9349,12 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
-
- bool contextual_load_in_builtin =
- is_contextual &&
- (Bootstrapper::IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
Result result;
- // Do not inline in the global code or when not in loop.
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time in
+ // the code generator. Unoptimized code is toplevel code or code that is
+ // not in a loop.
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
frame()->Push(name);
@@ -9390,26 +9367,19 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// instruction here.
__ nop();
} else {
- // Inline the property load.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
+ // Inline the inobject property case.
+ Comment cmnt(masm(), "[ Inlined named property load");
Result receiver = frame()->Pop();
receiver.ToRegister();
result = allocator()->Allocate();
ASSERT(result.is_valid());
DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(),
- receiver.reg(),
- name,
- is_contextual);
+ new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
@@ -9421,33 +9391,17 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// which allows the assert below to succeed and patching to work.
deferred->Branch(not_equal);
- // The delta from the patch label to the actual load must be
- // statically known.
+ // The delta from the patch label to the load offset must be statically
+ // known.
ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- if (is_contextual) {
- // Load the (initialy invalid) cell and get its value.
- masm()->mov(result.reg(), Factory::null_value());
- if (FLAG_debug_code) {
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::global_property_cell_map());
- __ Assert(equal, "Uninitialized inlined contextual load");
- }
- __ mov(result.reg(),
- FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
- __ cmp(result.reg(), Factory::the_hole_value());
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
- } else {
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
- }
-
+ __ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
}
ASSERT(frame()->height() == original_height - 1);
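
The comments in the codegen hunks above describe the inlining contract: right after the IC call the generator either emits a nop (nothing inlined) or a test eax, imm32 whose immediate is the negated distance back to the patchable map-check/load site. A minimal standalone sketch of that convention (illustrative only; EmitTestEaxImm and ReadDeltaToPatchSite are hypothetical helpers, not V8 functions, and only the 0xA9/0x90 opcodes come from the real encoding):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // 0xA9 is the one-byte opcode for "test eax, imm32"; 0x90 is "nop".
    void EmitTestEaxImm(uint8_t* buf, int32_t imm) {
      buf[0] = 0xA9;
      std::memcpy(buf + 1, &imm, 4);
    }

    // Called on the byte right after the IC call site.
    int32_t ReadDeltaToPatchSite(const uint8_t* after_call) {
      if (after_call[0] != 0xA9) return 0;  // e.g. a nop: nothing was inlined
      int32_t imm;
      std::memcpy(&imm, after_call + 1, 4);
      return -imm;  // the generator stored the negated delta
    }

    int main() {
      uint8_t code[5];
      EmitTestEaxImm(code, -123);
      std::printf("delta = %d\n", ReadDeltaToPatchSite(code));  // delta = 123
    }
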
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 207648bd80..64305ef69e 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -685,8 +685,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
+ case 2: mnem = "fstp"; break;
case 3: mnem = "fstp_d"; break;
default: UnimplementedInstruction();
}
@@ -958,14 +957,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1165,23 +1156,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("psllq %s,%d",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
} else {
UnimplementedInstruction();
}
@@ -1300,23 +1274,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm));
data++;
}
- } else if (b2 == 0xC2) {
- // Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data += 2;
} else {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
@@ -1410,7 +1367,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
" %s",
tmp_buffer_.start());
return instr_len;
-} // NOLINT (function is too long)
+}
//------------------------------------------------------------------------------
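
The 0xC2 (cmpsd-family) branch removed from the disassembler above decodes the SSE2 compare instructions: cmpsd takes a trailing imm8 that selects the predicate, and the pseudo-op table it uses cites the Intel manual (Table 3-18). A standalone sketch of that mapping (CmpSdMnemonic is a hypothetical helper, not part of the V8 disassembler):

    #include <cstdio>

    // imm8 0..7 selects eq, lt, le, unord, neq, nlt, nle, ord.
    const char* CmpSdMnemonic(unsigned imm8) {
      static const char* const kNames[] = {
          "cmpeqsd",  "cmpltsd",  "cmplesd",  "cmpunordsd",
          "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
      return imm8 < 8 ? kNames[imm8] : "cmpsd(bad imm8)";
    }

    int main() {
      // The removed Assembler::cmpltsd emitter ends with EMIT(1); imm8 == 1 is "lt".
      std::printf("%s\n", CmpSdMnemonic(1));  // prints cmpltsd
    }
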
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index dd44f0ee5f..9baf76336b 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -35,8 +35,16 @@ namespace v8 {
namespace internal {
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute the stack pointer.
+ Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+ // Fill in the state.
+ state->fp = fp;
+ state->sp = sp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
+ return EXIT;
}
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 8144f41dd9..1e65c4b40a 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -631,7 +631,10 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Absence of a test eax instruction following the call
+ // indicates that none of the load was inlined.
+ __ nop();
}
}
}
@@ -988,7 +991,8 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
+ __ call(ic, mode);
+ __ nop(); // Signal no inlined code.
}
@@ -1065,7 +1069,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
slow));
__ mov(eax, Immediate(key_literal->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1089,7 +1093,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ mov(eax, CodeGenerator::GlobalObject());
__ mov(ecx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call it is treated specially by the LoadIC code
+ // Remember that the assembler may choose to do peephole optimization
+ // (eg, push/pop elimination).
+ __ nop();
Apply(context, eax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1152,8 +1161,10 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test eax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
+ __ nop();
// Drop key and object left on the stack by IC.
Apply(context, eax);
}
@@ -1251,7 +1262,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
break;
}
// Fall through.
@@ -1464,14 +1476,16 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
}
@@ -1830,7 +1844,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
break;
}
case KEYED_PROPERTY: {
@@ -1841,7 +1856,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(edx);
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
break;
}
}
@@ -1864,7 +1880,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(ecx, var->name());
__ mov(edx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1948,7 +1965,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(edx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1986,7 +2004,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2033,7 +2054,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ __ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
Apply(context_, eax);
@@ -2056,7 +2077,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
arg_count, in_loop);
- EmitCallIC(ic, mode);
+ __ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
Apply(context_, eax);
@@ -2180,7 +2201,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use keyed CallIC.
VisitForValue(prop->obj(), kStack);
if (prop->is_synthetic()) {
VisitForValue(prop->key(), kAccumulator);
@@ -2189,7 +2210,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ pop(edx); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call as it is treated specially
+ // by the LoadIC code.
+ __ nop();
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -3117,7 +3142,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Set(ecx, Immediate(expr->name()));
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3422,7 +3447,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
if (expr->is_postfix()) {
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
@@ -3436,7 +3464,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(ecx);
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
if (expr->is_postfix()) {
// Result is on the stack
if (context_ != Expression::kEffect) {
@@ -3460,7 +3491,8 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
if (where == kStack) __ push(eax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
@@ -3712,36 +3744,10 @@ void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
}
-Register FullCodeGenerator::result_register() {
- return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return esi;
-}
+Register FullCodeGenerator::result_register() { return eax; }
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- __ call(ic, mode);
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
-}
+Register FullCodeGenerator::context_register() { return esi; }
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 413c36e922..3d0bd796a0 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -692,6 +692,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
+ Label index_out_of_range;
Register receiver = edx;
Register index = eax;
@@ -706,7 +707,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &miss, // When index out of range.
+ &index_out_of_range,
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -714,6 +715,10 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ __ ret(0);
+
__ bind(&miss);
GenerateMiss(masm);
}
@@ -1661,38 +1666,6 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
-// One byte opcode for mov ecx,0xXXXXXXXX.
-static const byte kMovEcxByte = 0xB9;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell) {
- // The address of the instruction following the call.
- Address mov_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a cmp eax, nothing
- // was inlined.
- if (*mov_instruction_address != kMovEcxByte) return false;
-
- Address delta_address = mov_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = mov_instruction_address + delta + 3;
- *(reinterpret_cast