Upgrade V8 to 2.1.4

v0.7.4-release
Ryan Dahl, 15 years ago
commit d96c52694a

100 changed files, with the number of changed lines shown for each:

1. deps/v8/ChangeLog (7)
2. deps/v8/include/v8.h (8)
3. deps/v8/src/SConscript (1)
4. deps/v8/src/api.cc (1)
5. deps/v8/src/arm/codegen-arm.cc (18)
6. deps/v8/src/arm/codegen-arm.h (13)
7. deps/v8/src/arm/full-codegen-arm.cc (15)
8. deps/v8/src/arm/macro-assembler-arm.cc (6)
9. deps/v8/src/arm/stub-cache-arm.cc (117)
10. deps/v8/src/arm/virtual-frame-arm.h (2)
11. deps/v8/src/array.js (81)
12. deps/v8/src/ast.cc (107)
13. deps/v8/src/ast.h (111)
14. deps/v8/src/bootstrapper.cc (70)
15. deps/v8/src/builtins.cc (99)
16. deps/v8/src/cached_powers.h (119)
17. deps/v8/src/checks.h (22)
18. deps/v8/src/codegen.cc (48)
19. deps/v8/src/codegen.h (30)
20. deps/v8/src/compilation-cache.cc (4)
21. deps/v8/src/compiler.cc (44)
22. deps/v8/src/contexts.h (8)
23. deps/v8/src/conversions.cc (16)
24. deps/v8/src/data-flow.cc (1040)
25. deps/v8/src/data-flow.h (305)
26. deps/v8/src/date-delay.js (101)
27. deps/v8/src/debug.cc (2)
28. deps/v8/src/diy_fp.h (136)
29. deps/v8/src/double.h (169)
30. deps/v8/src/factory.cc (1)
31. deps/v8/src/factory.h (4)
32. deps/v8/src/globals.h (41)
33. deps/v8/src/grisu3.cc (494)
34. deps/v8/src/grisu3.h (55)
35. deps/v8/src/heap.cc (28)
36. deps/v8/src/ia32/assembler-ia32.cc (16)
37. deps/v8/src/ia32/assembler-ia32.h (3)
38. deps/v8/src/ia32/codegen-ia32.cc (79)
39. deps/v8/src/ia32/codegen-ia32.h (49)
40. deps/v8/src/ia32/disasm-ia32.cc (43)
41. deps/v8/src/ia32/full-codegen-ia32.cc (5)
42. deps/v8/src/ia32/ic-ia32.cc (131)
43. deps/v8/src/ia32/macro-assembler-ia32.cc (109)
44. deps/v8/src/ia32/macro-assembler-ia32.h (15)
45. deps/v8/src/ia32/stub-cache-ia32.cc (217)
46. deps/v8/src/ia32/virtual-frame-ia32.cc (12)
47. deps/v8/src/ia32/virtual-frame-ia32.h (3)
48. deps/v8/src/ic.cc (13)
49. deps/v8/src/log-utils.cc (3)
50. deps/v8/src/log-utils.h (2)
51. deps/v8/src/log.cc (5)
52. deps/v8/src/macros.py (6)
53. deps/v8/src/math.js (11)
54. deps/v8/src/messages.js (3)
55. deps/v8/src/mips/codegen-mips-inl.h (10)
56. deps/v8/src/mips/codegen-mips.cc (18)
57. deps/v8/src/mips/codegen-mips.h (19)
58. deps/v8/src/mips/simulator-mips.cc (8)
59. deps/v8/src/mips/virtual-frame-mips.h (6)
60. deps/v8/src/objects-debug.cc (2)
61. deps/v8/src/objects-inl.h (19)
62. deps/v8/src/objects.cc (21)
63. deps/v8/src/objects.h (28)
64. deps/v8/src/parser.cc (65)
65. deps/v8/src/parser.h (3)
66. deps/v8/src/powers_ten.h (2461)
67. deps/v8/src/regexp-delay.js (57)
68. deps/v8/src/runtime.cc (474)
69. deps/v8/src/runtime.h (8)
70. deps/v8/src/scopes.h (1)
71. deps/v8/src/string.js (84)
72. deps/v8/src/stub-cache.cc (8)
73. deps/v8/src/stub-cache.h (25)
74. deps/v8/src/top.cc (48)
75. deps/v8/src/top.h (5)
76. deps/v8/src/utils.h (40)
77. deps/v8/src/v8-counters.h (1)
78. deps/v8/src/version.cc (2)
79. deps/v8/src/virtual-frame-inl.h (6)
80. deps/v8/src/x64/codegen-x64.cc (8)
81. deps/v8/src/x64/codegen-x64.h (13)
82. deps/v8/src/x64/full-codegen-x64.cc (5)
83. deps/v8/src/x64/macro-assembler-x64.cc (12)
84. deps/v8/src/x64/stub-cache-x64.cc (131)
85. deps/v8/src/x64/virtual-frame-x64.h (2)
86. deps/v8/test/cctest/SConscript (4)
87. deps/v8/test/cctest/gay_shortest.cc (100048)
88. deps/v8/test/cctest/gay_shortest.h (44)
89. deps/v8/test/cctest/test-diy_fp.cc (67)
90. deps/v8/test/cctest/test-double.cc (204)
91. deps/v8/test/cctest/test-grisu3.cc (116)
92. deps/v8/test/cctest/test-log-stack-tracer.cc (8)
93. deps/v8/test/cctest/test-log.cc (6)
94. deps/v8/test/cctest/test-serialize.cc (1)
95. deps/v8/test/mjsunit/abs.js (48)
96. deps/v8/test/mjsunit/array-pop.js (61)
97. deps/v8/test/mjsunit/array-push.js (68)
98. deps/v8/test/mjsunit/fuzz-natives.js (14)
99. deps/v8/test/mjsunit/regexp-compile.js (42)
100. deps/v8/test/mjsunit/regress/regress-641.js (35)

deps/v8/ChangeLog (7)

@ -1,3 +1,10 @@
2010-03-10: Version 2.1.4
Fixed code cache lookup for keyed IC's (issue http://crbug.com/37853).
Performance improvements on all platforms.
2010-03-10: Version 2.1.3
Added API method for context-disposal notifications.

deps/v8/include/v8.h (8)

@ -2421,6 +2421,14 @@ class V8EXPORT V8 {
*/
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
/**
* The minimum allowed size for a log lines buffer. If the size of
* the buffer given will not be enough to hold a line of the maximum
* length, an attempt to find a log line end in GetLogLines will
* fail, and an empty result will be returned.
*/
static const int kMinimumSizeForLogLinesBuffer = 2048;
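
The new constant pairs with GetLogLines just above, and api.cc later in this diff now ASSERTs that callers respect it. Below is a minimal embedder-side sketch; the DrainLogLines helper is invented for illustration, and it assumes (the header does not spell this out) that the return value of GetLogLines is the number of bytes copied and that from_pos advances by that amount.

#include <v8.h>
#include <cstdio>

// Drain profiler log lines through the public API. The buffer must be at
// least kMinimumSizeForLogLinesBuffer bytes, or GetLogLines may return an
// empty result (and, after this commit, trips an ASSERT in debug builds).
static void DrainLogLines() {
  static char buffer[v8::V8::kMinimumSizeForLogLinesBuffer];
  int pos = 0;
  for (;;) {
    int written = v8::V8::GetLogLines(
        pos, buffer, v8::V8::kMinimumSizeForLogLinesBuffer);
    if (written <= 0) break;             // nothing more to read
    fwrite(buffer, 1, written, stdout);  // forward the lines somewhere
    pos += written;
  }
}
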
/**
* Retrieve the V8 thread id of the calling thread.
*

deps/v8/src/SConscript (1)

@ -63,6 +63,7 @@ SOURCES = {
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
grisu3.cc
handles.cc
hashmap.cc
heap-profiler.cc

deps/v8/src/api.cc (1)

@ -3580,6 +3580,7 @@ int V8::GetActiveProfilerModules() {
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
#endif
return 0;

deps/v8/src/arm/codegen-arm.cc (18)

@ -2709,18 +2709,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
// Load the function of this activation.
__ ldr(r2, frame_->Function());
__ ldr(r3, frame_->Function());
// Literal array.
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
// Literal index.
__ mov(r1, Operand(Smi::FromInt(node->literal_index())));
__ mov(r2, Operand(Smi::FromInt(node->literal_index())));
// Constant properties.
__ mov(r0, Operand(node->constant_properties()));
frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
__ mov(r1, Operand(node->constant_properties()));
// Should the object literal have fast elements?
__ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
frame_->EmitPush(r0); // save the result
for (int i = 0; i < node->properties()->length(); i++) {
@ -3597,7 +3599,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);

deps/v8/src/arm/codegen-arm.h (13)

@ -197,6 +197,10 @@ class CodeGenerator: public AstVisitor {
static const int kUnknownIntValue = -1;
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@ -326,6 +330,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@ -360,7 +365,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
@ -396,14 +401,10 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast call to sine function.
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.

deps/v8/src/arm/full-codegen-arm.cc (15)

@ -783,15 +783,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
__ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r0, Operand(expr->constant_properties()));
__ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_properties()));
__ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit() | r0.bit());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
// If result_saved is true the result is on top of the stack. If

deps/v8/src/arm/macro-assembler-arm.cc (6)

@ -280,9 +280,9 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(bit_cast<int32_t>(kZapValue)));
mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(offset, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
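
The only change in this file is the rename from bit_cast to BitCast; utils.h (also touched in this commit, not shown here) carries the definition. As an illustration of the memcpy-based idiom such a helper typically uses, here is a self-contained sketch; BitCastSketch and the zap constant below are made-up stand-ins, not V8's exact code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the bits of one value as another type of the same size
// without violating aliasing rules (the general idea behind BitCast).
template <class Dest, class Source>
Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

int main() {
  const uint32_t kZap = 0xdeadbeef;              // illustrative zap value
  printf("%d\n", BitCastSketch<int32_t>(kZap));  // same bits, signed view
  return 0;
}
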

deps/v8/src/arm/stub-cache-arm.cc (117)

@ -815,6 +815,104 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
Object* CallStubCompiler::CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// TODO(639): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss;
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* CallStubCompiler::CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
// TODO(642): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss;
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@ -824,6 +922,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -- r2 : name
// -- lr : return address
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
return generator(this, object, holder, function, name, check);
}
Label miss;
// Get the receiver from the stack
@ -916,18 +1021,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ b(ne, &miss);
break;
default:
UNREACHABLE();
}

deps/v8/src/arm/virtual-frame-arm.h (2)

@ -364,6 +364,8 @@ class VirtualFrame : public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
inline void SetTypeForLocalAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;

deps/v8/src/array.js (81)

@ -1088,15 +1088,6 @@ function ArrayIsArray(obj) {
return IS_ARRAY(obj);
}
// -------------------------------------------------------------------
function UpdateFunctionLengths(lengths) {
for (var key in lengths) {
%FunctionSetLength(this[key], lengths[key]);
}
}
// -------------------------------------------------------------------
function SetupArray() {
@ -1109,46 +1100,46 @@ function SetupArray() {
"isArray", ArrayIsArray
));
var specialFunctions = %SpecialArrayFunctions({});
function getFunction(name, jsBuiltin, len) {
var f = jsBuiltin;
if (specialFunctions.hasOwnProperty(name)) {
f = specialFunctions[name];
}
if (!IS_UNDEFINED(len)) {
%FunctionSetLength(f, len);
}
return f;
}
// Setup non-enumerable functions of the Array.prototype object and
// set their names.
InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
"toString", ArrayToString,
"toLocaleString", ArrayToLocaleString,
"join", ArrayJoin,
"pop", ArrayPop,
"push", ArrayPush,
"concat", ArrayConcat,
"reverse", ArrayReverse,
"shift", ArrayShift,
"unshift", ArrayUnshift,
"slice", ArraySlice,
"splice", ArraySplice,
"sort", ArraySort,
"filter", ArrayFilter,
"forEach", ArrayForEach,
"some", ArraySome,
"every", ArrayEvery,
"map", ArrayMap,
"indexOf", ArrayIndexOf,
"lastIndexOf", ArrayLastIndexOf,
"reduce", ArrayReduce,
"reduceRight", ArrayReduceRight
));
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
UpdateFunctionLengths({
ArrayFilter: 1,
ArrayForEach: 1,
ArraySome: 1,
ArrayEvery: 1,
ArrayMap: 1,
ArrayIndexOf: 1,
ArrayLastIndexOf: 1,
ArrayPush: 1,
ArrayReduce: 1,
ArrayReduceRight: 1
});
InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
"concat", getFunction("concat", ArrayConcat),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),
"slice", getFunction("slice", ArraySlice, 2),
"splice", getFunction("splice", ArraySplice, 2),
"sort", getFunction("sort", ArraySort),
"filter", getFunction("filter", ArrayFilter, 1),
"forEach", getFunction("forEach", ArrayForEach, 1),
"some", getFunction("some", ArraySome, 1),
"every", getFunction("every", ArrayEvery, 1),
"map", getFunction("map", ArrayMap, 1),
"indexOf", getFunction("indexOf", ArrayIndexOf, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
"reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
));
%FinishArrayPrototypeSetup($Array.prototype);
}

deps/v8/src/ast.cc (107)

@ -58,13 +58,27 @@ AST_NODE_LIST(DECL_ACCEPT)
// ----------------------------------------------------------------------------
// Implementation of other node functionality.
Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
return (expression()->AsAssignment() != NULL &&
!expression()->AsAssignment()->is_compound())
? expression()->AsAssignment()
: NULL;
}
CountOperation* ExpressionStatement::StatementAsCountOperation() {
return expression()->AsCountOperation();
}
VariableProxy::VariableProxy(Handle<String> name,
bool is_this,
bool inside_with)
: name_(name),
var_(NULL),
is_this_(is_this),
inside_with_(inside_with) {
inside_with_(inside_with),
is_trivial_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
@ -484,5 +498,96 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
}
// IsPrimitive implementation. IsPrimitive is true if the value of an
// expression is known at compile-time to be any JS type other than Object
// (e.g, it is Undefined, Null, Boolean, String, or Number).
// The following expression types are never primitive because they express
// Object values.
bool FunctionLiteral::IsPrimitive() { return false; }
bool FunctionBoilerplateLiteral::IsPrimitive() { return false; }
bool RegExpLiteral::IsPrimitive() { return false; }
bool ObjectLiteral::IsPrimitive() { return false; }
bool ArrayLiteral::IsPrimitive() { return false; }
bool CatchExtensionObject::IsPrimitive() { return false; }
bool CallNew::IsPrimitive() { return false; }
bool ThisFunction::IsPrimitive() { return false; }
// The following expression types are not always primitive because we do not
// have enough information to conclude that they are.
bool VariableProxy::IsPrimitive() { return false; }
bool Property::IsPrimitive() { return false; }
bool Call::IsPrimitive() { return false; }
bool CallRuntime::IsPrimitive() { return false; }
// The value of a conditional is the value of one of the alternatives. It's
// always primitive if both alternatives are always primitive.
bool Conditional::IsPrimitive() {
return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
}
// A literal is primitive when it is not a JSObject.
bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
// The value of an assignment is the value of its right-hand side.
bool Assignment::IsPrimitive() {
switch (op()) {
case Token::INIT_VAR:
case Token::INIT_CONST:
case Token::ASSIGN:
return value()->IsPrimitive();
default:
// {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
// Arithmetic operations are always primitive. They express Numbers
// with the exception of +, which expresses a Number or a String.
return true;
}
}
// Throw does not express a value, so it's trivially always primitive.
bool Throw::IsPrimitive() { return true; }
// Unary operations always express primitive values. delete and ! express
// Booleans, void Undefined, typeof String, +, -, and ~ Numbers.
bool UnaryOperation::IsPrimitive() { return true; }
// Count operations (pre- and post-fix increment and decrement) always
// express primitive values (Numbers). See ECMA-262-3, 11.3.1, 11.3.2,
// 11.4.4, and 11.4.5.
bool CountOperation::IsPrimitive() { return true; }
// Binary operations depend on the operator.
bool BinaryOperation::IsPrimitive() {
switch (op()) {
case Token::COMMA:
// Value is the value of the right subexpression.
return right()->IsPrimitive();
case Token::OR:
case Token::AND:
// Value is the value of one of the subexpressions.
return left()->IsPrimitive() && right()->IsPrimitive();
default:
// {|, ^, &, <<, >>, >>>, +, -, *, /, %}
// Arithmetic operations are always primitive. They express Numbers
// with the exception of +, which expresses a Number or a String.
return true;
}
}
// Compare operations always express Boolean values.
bool CompareOperation::IsPrimitive() { return true; }
} } // namespace v8::internal

deps/v8/src/ast.h (111)

@ -137,6 +137,7 @@ class AstNode: public ZoneObject {
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; }
virtual CountOperation* AsCountOperation() { return NULL; }
virtual BinaryOperation* AsBinaryOperation() { return NULL; }
virtual Assignment* AsAssignment() { return NULL; }
virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
@ -161,6 +162,9 @@ class Statement: public AstNode {
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
virtual CountOperation* StatementAsCountOperation() { return NULL; }
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@ -200,6 +204,8 @@ class Expression: public AstNode {
virtual bool IsValidLeftHandSide() { return false; }
virtual Variable* AssignedVar() { return NULL; }
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property
// names because [] for string objects is handled only by keyed ICs.
@ -214,6 +220,10 @@ class Expression: public AstNode {
// evaluate out of order.
virtual bool IsTrivial() { return false; }
// True if the expression always has one of the non-Object JS types
// (Undefined, Null, Boolean, String, or Number).
virtual bool IsPrimitive() = 0;
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@ -274,6 +284,12 @@ class ValidLeftHandSideSentinel: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
static ValidLeftHandSideSentinel* instance() { return &instance_; }
virtual bool IsPrimitive() {
UNREACHABLE();
return false;
}
private:
static ValidLeftHandSideSentinel instance_;
};
@ -321,6 +337,16 @@ class Block: public BreakableStatement {
virtual void Accept(AstVisitor* v);
virtual Assignment* StatementAsSimpleAssignment() {
if (statements_.length() != 1) return NULL;
return statements_[0]->StatementAsSimpleAssignment();
}
virtual CountOperation* StatementAsCountOperation() {
if (statements_.length() != 1) return NULL;
return statements_[0]->StatementAsCountOperation();
}
void AddStatement(Statement* statement) { statements_.Add(statement); }
ZoneList<Statement*>* statements() { return &statements_; }
@ -442,8 +468,8 @@ class ForStatement: public IterationStatement {
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true) {
}
may_have_function_literal_(true),
loop_variable_(NULL) {}
void Initialize(Statement* init,
Expression* cond,
@ -464,12 +490,17 @@ class ForStatement: public IterationStatement {
return may_have_function_literal_;
}
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
private:
Statement* init_;
Expression* cond_;
Statement* next_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
friend class AstOptimizer;
};
@ -507,6 +538,9 @@ class ExpressionStatement: public Statement {
// Type testing & conversion.
virtual ExpressionStatement* AsExpressionStatement() { return this; }
virtual Assignment* StatementAsSimpleAssignment();
virtual CountOperation* StatementAsCountOperation();
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() { return expression_; }
@ -774,6 +808,7 @@ class Literal: public Expression {
virtual bool IsLeaf() { return true; }
virtual bool IsTrivial() { return true; }
virtual bool IsPrimitive();
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@ -849,24 +884,31 @@ class ObjectLiteral: public MaterializedLiteral {
ZoneList<Property*>* properties,
int literal_index,
bool is_simple,
bool fast_elements,
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties) {}
properties_(properties),
fast_elements_(fast_elements) {}
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return properties()->is_empty(); }
virtual bool IsPrimitive();
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
ZoneList<Property*>* properties() const { return properties_; }
bool fast_elements() const { return fast_elements_; }
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
};
@ -884,6 +926,8 @@ class RegExpLiteral: public MaterializedLiteral {
virtual bool IsLeaf() { return true; }
virtual bool IsPrimitive();
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@ -910,6 +954,8 @@ class ArrayLiteral: public MaterializedLiteral {
virtual bool IsLeaf() { return values()->is_empty(); }
virtual bool IsPrimitive();
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@ -930,6 +976,8 @@ class CatchExtensionObject: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@ -964,7 +1012,9 @@ class VariableProxy: public Expression {
// Reading from a mutable variable is a side effect, but 'this' is
// immutable.
virtual bool IsTrivial() { return is_this(); }
virtual bool IsTrivial() { return is_trivial_; }
virtual bool IsPrimitive();
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
@ -979,6 +1029,8 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
bool is_trivial() { return is_trivial_; }
void set_is_trivial(bool b) { is_trivial_ = b; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@ -988,6 +1040,7 @@ class VariableProxy: public Expression {
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool inside_with_;
bool is_trivial_;
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@ -1004,6 +1057,11 @@ class VariableProxySentinel: public VariableProxy {
return &identifier_proxy_;
}
virtual bool IsPrimitive() {
UNREACHABLE();
return false;
}
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
static VariableProxySentinel this_proxy_;
@ -1047,6 +1105,11 @@ class Slot: public Expression {
virtual bool IsLeaf() { return true; }
virtual bool IsPrimitive() {
UNREACHABLE();
return false;
}
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
// Accessors
@ -1079,6 +1142,8 @@ class Property: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
virtual bool IsPrimitive();
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
int position() const { return pos_; }
@ -1109,6 +1174,8 @@ class Call: public Expression {
// Type testing and conversion.
virtual Call* AsCall() { return this; }
virtual bool IsPrimitive();
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@ -1131,6 +1198,8 @@ class CallNew: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@ -1155,6 +1224,8 @@ class CallRuntime: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@ -1179,6 +1250,8 @@ class UnaryOperation: public Expression {
// Type testing & conversion
virtual UnaryOperation* AsUnaryOperation() { return this; }
virtual bool IsPrimitive();
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@ -1200,6 +1273,8 @@ class BinaryOperation: public Expression {
// Type testing & conversion
virtual BinaryOperation* AsBinaryOperation() { return this; }
virtual bool IsPrimitive();
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
bool ResultOverwriteAllowed() {
@ -1246,6 +1321,14 @@ class CountOperation: public Expression {
virtual void Accept(AstVisitor* v);
virtual CountOperation* AsCountOperation() { return this; }
virtual Variable* AssignedVar() {
return expression()->AsVariableProxy()->AsVariable();
}
virtual bool IsPrimitive();
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
Token::Value op() const { return op_; }
@ -1272,6 +1355,8 @@ class CompareOperation: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
@ -1302,6 +1387,8 @@ class Conditional: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@ -1324,6 +1411,14 @@ class Assignment: public Expression {
virtual void Accept(AstVisitor* v);
virtual Assignment* AsAssignment() { return this; }
virtual bool IsPrimitive();
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
virtual Variable* AssignedVar() {
return target()->AsVariableProxy()->AsVariable();
}
Token::Value binary_op() const;
Token::Value op() const { return op_; }
@ -1358,6 +1453,9 @@ class Throw: public Expression {
: exception_(exception), pos_(pos) {}
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@ -1407,6 +1505,8 @@ class FunctionLiteral: public Expression {
virtual bool IsLeaf() { return true; }
virtual bool IsPrimitive();
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
@ -1477,6 +1577,8 @@ class FunctionBoilerplateLiteral: public Expression {
virtual void Accept(AstVisitor* v);
virtual bool IsPrimitive();
private:
Handle<JSFunction> boilerplate_;
};
@ -1486,6 +1588,7 @@ class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return true; }
virtual bool IsPrimitive();
};

deps/v8/src/bootstrapper.cc (70)

@ -55,7 +55,7 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Iterate(ObjectVisitor* v) {
v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
}
@ -245,12 +245,6 @@ class Genesis BASE_EMBEDDED {
bool make_prototype_enumerable = false);
void MakeFunctionInstancePrototypeWritable();
void AddSpecialFunction(Handle<JSObject> prototype,
const char* name,
Handle<Code> code);
void BuildSpecialFunctionTable();
static bool CompileBuiltin(int index);
static bool CompileNative(Vector<const char> name, Handle<String> source);
static bool CompileScriptCached(Vector<const char> name,
@ -777,8 +771,6 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
delegate->shared()->DontAdaptArguments();
}
global_context()->set_special_function_table(Heap::empty_fixed_array());
// Initialize the out of memory slot.
global_context()->set_out_of_memory(Heap::false_value());
@ -1457,65 +1449,6 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
}
void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
const char* name,
Handle<Code> code) {
Handle<String> key = Factory::LookupAsciiSymbol(name);
Handle<Object> value = Handle<Object>(prototype->GetProperty(*key));
if (value->IsJSFunction()) {
Handle<JSFunction> optimized = Factory::NewFunction(key,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
false);
optimized->shared()->DontAdaptArguments();
int len = global_context()->special_function_table()->length();
Handle<FixedArray> new_array = Factory::NewFixedArray(len + 3);
for (int index = 0; index < len; index++) {
new_array->set(index,
global_context()->special_function_table()->get(index));
}
new_array->set(len+0, *prototype);
new_array->set(len+1, *value);
new_array->set(len+2, *optimized);
global_context()->set_special_function_table(*new_array);
}
}
void Genesis::BuildSpecialFunctionTable() {
HandleScope scope;
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
// Add special versions for some Array.prototype functions.
Handle<JSFunction> function =
Handle<JSFunction>(
JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
Handle<JSObject> visible_prototype =
Handle<JSObject>(JSObject::cast(function->prototype()));
// Remember to put those specializations on the hidden prototype if present.
Handle<JSObject> special_prototype;
Handle<Object> superproto(visible_prototype->GetPrototype());
if (superproto->IsJSObject() &&
JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
special_prototype = Handle<JSObject>::cast(superproto);
} else {
special_prototype = visible_prototype;
}
AddSpecialFunction(special_prototype, "pop",
Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
AddSpecialFunction(special_prototype, "push",
Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
AddSpecialFunction(special_prototype, "shift",
Handle<Code>(Builtins::builtin(Builtins::ArrayShift)));
AddSpecialFunction(special_prototype, "unshift",
Handle<Code>(Builtins::builtin(Builtins::ArrayUnshift)));
AddSpecialFunction(special_prototype, "slice",
Handle<Code>(Builtins::builtin(Builtins::ArraySlice)));
AddSpecialFunction(special_prototype, "splice",
Handle<Code>(Builtins::builtin(Builtins::ArraySplice)));
}
Genesis::Genesis(Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions) {
@ -1539,7 +1472,6 @@ Genesis::Genesis(Handle<Object> global_object,
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
BuildSpecialFunctionTable();
if (!ConfigureGlobalObjects(global_template)) return;

deps/v8/src/builtins.cc (99)

@ -319,6 +319,24 @@ static bool ArrayPrototypeHasNoElements() {
}
static bool IsJSArrayWithFastElements(Object* receiver,
FixedArray** elements) {
if (!receiver->IsJSArray()) {
return false;
}
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = HeapObject::cast(array->elements());
if (elms->map() != Heap::fixed_array_map()) {
return false;
}
*elements = FixedArray::cast(elms);
return true;
}
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
@ -331,7 +349,7 @@ static Object* CallJsBuiltin(const char* name,
Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = &args[i + 1];
argv[i] = args.at<Object>(i + 1).location();
}
bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
@ -346,8 +364,12 @@ static Object* CallJsBuiltin(const char* name,
BUILTIN(ArrayPush) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)) {
return CallJsBuiltin("ArrayPush", args);
}
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -359,7 +381,6 @@ BUILTIN(ArrayPush) {
ASSERT(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
FixedArray* elms = FixedArray::cast(array->elements());
if (new_length > elms->length()) {
// New backing storage is needed.
@ -390,14 +411,17 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)) {
return CallJsBuiltin("ArrayPop", args);
}
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
// Get top element
FixedArray* elms = FixedArray::cast(array->elements());
Object* top = elms->get(len - 1);
// Set the length.
@ -420,18 +444,18 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
if (!ArrayPrototypeHasNoElements()) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayShift", args);
}
JSArray* array = JSArray::cast(*args.receiver());
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
FixedArray* elms = FixedArray::cast(array->elements());
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
@ -451,26 +475,22 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
if (!ArrayPrototypeHasNoElements()) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayUnshift", args);
}
JSArray* array = JSArray::cast(*args.receiver());
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
// Note that we cannot quit early if to_add == 0 as
// values should be lifted from prototype into
// the array.
int new_length = len + to_add;
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
FixedArray* elms = FixedArray::cast(array->elements());
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@ -503,11 +523,13 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
if (!ArrayPrototypeHasNoElements()) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySlice", args);
}
JSArray* array = JSArray::cast(*args.receiver());
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@ -558,8 +580,6 @@ BUILTIN(ArraySlice) {
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
FixedArray* elms = FixedArray::cast(array->elements());
AssertNoAllocation no_gc;
CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
@ -573,11 +593,13 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
if (!ArrayPrototypeHasNoElements()) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySplice", args);
}
JSArray* array = JSArray::cast(*args.receiver());
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@ -618,8 +640,6 @@ BUILTIN(ArraySplice) {
}
int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
FixedArray* elms = FixedArray::cast(array->elements());
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
Object* result = AllocateEmptyJSArray();
@ -766,20 +786,19 @@ static Object* HandleApiCallHelper(
HandleScope scope;
Handle<JSFunction> function = args.called_function();
ASSERT(function->shared()->IsApiFunction());
FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
if (is_construct) {
Handle<FunctionTemplateInfo> desc =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(function->shared()->function_data()));
Handle<FunctionTemplateInfo> desc(fun_data);
bool pending_exception = false;
Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
&pending_exception);
ASSERT(Top::has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
fun_data = *desc;
}
FunctionTemplateInfo* fun_data =
FunctionTemplateInfo::cast(function->shared()->function_data());
Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
@ -850,8 +869,8 @@ BUILTIN(HandleApiCallConstruct) {
static void VerifyTypeCheck(Handle<JSObject> object,
Handle<JSFunction> function) {
FunctionTemplateInfo* info =
FunctionTemplateInfo::cast(function->shared()->function_data());
ASSERT(function->shared()->IsApiFunction());
FunctionTemplateInfo* info = function->shared()->get_api_func_data();
if (info->signature()->IsUndefined()) return;
SignatureInfo* signature = SignatureInfo::cast(info->signature());
Object* receiver_type = signature->receiver();
@ -935,9 +954,9 @@ static Object* HandleApiCallAsFunctionOrConstructor(
// used to create the called object.
ASSERT(obj->map()->has_instance_call_handler());
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
Object* template_info = constructor->shared()->function_data();
ASSERT(constructor->shared()->IsApiFunction());
Object* handler =
FunctionTemplateInfo::cast(template_info)->instance_call_handler();
constructor->shared()->get_api_func_data()->instance_call_handler();
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();

deps/v8/src/cached_powers.h (119)

@ -0,0 +1,119 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CACHED_POWERS_H_
#define V8_CACHED_POWERS_H_
#include "diy_fp.h"
namespace v8 {
namespace internal {
struct CachedPower {
uint64_t significand;
int16_t binary_exponent;
int16_t decimal_exponent;
};
// The following defines implement the interface between this file and the
// generated 'powers_ten.h'.
// GRISU_CACHE_NAME(1) contains all possible cached powers.
// GRISU_CACHE_NAME(i) contains GRISU_CACHE_NAME(1) where only every 'i'th
// element is kept. More formally GRISU_CACHE_NAME(i) contains the elements j*i
// with 0 <= j < k with k such that j*k < the size of GRISU_CACHE_NAME(1).
// The higher 'i' is the fewer elements we use.
// Given that there are less elements, the exponent-distance between two
// elements in the cache grows. The variable GRISU_CACHE_MAX_DISTANCE(i) stores
// the maximum distance between two elements.
#define GRISU_CACHE_STRUCT CachedPower
#define GRISU_CACHE_NAME(i) kCachedPowers##i
#define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
#define GRISU_CACHE_OFFSET kCachedPowerOffset
#define GRISU_UINT64_C V8_2PART_UINT64_C
// The following include imports the precompiled cached powers.
#include "powers_ten.h" // NOLINT
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// We can't use a function since we reference variables depending on the 'i'.
// This way the compiler is able to see at compile time that only one
// cache-array variable is used and thus can remove all the others.
#define COMPUTE_FOR_CACHE(i) \
if (!found && (gamma - alpha + 1 >= GRISU_CACHE_MAX_DISTANCE(i))) { \
int kQ = DiyFp::kSignificandSize; \
double k = ceiling((alpha - e + kQ - 1) * kD_1_LOG2_10); \
int index = (GRISU_CACHE_OFFSET + static_cast<int>(k) - 1) / i + 1; \
cached_power = GRISU_CACHE_NAME(i)[index]; \
found = true; \
} \
static void GetCachedPower(int e, int alpha, int gamma, int* mk, DiyFp* c_mk) {
// The following if statement should be optimized by the compiler so that only
// one array is referenced and the others are not included in the object file.
bool found = false;
CachedPower cached_power;
COMPUTE_FOR_CACHE(20);
COMPUTE_FOR_CACHE(19);
COMPUTE_FOR_CACHE(18);
COMPUTE_FOR_CACHE(17);
COMPUTE_FOR_CACHE(16);
COMPUTE_FOR_CACHE(15);
COMPUTE_FOR_CACHE(14);
COMPUTE_FOR_CACHE(13);
COMPUTE_FOR_CACHE(12);
COMPUTE_FOR_CACHE(11);
COMPUTE_FOR_CACHE(10);
COMPUTE_FOR_CACHE(9);
COMPUTE_FOR_CACHE(8);
COMPUTE_FOR_CACHE(7);
COMPUTE_FOR_CACHE(6);
COMPUTE_FOR_CACHE(5);
COMPUTE_FOR_CACHE(4);
COMPUTE_FOR_CACHE(3);
COMPUTE_FOR_CACHE(2);
COMPUTE_FOR_CACHE(1);
if (!found) {
UNIMPLEMENTED();
// Silence compiler warnings.
cached_power.significand = 0;
cached_power.binary_exponent = 0;
cached_power.decimal_exponent = 0;
}
*c_mk = DiyFp(cached_power.significand, cached_power.binary_exponent);
*mk = cached_power.decimal_exponent;
ASSERT((alpha <= c_mk->e() + e) && (c_mk->e() + e <= gamma));
}
#undef GRISU_REDUCTION
#undef GRISU_CACHE_STRUCT
#undef GRISU_CACHE_NAME
#undef GRISU_CACHE_MAX_DISTANCE
#undef GRISU_CACHE_OFFSET
#undef GRISU_UINT64_C
} } // namespace v8::internal
#endif // V8_CACHED_POWERS_H_
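
For orientation, the exponent picked by COMPUTE_FOR_CACHE follows from the normalization of the cached significands. Writing the cached power as 10^k = f_c * 2^(e_c) with 2^(q-1) <= f_c < 2^q, where q is DiyFp::kSignificandSize, pins the binary exponent to e_c > k*log2(10) - q. The ASSERT at the end requires alpha <= e + e_c (<= gamma), which is guaranteed once k*log2(10) >= alpha - e + q - 1, that is, for the smallest integer

\[ k \;=\; \left\lceil (\alpha - e + q - 1)\cdot\log_{10}2 \right\rceil, \]

which is exactly what the macro evaluates; kD_1_LOG2_10 = log10(2) ≈ 0.30103 is the quantity the comment writes as 1 / lg(10). The upper bound gamma is covered by selecting a cache whose entries are spaced at most gamma - alpha + 1 apart, which is the GRISU_CACHE_MAX_DISTANCE check at the top of the macro.
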

deps/v8/src/checks.h (22)

@ -80,6 +80,7 @@ static inline void CheckEqualsHelper(const char* file, int line,
}
}
// Helper function used by the CHECK_EQ function when given int64_t
// arguments. Should not be called directly.
static inline void CheckEqualsHelper(const char* file, int line,
@ -202,6 +203,27 @@ static inline void CheckEqualsHelper(const char* file,
}
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
const char* value_source,
double value) {
// Force values to 64 bit memory to truncate 80 bit precision on IA32.
volatile double* exp = new double[1];
*exp = expected;
volatile double* val = new double[1];
*val = value;
if (*exp == *val) {
V8_Fatal(file, line,
"CHECK_NE(%s, %s) failed\n# Value: %f",
expected_source, value_source, *val);
}
delete[] exp;
delete[] val;
}
namespace v8 {
class Value;
template <class T> class Handle;
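
The new double overload of CheckNonEqualsHelper spills both values to memory through volatile pointers so that, on IA32 builds using x87 floating point, any extra 80-bit precision is dropped before the comparison. A standalone sketch of that idiom follows; ForceToMemory is an invented name, the heap allocation in the real helper serves the same purpose as the volatile stack slot here, and the effect is only observable in x87 code paths without SSE2.

#include <cstdio>

// Force a double through 64-bit memory, discarding any x87 extended
// precision it may still carry in a register (the checks.h trick).
double ForceToMemory(double x) {
  volatile double slot = x;  // volatile store/load defeats the optimizer
  return slot;
}

int main() {
  double a = 1.0 / 3.0;
  // After the round trip both operands are plain 64-bit doubles, so the
  // comparison no longer depends on register allocation.
  printf("%d\n", ForceToMemory(a) == ForceToMemory(1.0 / 3.0));
  return 0;
}
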

deps/v8/src/codegen.cc (48)

@ -351,42 +351,18 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
}
// List of special runtime calls which are generated inline. For some of these
// functions the code will be generated inline, and for others a call to a code
// stub will be inlined.
#define INLINE_RUNTIME_ENTRY(Name, argc, ressize) \
{&CodeGenerator::Generate##Name, "_" #Name, argc}, \
// Special cases: These 'runtime calls' manipulate the current
// frame and are only used 1 or two places, so we generate them
// inline instead of generating calls to them. They are used
// for implementing Function.prototype.call() and
// Function.prototype.apply().
CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateIsSmi, "_IsSmi"},
{&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
{&CodeGenerator::GenerateIsArray, "_IsArray"},
{&CodeGenerator::GenerateIsRegExp, "_IsRegExp"},
{&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
{&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
{&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
{&CodeGenerator::GenerateClassOf, "_ClassOf"},
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateCharFromCode, "_CharFromCode"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
{&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
{&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
{&CodeGenerator::GenerateNumberToString, "_NumberToString"},
{&CodeGenerator::GenerateMathPow, "_Math_pow"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
{&CodeGenerator::GenerateMathCos, "_Math_cos"},
{&CodeGenerator::GenerateMathSqrt, "_Math_sqrt"},
INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
};
#undef INLINE_RUNTIME_ENTRY
CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
Handle<String> name) {
@ -431,6 +407,14 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
}
int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
CodeGenerator::InlineRuntimeLUT* f =
CodeGenerator::FindInlineRuntimeLUT(name);
if (f != NULL) return f->nargs;
return -1;
}
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(

deps/v8/src/codegen.h (30)

@ -99,6 +99,36 @@ namespace v8 {
namespace internal {
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
F(IsArray, 1, 1) \
F(IsRegExp, 1, 1) \
F(IsConstructCall, 0, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1) \
F(ClassOf, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
F(FastCharCodeAt, 2, 1) \
F(CharFromCode, 1, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomPositiveSmi, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(NumberToString, 1, 1) \
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
F(MathSqrt, 1, 1)
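
codegen.cc above now builds kInlineRuntimeLUT from this list through the INLINE_RUNTIME_ENTRY X-macro, so every inline runtime function carries its expected argument count and InlineRuntimeCallArgumentsCount can report it. The following standalone miniature reproduces the same X-macro pattern; DEMO_INLINE_LIST, Entry, and ArgumentsCount are illustrative names, not V8's.

#include <cstdio>
#include <cstring>

// One list macro drives both the table contents and the per-entry data,
// mirroring INLINE_RUNTIME_FUNCTION_LIST / INLINE_RUNTIME_ENTRY.
#define DEMO_INLINE_LIST(F) \
  F(IsSmi, 1)               \
  F(MathPow, 2)             \
  F(MathSqrt, 1)

struct Entry { const char* name; int nargs; };

#define DEMO_ENTRY(Name, argc) {"_" #Name, argc},
static const Entry kTable[] = { DEMO_INLINE_LIST(DEMO_ENTRY) };
#undef DEMO_ENTRY

// Mirrors InlineRuntimeCallArgumentsCount: -1 when the name is unknown.
int ArgumentsCount(const char* name) {
  for (const Entry& e : kTable)
    if (std::strcmp(e.name, name) == 0) return e.nargs;
  return -1;
}

int main() {
  printf("%d %d\n", ArgumentsCount("_MathPow"), ArgumentsCount("_Foo"));  // 2 -1
  return 0;
}
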
// Support for "structured" code comments.
#ifdef DEBUG

deps/v8/src/compilation-cache.cc (4)

@ -218,9 +218,7 @@ void CompilationSubCache::Iterate(ObjectVisitor* v) {
void CompilationSubCache::Clear() {
for (int i = 0; i < generations_; i++) {
tables_[i] = Heap::undefined_value();
}
MemsetPointer(tables_, Heap::undefined_value(), generations_);
}
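
The hand-written loop over the sub-cache tables is replaced by MemsetPointer from utils.h, whose definition is not shown in this diff. Conceptually it fills a run of pointer slots with one value, roughly as in this sketch; MemsetPointerSketch is a stand-in, and the real helper may use a more specialized implementation.

#include <cstdio>

// Fill 'count' consecutive pointer slots with the same value; a plain-loop
// stand-in for what a MemsetPointer-style helper provides.
template <typename T, typename U>
void MemsetPointerSketch(T** dest, U* value, int count) {
  for (int i = 0; i < count; i++) dest[i] = value;
}

int main() {
  int sentinel = 0;
  int* slots[4];
  MemsetPointerSketch(slots, &sentinel, 4);
  printf("%d\n", slots[3] == &sentinel);  // 1
  return 0;
}
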

deps/v8/src/compiler.cc (44)

@ -79,12 +79,32 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null();
}
if (function->scope()->num_parameters() > 0 ||
function->scope()->num_stack_slots()) {
AssignedVariablesAnalyzer ava(function);
ava.Analyze();
if (ava.HasStackOverflow()) {
return Handle<Code>::null();
}
}
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(function);
if (!builder.HasStackOverflow()) {
int variable_count =
function->num_parameters() + function->scope()->num_stack_slots();
if (variable_count > 0 && builder.definitions()->length() > 0) {
ReachingDefinitions rd(builder.postorder(),
builder.definitions(),
variable_count);
rd.Compute();
}
}
#ifdef DEBUG
if (FLAG_print_graph_text) {
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(builder.postorder());
}
#endif
@ -463,12 +483,32 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
return Handle<JSFunction>::null();
}
if (literal->scope()->num_parameters() > 0 ||
literal->scope()->num_stack_slots()) {
AssignedVariablesAnalyzer ava(literal);
ava.Analyze();
if (ava.HasStackOverflow()) {
return Handle<JSFunction>::null();
}
}
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(literal);
if (!builder.HasStackOverflow()) {
int variable_count =
literal->num_parameters() + literal->scope()->num_stack_slots();
if (variable_count > 0 && builder.definitions()->length() > 0) {
ReachingDefinitions rd(builder.postorder(),
builder.definitions(),
variable_count);
rd.Compute();
}
}
#ifdef DEBUG
if (FLAG_print_graph_text) {
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(builder.postorder());
}
#endif

deps/v8/src/contexts.h (8)

@ -50,12 +50,6 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
// Comment for special_function_table:
// Table for providing optimized/specialized functions.
// The array contains triplets [object, general_function, optimized_function].
// Primarily added to support built-in optimized variants of
// Array.prototype.{push,pop}.
#define GLOBAL_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
@ -82,7 +76,6 @@ enum ContextLookupFlags {
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(SPECIAL_FUNCTION_TABLE_INDEX, FixedArray, special_function_table) \
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
@ -206,7 +199,6 @@ class Context: public FixedArray {
GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
SPECIAL_FUNCTION_TABLE_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,

deps/v8/src/conversions.cc (16)

@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "factory.h"
#include "grisu3.h"
#include "scanner.h"
namespace v8 {
@ -382,8 +383,17 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
int length = StrLength(decimal_rep);
char* decimal_rep;
bool used_dtoa = false;
char grisu_buffer[kGrisu3MaximalLength + 1];
int length;
if (grisu3(v, grisu_buffer, &sign, &length, &decimal_point)) {
decimal_rep = grisu_buffer;
} else {
decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
used_dtoa = true;
length = StrLength(decimal_rep);
}
if (sign) builder.AddCharacter('-');
@ -418,7 +428,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
builder.AddFormatted("%d", exponent);
}
freedtoa(decimal_rep);
if (used_dtoa) freedtoa(decimal_rep);
}
}
return builder.Finalize();
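In isolation, the new fast path above has this shape (a sketch, not part of the diff; the formatting code that consumes decimal_rep is elided). Both producers share the same digits-plus-decimal-point convention (v == 0.<digits> * 10^decimal_point), and only the dtoa() result has to be released with freedtoa():

char grisu_buffer[kGrisu3MaximalLength + 1];
char* decimal_rep;
bool used_dtoa = false;
int sign, length, decimal_point;
if (grisu3(v, grisu_buffer, &sign, &length, &decimal_point)) {
  decimal_rep = grisu_buffer;    // On-stack buffer, nothing to free.
} else {
  decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
  used_dtoa = true;              // Heap-allocated, released below.
  length = StrLength(decimal_rep);
}
// ... format decimal_rep, length and decimal_point into the builder ...
if (used_dtoa) freedtoa(decimal_rep);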

1040
deps/v8/src/data-flow.cc

File diff suppressed because it is too large

305
deps/v8/src/data-flow.h

@ -100,6 +100,13 @@ class BitVector: public ZoneObject {
}
}
void Subtract(const BitVector& other) {
ASSERT(other.length() == length());
for (int i = 0; i < data_length_; i++) {
data_[i] &= ~other.data_[i];
}
}
void Clear() {
for (int i = 0; i < data_length_; i++) {
data_[i] = 0;
@ -113,6 +120,13 @@ class BitVector: public ZoneObject {
return true;
}
bool Equals(const BitVector& other) {
for (int i = 0; i < data_length_; i++) {
if (data_[i] != other.data_[i]) return false;
}
return true;
}
int length() const { return length_; }
private:
@ -122,56 +136,68 @@ class BitVector: public ZoneObject {
};
// Forward declarations of Node types.
class Node;
class BranchNode;
class JoinNode;
// Flow graphs have a single entry and single exit. The empty flowgraph is
// represented by both entry and exit being NULL.
class FlowGraph BASE_EMBEDDED {
// Simple fixed-capacity list-based worklist (managed as a queue) of
// pointers to T.
template<typename T>
class WorkList BASE_EMBEDDED {
public:
FlowGraph() : entry_(NULL), exit_(NULL) {}
// The worklist cannot grow bigger than size. We keep one slot empty to
// distinguish between empty and full.
explicit WorkList(int size)
: capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
}
static FlowGraph Empty() { return FlowGraph(); }
bool is_empty() { return head_ == tail_; }
bool is_empty() const { return entry_ == NULL; }
Node* entry() const { return entry_; }
Node* exit() const { return exit_; }
bool is_full() {
// The worklist is full if head is at 0 and tail is at capacity - 1:
// head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
// or if tail is immediately to the left of head:
// tail+1 == head ==> tail - head == -1
int diff = tail_ - head_;
return (diff == -1 || diff == capacity_ - 1);
}
// Add a single instruction to the end of this flowgraph.
void AppendInstruction(AstNode* instruction);
void Insert(T* item) {
ASSERT(!is_full());
queue_[tail_++] = item;
if (tail_ == capacity_) tail_ = 0;
}
// Add a single node to the end of this flow graph.
void AppendNode(Node* node);
T* Remove() {
ASSERT(!is_empty());
T* item = queue_[head_++];
if (head_ == capacity_) head_ = 0;
return item;
}
// Add a flow graph fragment to the end of this one.
void AppendGraph(FlowGraph* graph);
private:
int capacity_; // Including one empty slot.
int head_; // Where the first item is.
int tail_; // Where the next inserted item will go.
List<T*> queue_;
};
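To make the full/empty bookkeeping above concrete, here is a standalone restatement of the ring buffer with a small check (illustrative only, not part of the diff; RingWorkList and the std::vector backing are ours):

#include <assert.h>
#include <vector>

template <typename T>
class RingWorkList {
 public:
  // One extra slot distinguishes the empty queue from the full one.
  explicit RingWorkList(int size)
      : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_, nullptr) {}
  bool is_empty() const { return head_ == tail_; }
  bool is_full() const {
    int diff = tail_ - head_;
    return diff == -1 || diff == capacity_ - 1;
  }
  void Insert(T* item) {
    assert(!is_full());
    queue_[tail_++] = item;
    if (tail_ == capacity_) tail_ = 0;
  }
  T* Remove() {
    assert(!is_empty());
    T* item = queue_[head_++];
    if (head_ == capacity_) head_ = 0;
    return item;
  }
 private:
  int capacity_, head_, tail_;
  std::vector<T*> queue_;
};

int main() {
  int a = 1, b = 2, c = 3;
  RingWorkList<int> wl(2);        // Two usable slots, three allocated.
  wl.Insert(&a);
  wl.Insert(&b);
  assert(wl.is_full());           // tail_ - head_ == capacity_ - 1.
  int* removed = wl.Remove();     // FIFO: &a comes out first.
  assert(removed == &a);
  wl.Insert(&c);                  // tail_ wraps around to slot 0.
  assert(wl.is_full());           // Now tail_ - head_ == -1.
  removed = wl.Remove();
  assert(removed == &b);
  removed = wl.Remove();
  assert(removed == &c);
  assert(wl.is_empty());
  return 0;
}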
// Concatenate an if-then-else flow-graph to this one. Control is split
// and merged, so the graph remains single-entry, single-exit.
void Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge);
// Concatenate a forward loop (e.g., while or for loop) flow-graph to this
// one. Control is split by the condition and merged back from the back
// edge at end of the body to the beginning of the condition. The single
// (free) exit of the result graph is the right (false) arm of the branch
// node.
void Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body);
struct ReachingDefinitionsData BASE_EMBEDDED {
public:
ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
#ifdef DEBUG
void PrintText(ZoneList<Node*>* postorder);
#endif
void Initialize(int definition_count) {
rd_in_ = new BitVector(definition_count);
kill_ = new BitVector(definition_count);
gen_ = new BitVector(definition_count);
}
BitVector* rd_in() { return rd_in_; }
BitVector* kill() { return kill_; }
BitVector* gen() { return gen_; }
private:
Node* entry_;
Node* exit_;
BitVector* rd_in_;
BitVector* kill_;
BitVector* gen_;
};
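For orientation (not part of the diff): rd_in, kill and gen feed the standard reaching-definitions transfer function RD_out = gen ∪ (RD_in − kill), where union is a bitwise or and subtraction is the and-not of BitVector::Subtract above. A toy version over a body with at most 32 definitions:

#include <assert.h>
#include <stdint.h>

// Bit i of each word stands for definition i.
static uint32_t ComputeRDOut(uint32_t rd_in, uint32_t gen, uint32_t kill) {
  return gen | (rd_in & ~kill);  // The and-not mirrors BitVector::Subtract.
}

int main() {
  uint32_t rd_in = 0x5;  // Definitions 0 and 2 reach the node.
  uint32_t kill = 0x1;   // The node redefines the variable of definition 0...
  uint32_t gen = 0x8;    // ...via definition 3.
  assert(ComputeRDOut(rd_in, gen, kill) == 0xC);  // Definitions 2 and 3 flow out.
  return 0;
}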
@ -182,7 +208,9 @@ class Node: public ZoneObject {
virtual ~Node() {}
virtual bool IsExitNode() { return false; }
virtual bool IsBlockNode() { return false; }
virtual bool IsBranchNode() { return false; }
virtual bool IsJoinNode() { return false; }
virtual void AddPredecessor(Node* predecessor) = 0;
@ -200,11 +228,23 @@ class Node: public ZoneObject {
int number() { return number_; }
void set_number(int number) { number_ = number; }
// Functions used by data-flow analyses.
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result) = 0;
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
#ifdef DEBUG
virtual void AssignNumbers();
void AssignNodeNumber();
void PrintReachingDefinitions();
virtual void PrintText() = 0;
#endif
protected:
ReachingDefinitionsData rd_;
private:
int number_;
bool mark_;
@ -213,49 +253,27 @@ class Node: public ZoneObject {
};
// An entry node has no predecessors and a single successor.
class EntryNode: public Node {
public:
EntryNode() : successor_(NULL) {}
void AddPredecessor(Node* predecessor) { UNREACHABLE(); }
void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
#ifdef DEBUG
void PrintText();
#endif
private:
Node* successor_;
DISALLOW_COPY_AND_ASSIGN(EntryNode);
};
// An exit node has arbitrarily many predecessors and no successors.
class ExitNode: public Node {
public:
ExitNode() : predecessors_(4) {}
bool IsExitNode() { return true; }
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
void AddSuccessor(Node* successor) { /* Do nothing. */ }
void AddSuccessor(Node* successor) { UNREACHABLE(); }
void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
void ComputeRDOut(BitVector* result);
void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
void PrintText();
#endif
@ -280,6 +298,8 @@ class BlockNode: public Node {
bool IsBlockNode() { return true; }
bool is_empty() { return instructions_.is_empty(); }
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
@ -298,8 +318,14 @@ class BlockNode: public Node {
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
void ComputeRDOut(BitVector* result);
void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
void AssignNumbers();
void PrintText();
#endif
@ -317,6 +343,8 @@ class BranchNode: public Node {
public:
BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
bool IsBranchNode() { return true; }
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
@ -335,6 +363,9 @@ class BranchNode: public Node {
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
void ComputeRDOut(BitVector* result);
void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
void PrintText();
#endif
@ -374,6 +405,9 @@ class JoinNode: public Node {
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
void ComputeRDOut(BitVector* result);
void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
void PrintText();
#endif
@ -386,12 +420,68 @@ class JoinNode: public Node {
};
// Flow graphs have a single entry and single exit. The empty flowgraph is
// represented by both entry and exit being NULL.
class FlowGraph BASE_EMBEDDED {
public:
static FlowGraph Empty() {
FlowGraph graph;
graph.entry_ = new BlockNode();
graph.exit_ = graph.entry_;
return graph;
}
bool is_empty() const {
return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
}
Node* entry() const { return entry_; }
Node* exit() const { return exit_; }
// Add a single instruction to the end of this flowgraph.
void AppendInstruction(AstNode* instruction);
// Add a single node to the end of this flow graph.
void AppendNode(Node* node);
// Add a flow graph fragment to the end of this one.
void AppendGraph(FlowGraph* graph);
// Concatenate an if-then-else flow-graph to this one. Control is split
// and merged, so the graph remains single-entry, single-exit.
void Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge);
// Concatenate a forward loop (e.g., while or for loop) flow-graph to this
// one. Control is split by the condition and merged back from the back
// edge at end of the body to the beginning of the condition. The single
// (free) exit of the result graph is the right (false) arm of the branch
// node.
void Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body);
#ifdef DEBUG
void PrintText(ZoneList<Node*>* postorder);
#endif
private:
FlowGraph() : entry_(NULL), exit_(NULL) {}
Node* entry_;
Node* exit_;
};
// Construct a flow graph from a function literal. Build pre- and postorder
// traversal orders as a byproduct.
class FlowGraphBuilder: public AstVisitor {
public:
FlowGraphBuilder()
: global_exit_(NULL),
: graph_(FlowGraph::Empty()),
global_exit_(NULL),
preorder_(4),
postorder_(4),
definitions_(4) {
@ -400,8 +490,8 @@ class FlowGraphBuilder: public AstVisitor {
void Build(FunctionLiteral* lit);
FlowGraph* graph() { return &graph_; }
ZoneList<Node*>* postorder() { return &postorder_; }
ZoneList<Expression*>* definitions() { return &definitions_; }
private:
ExitNode* global_exit() { return global_exit_; }
@ -418,8 +508,9 @@ class FlowGraphBuilder: public AstVisitor {
// The flow graph builder collects a list of definitions (assignments and
// count operations) to stack-allocated variables to use for reaching
// definitions analysis.
ZoneList<AstNode*> definitions_;
// definitions analysis. Node numbers in the AST are used to refer
// into this list.
ZoneList<Expression*> definitions_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
@ -502,6 +593,74 @@ class LivenessAnalyzer : public AstVisitor {
};
// Computes the set of assigned variables and annotates variable proxies
// that are trivial sub-expressions and for-loops where the loop variable
// is guaranteed to be a smi.
class AssignedVariablesAnalyzer : public AstVisitor {
public:
explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
void Analyze();
private:
Variable* FindSmiLoopVariable(ForStatement* stmt);
int BitIndex(Variable* var);
void RecordAssignedVar(Variable* var);
void MarkIfTrivial(Expression* expr);
// Visits an expression, saving the accumulator before, clearing
// it before visiting and restoring it after visiting.
void ProcessExpression(Expression* expr);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
FunctionLiteral* fun_;
// Accumulator for assigned variables set.
BitVector av_;
DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
};
class ReachingDefinitions BASE_EMBEDDED {
public:
ReachingDefinitions(ZoneList<Node*>* postorder,
ZoneList<Expression*>* definitions,
int variable_count)
: postorder_(postorder),
definitions_(definitions),
variables_(variable_count) {
int definition_count = definitions->length();
for (int i = 0; i < variable_count; i++) {
variables_.Add(new BitVector(definition_count));
}
}
static int IndexFor(Variable* var, int variable_count);
void Compute();
private:
// A (postorder) list of flow-graph nodes in the body.
ZoneList<Node*>* postorder_;
// A list of all the definitions in the body.
ZoneList<Expression*>* definitions_;
// For each variable, the set of all its definitions.
List<BitVector*> variables_;
DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
};
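A sketch of the fixpoint one would expect Compute() to iterate (illustrative only; ToyNode, the adjacency lists and the uint32_t sets are ours, not V8's): rd_in is recomputed as the union of the predecessors' rd_out, the transfer function is applied, and successors are revisited whenever rd_out changes.

#include <assert.h>
#include <stdint.h>
#include <deque>
#include <vector>

struct ToyNode {
  uint32_t gen = 0, kill = 0, rd_in = 0, rd_out = 0;
  std::vector<int> preds, succs;
};

static void ComputeReachingDefinitions(std::vector<ToyNode>* nodes) {
  std::deque<int> worklist;
  for (size_t i = 0; i < nodes->size(); i++) worklist.push_back(static_cast<int>(i));
  while (!worklist.empty()) {
    ToyNode& n = (*nodes)[worklist.front()];
    worklist.pop_front();
    uint32_t rd_in = 0;
    for (int p : n.preds) rd_in |= (*nodes)[p].rd_out;  // Meet: union over predecessors.
    uint32_t rd_out = n.gen | (rd_in & ~n.kill);        // Transfer function.
    n.rd_in = rd_in;
    if (rd_out != n.rd_out) {                           // Changed: revisit successors.
      n.rd_out = rd_out;
      for (int s : n.succs) worklist.push_back(s);
    }
  }
}

int main() {
  // Two definitions of the same variable: definition 0 in node 0, definition 1
  // in node 1; node 2 joins both paths.
  std::vector<ToyNode> g(3);
  g[0].gen = 0x1; g[0].succs = {1, 2};
  g[1].gen = 0x2; g[1].kill = 0x1; g[1].preds = {0}; g[1].succs = {2};
  g[2].preds = {0, 1};
  ComputeReachingDefinitions(&g);
  assert(g[2].rd_in == 0x3);  // Both definitions can reach the join node.
  return 0;
}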
} } // namespace v8::internal

101
deps/v8/src/date-delay.js

@ -293,55 +293,48 @@ function CalculateDateTable() {
}
// Constructor for creating objects holding year, month, and date.
// Introduced to ensure the two return points in FromJulianDay match same map.
function DayTriplet(year, month, date) {
this.year = year;
this.month = month;
this.date = date;
}
var julian_day_cache_triplet;
var julian_day_cache_day = $NaN;
// Compute year, month, and day from modified Julian day.
// The missing days in 1582 are ignored for JavaScript compatibility.
function FromJulianDay(julian) {
if (julian_day_cache_day == julian) {
return julian_day_cache_triplet;
var ymd_from_time_cache = [$NaN, $NaN, $NaN];
var ymd_from_time_cached_time = $NaN;
function YearFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}
var result;
// Avoid floating point and non-Smi maths in common case. This is also a period of
// time where leap years are very regular. The range is not too large to avoid overflow
// when doing the multiply-to-divide trick.
if (julian > kDayZeroInJulianDay &&
(julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
var y = 1968;
// Divide by 1461 by multiplying with 22967 and shifting down by 25!
var after_1968 = (jsimple * 22967) >> 25;
y += after_1968 << 2;
jsimple -= 1461 * after_1968;
var four_year_cycle = four_year_cycle_table[jsimple];
result = new DayTriplet(y + (four_year_cycle >> kYearShift),
(four_year_cycle & kMonthMask) >> kMonthShift,
four_year_cycle & kDayMask);
} else {
var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
var jd = FLOOR(365 * jc + (0.25 * jc));
var je = FLOOR((jb - jd)/30.6001);
var m = je - 1;
if (m > 12) m -= 13;
var y = jc - 4715;
if (m > 2) { --y; --m; }
var d = jb - jd - FLOOR(30.6001 * je);
result = new DayTriplet(y, m, d);
%DateYMDFromTime(t, ymd_from_time_cache);
ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[0];
}
function MonthFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}
julian_day_cache_day = julian;
julian_day_cache_triplet = result;
return result;
%DateYMDFromTime(t, ymd_from_time_cache);
ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[1];
}
function DateFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}
%DateYMDFromTime(t, ymd_from_time_cache);
ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[2];
}
@ -577,11 +570,10 @@ function TwoDigitString(value) {
function DateString(time) {
var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return WeekDays[WeekDay(time)] + ' '
+ Months[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ' '
+ YMD.year;
+ Months[MonthFromTime(time)] + ' '
+ TwoDigitString(DateFromTime(time)) + ' '
+ YearFromTime(time);
}
@ -590,11 +582,10 @@ var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July'
function LongDateString(time) {
var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return LongWeekDays[WeekDay(time)] + ', '
+ LongMonths[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ', '
+ YMD.year;
+ LongMonths[MonthFromTime(time)] + ' '
+ TwoDigitString(DateFromTime(time)) + ', '
+ YearFromTime(time);
}

2
deps/v8/src/debug.cc

@ -803,7 +803,7 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) {
v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_)));
v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
}

136
deps/v8/src/diy_fp.h

@ -0,0 +1,136 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DIY_FP_H_
#define V8_DIY_FP_H_
namespace v8 {
namespace internal {
// This "Do It Yourself Floating Point" class implements a floating-point number
// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
// have the most significant bit of the significand set.
// Multiplication and Subtraction do not normalize their results.
// DiyFp are not designed to contain special doubles (NaN and Infinity).
class DiyFp {
public:
static const int kSignificandSize = 64;
DiyFp() : f_(0), e_(0) {}
DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
// this = this - other.
// The exponents of both numbers must be the same and the significand of this
// must be bigger than the significand of other.
// The result will not be normalized.
void Subtract(const DiyFp& other) {
ASSERT(e_ == other.e_);
ASSERT(f_ >= other.f_);
f_ -= other.f_;
}
// Returns a - b.
// The exponents of both numbers must be the same and a must be bigger
// than b. The result will not be normalized.
static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Subtract(b);
return result;
}
// this = this * other.
void Multiply(const DiyFp& other) {
// Simply "emulates" a 128 bit multiplication.
// However: the resulting number only contains 64 bits. The least
// significant 64 bits are only used for rounding the most significant 64
// bits.
const uint64_t kM32 = 0xFFFFFFFFu;
uint64_t a = f_ >> 32;
uint64_t b = f_ & kM32;
uint64_t c = other.f_ >> 32;
uint64_t d = other.f_ & kM32;
uint64_t ac = a * c;
uint64_t bc = b * c;
uint64_t ad = a * d;
uint64_t bd = b * d;
uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
tmp += 1U << 31; // round
uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
e_ += other.e_ + 64;
f_ = result_f;
}
// Returns a * b.
static DiyFp Times(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Multiply(b);
return result;
}
void Normalize() {
ASSERT(f_ != 0);
uint64_t f = f_;
int e = e_;
// This method is mainly called for normalizing boundaries. In general
// boundaries need to be shifted by 10 bits. We thus optimize for this case.
const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
while ((f & k10MSBits) == 0) {
f <<= 10;
e -= 10;
}
while ((f & kUint64MSB) == 0) {
f <<= 1;
e--;
}
f_ = f;
e_ = e;
}
static DiyFp Normalize(const DiyFp& a) {
DiyFp result = a;
result.Normalize();
return result;
}
uint64_t f() const { return f_; }
int e() const { return e_; }
void set_f(uint64_t new_value) { f_ = new_value; }
void set_e(int new_value) { e_ = new_value; }
private:
static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
uint64_t f_;
int e_;
};
} } // namespace v8::internal
#endif // V8_DIY_FP_H_
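The 32x32 decomposition in DiyFp::Multiply computes the rounded high 64 bits of the full 128-bit product. A standalone cross-check (a sketch, not part of the diff): RoundedHighMul restates the arithmetic above and is compared against a compiler-provided 128-bit type, so it assumes unsigned __int128 as offered by gcc/clang.

#include <assert.h>
#include <stdint.h>

static uint64_t RoundedHighMul(uint64_t x, uint64_t y) {
  const uint64_t kM32 = 0xFFFFFFFFu;
  uint64_t a = x >> 32, b = x & kM32, c = y >> 32, d = y & kM32;
  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
  uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
  tmp += 1ULL << 31;  // Round the discarded low half to nearest.
  return ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
}

int main() {
  uint64_t x = 0x9876543210FEDCBAULL;
  uint64_t y = 0x1234567890ABCDEFULL;
  unsigned __int128 full = static_cast<unsigned __int128>(x) * y;
  // Rounded high half: add the top bit of the discarded low 64 bits.
  uint64_t expected = static_cast<uint64_t>(full >> 64) +
                      (static_cast<uint64_t>(full) >> 63);
  assert(RoundedHighMul(x, y) == expected);
  return 0;
}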

169
deps/v8/src/double.h

@ -0,0 +1,169 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DOUBLE_H_
#define V8_DOUBLE_H_
#include "diy_fp.h"
namespace v8 {
namespace internal {
// We assume that doubles and uint64_t have the same endianness.
static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
// Helper functions for doubles.
class Double {
public:
static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
static const uint64_t kSignificandMask =
V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
Double() : d64_(0) {}
explicit Double(double d) : d64_(double_to_uint64(d)) {}
explicit Double(uint64_t d64) : d64_(d64) {}
DiyFp AsDiyFp() const {
ASSERT(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
// this->Significand() must not be 0.
DiyFp AsNormalizedDiyFp() const {
uint64_t f = Significand();
int e = Exponent();
ASSERT(f != 0);
// The current double could be a denormal.
while ((f & kHiddenBit) == 0) {
f <<= 1;
e--;
}
// Do the final shifts in one go. Don't forget the hidden bit (the '-1').
f <<= DiyFp::kSignificandSize - kSignificandSize - 1;
e -= DiyFp::kSignificandSize - kSignificandSize - 1;
return DiyFp(f, e);
}
// Returns the double's bits as a uint64.
uint64_t AsUint64() const {
return d64_;
}
int Exponent() const {
if (IsDenormal()) return kDenormalExponent;
uint64_t d64 = AsUint64();
int biased_e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize);
return biased_e - kExponentBias;
}
uint64_t Significand() const {
uint64_t d64 = AsUint64();
uint64_t significand = d64 & kSignificandMask;
if (!IsDenormal()) {
return significand + kHiddenBit;
} else {
return significand;
}
}
// Returns true if the double is a denormal.
bool IsDenormal() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == 0;
}
// We consider denormals not to be special.
// Hence only Infinity and NaN are special.
bool IsSpecial() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == kExponentMask;
}
bool IsNan() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) != 0);
}
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) == 0);
}
int Sign() const {
uint64_t d64 = AsUint64();
return (d64 & kSignMask) == 0 ? 1 : -1;
}
// Returns the two boundaries of this.
// The bigger boundary (m_plus) is normalized. The lower boundary has the same
// exponent as m_plus.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
DiyFp v = this->AsDiyFp();
bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
if (significand_is_zero && v.e() != kDenormalExponent) {
// The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.
// The only exception is for the smallest normal: the largest denormal is
// at the same distance as its successor.
// Note: denormals have the same exponent as the smallest normals.
m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
} else {
m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
}
m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
m_minus.set_e(m_plus.e());
*out_m_plus = m_plus;
*out_m_minus = m_minus;
}
double value() const { return uint64_to_double(d64_); }
private:
static const int kSignificandSize = 52; // Excludes the hidden bit.
static const int kExponentBias = 0x3FF + kSignificandSize;
static const int kDenormalExponent = -kExponentBias + 1;
uint64_t d64_;
};
} } // namespace v8::internal
#endif // V8_DOUBLE_H_
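A quick standalone check of the decomposition behind Exponent() and Significand() (a sketch, not part of the diff): for the normal double 1.5 the stored biased exponent is 0x3FF, so after subtracting kExponentBias == 0x3FF + 52 the value is Significand() * 2^-52.

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

int main() {
  double v = 1.5;
  uint64_t d64;
  memcpy(&d64, &v, sizeof(d64));  // Same effect as the BitCast in double_to_uint64.
  const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;
  const uint64_t kHiddenBit = 0x0010000000000000ULL;
  const int kSignificandSize = 52;
  const int kExponentBias = 0x3FF + kSignificandSize;
  // 1.5 is normal, so add the hidden bit back and unbias the exponent.
  uint64_t significand = (d64 & kSignificandMask) + kHiddenBit;
  int exponent =
      static_cast<int>((d64 >> kSignificandSize) & 0x7FF) - kExponentBias;
  assert(significand == (3ULL << 51));
  assert(exponent == -52);
  assert(ldexp(static_cast<double>(significand), exponent) == v);
  return 0;
}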

1
deps/v8/src/factory.cc

@ -866,6 +866,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
map->set_instance_descriptors(*array);
}
ASSERT(result->shared()->IsApiFunction());
return result;
}

4
deps/v8/src/factory.h

@ -317,7 +317,7 @@ class Factory : public AllStatic {
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline Handle<type> name() { \
return Handle<type>(bit_cast<type**, Object**>( \
return Handle<type>(BitCast<type**, Object**>( \
&Heap::roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
@ -325,7 +325,7 @@ class Factory : public AllStatic {
#define SYMBOL_ACCESSOR(name, str) \
static inline Handle<String> name() { \
return Handle<String>(bit_cast<String**, Object**>( \
return Handle<String>(BitCast<String**, Object**>( \
&Heap::roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)

41
deps/v8/src/globals.h

@ -98,6 +98,11 @@ typedef byte* Address;
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
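A worked expansion of the macro, matching the usage comment above (illustrative, not part of the diff):
// V8_2PART_UINT64_C(0x12345678, 90123456)
//   == (static_cast<uint64_t>(0x12345678) << 32) + 0x90123456u
//   == 0x1234567890123456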
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
@ -569,42 +574,6 @@ F FUNCTION_CAST(Address addr) {
#define INLINE(header) inline header
#endif
// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++ style casts makes no difference, however a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// Bit_cast uses the memcpy exception to move the bits from a variable of one
// type of a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize bit_cast away.
//
// There is an additional use for bit_cast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use bit_cast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)

494
deps/v8/src/grisu3.cc

@ -0,0 +1,494 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "grisu3.h"
#include "cached_powers.h"
#include "diy_fp.h"
#include "double.h"
namespace v8 {
namespace internal {
template <int alpha = -60, int gamma = -32>
class Grisu3 {
public:
// Provides a decimal representation of v.
// Returns true if it succeeds; otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
// If the function returns true then
// v == (double) (buffer * 10^decimal_exponent).
// The digits in the buffer are the shortest representation possible: no
// 0.099999999999 instead of 0.1.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
static bool grisu3(double v,
char* buffer, int* length, int* decimal_exponent);
private:
// Rounds the buffer according to the rest.
// If there is too much imprecision to round then false is returned.
// Similarly, false is returned when the buffer is not within Delta.
static bool RoundWeed(char* buffer, int len, uint64_t wp_W, uint64_t Delta,
uint64_t rest, uint64_t ten_kappa, uint64_t ulp);
// Dispatches to a specialized digit-generation routine. The chosen
// routine depends on w.e (which in turn depends on alpha and gamma).
// Currently there is only one digit-generation routine, but it would be easy
// to add others.
static bool DigitGen(DiyFp low, DiyFp w, DiyFp high,
char* buffer, int* len, int* kappa);
// Generates w's digits. The result is the shortest in the interval low-high.
// All DiyFp are assumed to be imprecise and this function takes this
// imprecision into account. If the function cannot compute the best
// representation (due to the imprecision) then false is returned.
static bool DigitGen_m60_m32(DiyFp low, DiyFp w, DiyFp high,
char* buffer, int* length, int* kappa);
};
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::grisu3(double v,
char* buffer,
int* length,
int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// neighbors. Any number strictly between boundary_minus and boundary_plus
// will round to v when read as double.
// Grisu3 will never output representations that lie exactly on a boundary.
DiyFp boundary_minus, boundary_plus;
Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
ASSERT(boundary_plus.e() == w.e());
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
GetCachedPower(w.e() + DiyFp::kSignificandSize, alpha, gamma, &mk, &ten_mk);
ASSERT(alpha <= w.e() + ten_mk.e() + DiyFp::kSignificandSize &&
gamma >= w.e() + ten_mk.e() + DiyFp::kSignificandSize);
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
// The DiyFp::Times procedure rounds its result, and ten_mk is approximated
// too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
// off by a small amount.
// In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
// In other words: let f = scaled_w.f() and e = scaled_w.e(), then
// (f-1) * 2^e < w*10^k < (f+1) * 2^e
DiyFp scaled_w = DiyFp::Times(w, ten_mk);
ASSERT(scaled_w.e() ==
boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
// In theory it would be possible to avoid some recomputations by computing
// the difference between w and boundary_minus/plus (a power of 2) and to
// compute scaled_boundary_minus/plus by subtracting/adding from
// scaled_w. However the code becomes much less readable and the speed
// enhancements are not terrific.
DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
// DigitGen will generate the digits of scaled_w. Therefore we have
// v == (double) (scaled_w * 10^-mk).
// Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
// integer then it will be updated. For instance if scaled_w == 1.23 then
// the buffer will be filled with "123" and the decimal_exponent will be
// decreased by 2.
int kappa;
bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by alpha and gamma. Typically alpha >= -63
// and gamma <= 3.
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
// is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
// * alpha <= w.e() <= gamma
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
// * buffer contains the shortest possible decimal digit-sequence
// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
// correct values of low and high (without their error).
// * if more than one decimal representation gives the minimal number of
// decimal digits then the one closest to W (where W is the correct value
// of w) is chosen.
// Remark: this procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This happens rarely (~0.5% of the time).
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::DigitGen(DiyFp low,
DiyFp w,
DiyFp high,
char* buffer,
int* len,
int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1);
ASSERT(alpha <= w.e() && w.e() <= gamma);
// The following tests use alpha and gamma to avoid unnecessary dynamic tests.
if ((alpha >= -60 && gamma <= -32) || // -60 <= w.e() <= -32
(alpha <= -32 && gamma >= -60 && // Alpha/gamma overlaps -60/-32 region.
-60 <= w.e() && w.e() <= -32)) {
return DigitGen_m60_m32(low, w, high, buffer, len, kappa);
} else {
// A simple adaptation of the special case -60/-32 would allow greater ranges
// of alpha/gamma and thus reduce the number of precomputed cached powers of
// ten.
UNIMPLEMENTED();
return false;
}
}
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
static const uint32_t kTen6 = 1000000;
static const uint32_t kTen7 = 10000000;
static const uint32_t kTen8 = 100000000;
static const uint32_t kTen9 = 1000000000;
// Returns the biggest power of ten that is <= the given number. We
// furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then *power is set to 0 and *exponent to -1.
// The number of bits must be <= 32.
static void BiggestPowerTen(uint32_t number,
int number_bits,
uint32_t* power,
int* exponent) {
switch (number_bits) {
case 32:
case 31:
case 30:
if (kTen9 <= number) {
*power = kTen9;
*exponent = 9;
break;
} // else fallthrough
case 29:
case 28:
case 27:
if (kTen8 <= number) {
*power = kTen8;
*exponent = 8;
break;
} // else fallthrough
case 26:
case 25:
case 24:
if (kTen7 <= number) {
*power = kTen7;
*exponent = 7;
break;
} // else fallthrough
case 23:
case 22:
case 21:
case 20:
if (kTen6 <= number) {
*power = kTen6;
*exponent = 6;
break;
} // else fallthrough
case 19:
case 18:
case 17:
if (kTen5 <= number) {
*power = kTen5;
*exponent = 5;
break;
} // else fallthrough
case 16:
case 15:
case 14:
if (kTen4 <= number) {
*power = kTen4;
*exponent = 4;
break;
} // else fallthrough
case 13:
case 12:
case 11:
case 10:
if (1000 <= number) {
*power = 1000;
*exponent = 3;
break;
} // else fallthrough
case 9:
case 8:
case 7:
if (100 <= number) {
*power = 100;
*exponent = 2;
break;
} // else fallthrough
case 6:
case 5:
case 4:
if (10 <= number) {
*power = 10;
*exponent = 1;
break;
} // else fallthrough
case 3:
case 2:
case 1:
if (1 <= number) {
*power = 1;
*exponent = 0;
break;
} // else fallthrough
case 0:
*power = 0;
*exponent = -1;
break;
default:
// Following assignments are here to silence compiler warnings.
*power = 0;
*exponent = 0;
UNREACHABLE();
}
}
// Same comments as for DigitGen but with the additional precondition:
// -60 <= w.e() <= -32
//
// Say, for the sake of example, that
// w.e() == -48, and w.f() == 0x1234567890abcdef
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
// w's fractional part is therefore 0x567890abcdef.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the comma would be computed by
// (0x567890abcdef * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::DigitGen_m60_m32(DiyFp low,
DiyFp w,
DiyFp high,
char* buffer,
int* length,
int* kappa) {
// low, w and high are imprecise, but by less than one ulp (unit in the last
// place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
// the new numbers are outside of the interval we want the final
// representation to lie in.
// Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
// numbers that are certain to lie in the interval. We will use this fact
// later on.
// We will now start by generating the digits within the uncertain
// interval. Later we will weed out representations that lie outside the safe
// interval and thus _might_ lie outside the correct interval.
uint64_t unit = 1;
DiyFp too_low = DiyFp(low.f() - unit, low.e());
DiyFp too_high = DiyFp(high.f() + unit, high.e());
// too_low and too_high are guaranteed to lie outside the interval we want the
// generated number in.
DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
// We now cut the input number into two parts: the integral digits and the
// fractionals. We will not write any decimal separator though, but adapt
// kappa instead.
// Reminder: we are currently computing the digits (stored inside the buffer)
// such that: too_low < buffer * 10^kappa < too_high
// We use too_high for the digit generation and stop as soon as possible.
// If we stop early we effectively round down.
DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
// Division by one is a shift.
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divider;
int divider_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divider, &divider_exponent);
*kappa = divider_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divider exponent + 1. And the divider is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
int digit = integrals / divider;
buffer[*length] = '0' + digit;
(*length)++;
integrals %= divider;
(*kappa)--;
// Note that kappa now equals the exponent of the divider and that the
// invariant thus holds again.
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
// Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
// Reminder: unsafe_interval.e() == one.e()
if (rest < unsafe_interval.f()) {
// Rounding down (by not emitting the remaining digits) yields a number
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
static_cast<uint64_t>(divider) << -one.e(), unit);
}
divider /= 10;
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (like the interval or 'unit'), too.
// Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
// increase its (imaginary) exponent. At the same time we decrease the
// divider's (one's) exponent and shift its significand.
// Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
// fractionals.f *= 10;
// fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
// one.f >>= 1; one.e++; // value remains unchanged.
// and we have again fractionals.e == one.e which allows us to divide
// fractionals.f() by one.f()
// We simply combine the *= 10 and the >>= 1.
while (true) {
fractionals *= 5;
unit *= 5;
unsafe_interval.set_f(unsafe_interval.f() * 5);
unsafe_interval.set_e(unsafe_interval.e() + 1); // Will be optimized out.
one.set_f(one.f() >> 1);
one.set_e(one.e() + 1);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
(*length)++;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
if (fractionals < unsafe_interval.f()) {
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
unsafe_interval.f(), fractionals, one.f(), unit);
}
}
}
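The shift-and-mask split described in the w.e() == -48 example above, as a standalone check (a sketch, not part of the diff):

#include <assert.h>
#include <stdint.h>

int main() {
  uint64_t f = 0x1234567890ABCDEFULL;  // too_high.f() in the example.
  int e = -48;                         // one.e(); one.f() == 1 << 48.
  uint64_t one_f = static_cast<uint64_t>(1) << -e;
  uint32_t integrals = static_cast<uint32_t>(f >> -e);  // Division by one is a shift.
  uint64_t fractionals = f & (one_f - 1);               // Modulo by one is an and.
  assert(integrals == 0x1234);
  assert(fractionals == 0x567890ABCDEFULL);
  // First digit after the comma: (0x567890abcdef * 10) >> 48 == 3.
  assert(((fractionals * 10) >> 48) == 3);
  return 0;
}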
// Rounds the given generated digits in the buffer and weeds out generated
// digits that are not in the safe interval, or where we cannot find a rounded
// representation.
// Input: * buffer containing the digits of too_high / 10^kappa
// * the buffer's length
// * distance_too_high_w == (too_high - w).f() * unit
// * unsafe_interval == (too_high - too_low).f() * unit
// * rest = (too_high - buffer * 10^kappa).f() * unit
// * ten_kappa = 10^kappa * unit
// * unit = the common multiplier
// Output: returns true on success.
// Modifies the generated digits in the buffer to approach (round towards) w.
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::RoundWeed(char* buffer,
int length,
uint64_t distance_too_high_w,
uint64_t unsafe_interval,
uint64_t rest,
uint64_t ten_kappa,
uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w- = too_high - big_distance, and
// w+ = too_high - small_distance.
// Note: w- < w < w+
//
// The real w (* unit) must lie somewhere inside the interval
// ]w-; w+[ (often written as "(w-; w+)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
//
// By generating the digits of too_high we got the biggest last digit.
// In the case that w+ < buffer < too_high we try to decrement the buffer.
// This way the buffer approaches (rounds towards) w.
// There are 3 conditions that stop the decrementation process:
// 1) the buffer is already below w+
// 2) decrementing the buffer would make it leave the unsafe interval
// 3) decrementing the buffer would yield a number below w+ and farther away
// than the current number. In other words:
// (buffer{-1} < w+) && w+ - buffer{-1} > buffer - w+
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w+
small_distance - rest >= rest + ten_kappa - small_distance)) {
buffer[length - 1]--;
rest += ten_kappa;
}
// We have approached w+ as much as possible. We now test if approaching w-
// would require changing the buffer. If yes, then we have two possible
// representations close to w, but we cannot decide which one is closer.
if (rest < big_distance &&
unsafe_interval - rest >= ten_kappa &&
(rest + ten_kappa < big_distance ||
big_distance - rest > rest + ten_kappa - big_distance)) {
return false;
}
// Weeding test.
// The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
// Since too_low = too_high - unsafe_interval this is equivalent to
// [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
// Conceptually we have: rest ~= too_high - buffer
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
}
bool grisu3(double v, char* buffer, int* sign, int* length, int* point) {
ASSERT(v != 0);
ASSERT(!Double(v).IsSpecial());
if (v < 0) {
v = -v;
*sign = 1;
} else {
*sign = 0;
}
int decimal_exponent;
bool result = Grisu3<-60, -32>::grisu3(v, buffer, length, &decimal_exponent);
*point = *length + decimal_exponent;
buffer[*length] = '\0';
return result;
}
} } // namespace v8::internal

55
deps/v8/src/grisu3.h

@ -0,0 +1,55 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_GRISU3_H_
#define V8_GRISU3_H_
namespace v8 {
namespace internal {
// Grisu3 will produce at most kGrisu3MaximalLength digits. This does not
// include the terminating '\0' character.
static const int kGrisu3MaximalLength = 17;
// Provides a decimal representation of v.
// v must satisfy v != 0 and it must not be Infinity or NaN.
// Returns true if it succeeds; otherwise the result cannot be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true then
// v == (double) (buffer * 10^(decimal-point - length)).
// The digits in the buffer are the shortest representation possible: no
// 0.099999999999 instead of 0.1.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the buffer will contain the
// one closest to v.
// The variable 'sign' will be '0' if the given number is positive, and '1'
// otherwise.
bool grisu3(double d, char* buffer, int* sign, int* length, int* decimal_point);
} } // namespace v8::internal
#endif // V8_GRISU3_H_
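A minimal usage sketch for the declaration above (not part of the diff; it assumes grisu3.h is reachable on the include path and mirrors the fallback added to conversions.cc):

#include <stdio.h>
#include "grisu3.h"

int main() {
  char buffer[v8::internal::kGrisu3MaximalLength + 1];
  int sign = 0, length = 0, point = 0;
  double v = 0.5;
  if (v8::internal::grisu3(v, buffer, &sign, &length, &point)) {
    // Expected here: buffer == "5", length == 1, point == 0, i.e.
    // 0.5 == 5 * 10^(0 - 1). The caller decides where to place the decimal
    // separator or whether to switch to exponent notation.
    printf("%s0.%s * 10^%d\n", sign ? "-" : "", buffer, point);
  } else {
    // Rare failure (~0.5% of inputs): fall back to dtoa() as DoubleToCString does.
  }
  return 0;
}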

28
deps/v8/src/heap.cc

@ -2571,11 +2571,9 @@ Object* Heap::CopyJSObject(JSObject* source) {
reinterpret_cast<Object**>(source->address()),
object_size);
// Update write barrier for all fields that lie beyond the header.
for (int offset = JSObject::kHeaderSize;
offset < object_size;
offset += kPointerSize) {
RecordWrite(clone_address, offset);
}
RecordWrites(clone_address,
JSObject::kHeaderSize,
object_size - JSObject::kHeaderSize);
} else {
clone = new_space_.AllocateRaw(object_size);
if (clone->IsFailure()) return clone;
@ -2906,12 +2904,9 @@ Object* Heap::AllocateFixedArray(int length) {
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
Object* value = undefined_value();
// Initialize body.
for (int index = 0; index < length; index++) {
ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
ASSERT(!Heap::InNewSpace(undefined_value()));
MemsetPointer(array->data_start(), undefined_value(), length);
}
return result;
}
@ -2963,11 +2958,8 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
Object* value = undefined_value();
for (int index = 0; index < length; index++) {
ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
ASSERT(!Heap::InNewSpace(undefined_value()));
MemsetPointer(array->data_start(), undefined_value(), length);
return array;
}
@ -2994,9 +2986,7 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(the_hole_value()));
MemsetPointer(HeapObject::RawField(array, FixedArray::kHeaderSize),
the_hole_value(),
length);
MemsetPointer(array->data_start(), the_hole_value(), length);
}
return result;
}
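For context (a sketch, not part of the diff): the MemsetPointer(dest, value, counter) calls above replace the hand-written fill loops. A stand-in with the same shape would be the following; the real helper lives in V8's utils and may be specialised further.

// Hypothetical stand-in for MemsetPointer: fill 'counter' consecutive
// pointer-sized slots with the same value, e.g. the array body with the
// undefined or the-hole sentinel.
template <typename T>
static inline void MemsetPointerSketch(T** dest, T* value, int counter) {
  for (int i = 0; i < counter; i++) dest[i] = value;
}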
@ -3409,7 +3399,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize("strong_root_list");
v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
v->Synchronize("symbol");
Bootstrapper::Iterate(v);

16
deps/v8/src/ia32/assembler-ia32.cc

@ -2148,6 +2148,17 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
}
void Assembler::movmskpd(Register dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x50);
emit_sse_operand(dst, src);
}
void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2283,6 +2294,11 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
}
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::Print() {
Disassembler::Decode(stdout, buffer_, pc_);
}

3
deps/v8/src/ia32/assembler-ia32.h

@ -769,6 +769,7 @@ class Assembler : public Malloced {
void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
@ -828,7 +829,7 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
private:
byte* addr_at(int pos) { return buffer_ + pos; }

79
deps/v8/src/ia32/codegen-ia32.cc

@ -2286,6 +2286,12 @@ void CodeGenerator::Comparison(AstNode* node,
// a jump target and branching to duplicate the virtual frame at
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
if (left_side.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg(), "Argument not a smi");
}
} else {
JumpTarget is_smi;
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
@ -2341,6 +2347,8 @@ void CodeGenerator::Comparison(AstNode* node,
dest->false_target()->Jump();
is_smi.Bind();
}
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
@ -3579,6 +3587,24 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
}
CheckStack(); // TODO(1222600): ignore if body contains calls.
// If we have (a) a loop with a compile-time constant trip count
// and (b) the loop induction variable is not assigned inside the
// loop, we update the number type of the induction variable to be smi.
if (node->is_fast_smi_loop()) {
// Set number type of the loop variable to smi.
Slot* slot = node->loop_variable()->slot();
ASSERT(slot->type() == Slot::LOCAL);
frame_->SetTypeForLocalAt(slot->index(), NumberInfo::Smi());
if (FLAG_debug_code) {
frame_->PushLocalAt(slot->index());
Result var = frame_->Pop();
var.ToRegister();
__ AbortIfNotSmi(var.reg(), "Loop variable not a smi.");
}
}
Visit(node->body());
// If there is an update expression, compile it if necessary.
@ -4754,11 +4780,13 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->Push(Smi::FromInt(node->literal_index()));
// Constant properties.
frame_->Push(node->constant_properties());
// Should the object literal have fast elements?
frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
Result clone;
if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
frame_->Push(&clone);
@ -5912,7 +5940,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
@ -6624,6 +6652,19 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ Set(tmp.reg(), Immediate(0));
}
if (is_increment) {
__ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
} else {
__ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
}
if (new_value.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(new_value.reg(), "Argument not a smi");
}
if (tmp.is_valid()) tmp.Unuse();
} else {
DeferredCode* deferred = NULL;
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
@ -6634,12 +6675,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
is_increment);
}
if (is_increment) {
__ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
} else {
__ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
}
// If the count operation didn't overflow and the result is a valid
// smi, we're done. Otherwise, we jump to the deferred slow-case
// code.
@ -6658,6 +6693,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
deferred->Branch(not_zero);
}
deferred->BindExit();
}
// Postfix: store the old value in the allocated slot under the
// reference.
@ -6823,8 +6859,15 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
overwrite_mode = OVERWRITE_RIGHT;
}
if (node->left()->IsTrivial()) {
Load(node->right());
Result right = frame_->Pop();
frame_->Push(node->left());
frame_->Push(&right);
} else {
Load(node->left());
Load(node->right());
}
GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
}
}
@ -7024,8 +7067,20 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
default:
UNREACHABLE();
}
if (left->IsTrivial()) {
if (!left_already_loaded) {
Load(right);
Result right_result = frame_->Pop();
frame_->Push(left);
frame_->Push(&right_result);
} else {
Load(right);
}
} else {
if (!left_already_loaded) Load(left);
Load(right);
}
Comparison(node, cc, strict, destination());
}
@ -10162,6 +10217,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
void RecordWriteStub::Generate(MacroAssembler* masm) {
masm->RecordWriteHelper(object_, addr_, scratch_);
masm->ret(0);
}
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;

49
deps/v8/src/ia32/codegen-ia32.h

@ -339,6 +339,10 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
// If the name is an inline runtime function call, return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@ -522,6 +526,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@ -555,7 +560,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
@ -593,14 +598,10 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast call to transcendental functions.
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
// Fast case for sqrt
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.
@ -976,6 +977,42 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register addr, Register scratch)
: object_(object), addr_(addr), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register addr_;
Register scratch_;
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
object_.code(), addr_.code(), scratch_.code());
}
#endif
// Minor key encoding in 12 bits of three registers (object, address and
// scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
AddressBits::encode(addr_.code()) |
ScratchBits::encode(scratch_.code());
}
};
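Editor's note: the 12-bit minor key above simply packs the three 4-bit ia32 register codes; a throwaway sketch (the helper name is invented):

  // Illustrative only: pack object/address/scratch register codes as OOOOAAAASSSS.
  function recordWriteMinorKey(objectCode, addrCode, scratchCode) {
    return (objectCode << 8) | (addrCode << 4) | scratchCode;
  }
  recordWriteMinorKey(3, 2, 1);   // 0x321: object=ebx(3), addr=edx(2), scratch=ecx(1)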
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_

43
deps/v8/src/ia32/disasm-ia32.cc

@ -1069,14 +1069,28 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
UnimplementedInstruction();
}
} else if (*data == 0x2F) {
} else if (*data == 0x2E || *data == 0x2F) {
const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("comisd %s,%s",
if (mod == 0x3) {
AppendToBuffer("%s %s,%s", mnem,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
data += PrintRightOperand(data);
}
} else if (*data == 0x50) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movmskpd %s,%s",
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
@ -1198,6 +1212,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const char* mnem = "?";
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
case 0x2C: mnem = "cvttsd2si"; break;
case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
@ -1208,6 +1223,29 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (b2 == 0x2A) {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else {
AppendToBuffer("%s %s,%s",
mnem,
NameOfXMMRegister(regop),
NameOfCPURegister(rm));
data++;
}
} else if (b2 == 0x2C) {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
data += PrintRightOperand(data);
} else {
AppendToBuffer("%s %s,%s",
mnem,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
}
} else {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else {
@ -1218,6 +1256,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
}
}
}
} else {
UnimplementedInstruction();
}

5
deps/v8/src/ia32/full-codegen-ia32.cc

@ -901,10 +901,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_properties()));
__ push(Immediate(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
// If result_saved is true the result is on top of the stack. If

131
deps/v8/src/ia32/ic-ia32.cc

@ -152,6 +152,108 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label. If the load succeeds and we
// fall through, key holds the result on exit.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ mov(r1, r0);
__ not_(r0);
__ shl(r1, 15);
__ add(r0, Operand(r1));
// hash = hash ^ (hash >> 12);
__ mov(r1, r0);
__ shr(r1, 12);
__ xor_(r0, Operand(r1));
// hash = hash + (hash << 2);
__ lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
__ mov(r1, r0);
__ shr(r1, 4);
__ xor_(r0, Operand(r1));
// hash = hash * 2057;
__ imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
__ mov(r1, r0);
__ shr(r1, 16);
__ xor_(r0, Operand(r1));
// Compute capacity mask.
const int kCapacityOffset =
NumberDictionary::kHeaderSize +
NumberDictionary::kCapacityIndex * kPointerSize;
__ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
__ dec(r1);
const int kElementsStartOffset =
NumberDictionary::kHeaderSize +
NumberDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
__ mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
}
__ and_(r2, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
__ cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
kElementsStartOffset));
if (i != (kProbes - 1)) {
__ j(equal, &done, taken);
} else {
__ j(not_equal, miss, not_taken);
}
}
__ bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
__ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
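Editor's note: for reference, the hash sequence spelled out as ordinary code (a sketch that mirrors the step-by-step comments above; the function name is invented and each step is truncated to 32 bits with >>> 0):

  // Illustrative only: the integer hash from the comments above, on uint32 values.
  function computeIntegerHash(key) {
    var hash = key >>> 0;
    hash = (~hash + (hash << 15)) >>> 0;   // hash = ~hash + (hash << 15)
    hash = (hash ^ (hash >>> 12)) >>> 0;   // hash = hash ^ (hash >> 12)
    hash = (hash + (hash << 2)) >>> 0;     // hash = hash + (hash << 2)
    hash = (hash ^ (hash >>> 4)) >>> 0;    // hash = hash ^ (hash >> 4)
    hash = (hash * 2057) >>> 0;            // hash = hash * 2057
    hash = (hash ^ (hash >>> 16)) >>> 0;   // hash = hash ^ (hash >> 16)
    return hash;
  }

The unrolled probes that follow then index the dictionary at (hash + probe offset) masked by the capacity, scaled by the three-word entry size, exactly as the loop above encodes.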
// Helper function used to check that a value is either not an object
// or is loaded if it is an object.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
@ -225,6 +327,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@ -273,7 +376,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: untagged index
// eax: key
// ecx: elements
__ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
@ -281,6 +384,32 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ SmiTag(eax);
__ ret(0);
__ bind(&check_number_dictionary);
// Check whether the elements object is a number dictionary.
// edx: receiver
// ebx: untagged index
// eax: key
// ecx: elements
__ CheckMap(ecx, Factory::hash_table_map(), &slow, true);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
GenerateNumberDictionaryLoad(masm,
&slow_pop_receiver,
ecx,
eax,
ebx,
edx,
edi);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
__ bind(&slow_pop_receiver);
// Pop the receiver from the stack and jump to runtime.
__ pop(edx);
__ bind(&slow);
// Slow case: jump to runtime.
// edx: receiver
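Editor's note: from the JavaScript side, this is the kind of access that now stays in generated code instead of bailing out to the runtime (illustrative only; it assumes the sparse store pushes the array's elements into dictionary mode):

  // Illustrative only.
  var a = [];
  a[1000000] = 42;       // sparse enough that the elements typically become a number dictionary
  var x = a[1000000];    // keyed load: probes the dictionary via the code above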

109
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -47,33 +47,32 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
masm->and_(object, ~Page::kPageAlignmentMask);
and_(object, ~Page::kPageAlignmentMask);
Register page_start = object;
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
masm->sub(addr, Operand(page_start));
masm->shr(addr, kObjectAlignmentBits);
sub(addr, Operand(page_start));
shr(addr, kObjectAlignmentBits);
Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
masm->j(less, &fast);
cmp(pointer_offset, Page::kPageSize / kPointerSize);
j(less, &fast);
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Find the length of the large object (FixedArray).
masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
Register array_length = scratch;
@ -83,7 +82,7 @@ static void RecordWriteHelper(MacroAssembler* masm,
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
masm->lea(page_start,
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));
@ -92,50 +91,31 @@ static void RecordWriteHelper(MacroAssembler* masm,
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
masm->bind(&fast);
masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register addr, Register scratch)
: object_(object), addr_(addr), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register addr_;
Register scratch_;
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
object_.code(), addr_.code(), scratch_.code());
}
#endif
// Minor key encoding in 12 bits of three registers (object, address and
// scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
AddressBits::encode(addr_.code()) |
ScratchBits::encode(scratch_.code());
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(scratch, Operand(object));
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
j(cc, branch);
} else {
int32_t new_space_start = reinterpret_cast<int32_t>(
ExternalReference::new_space_start().address());
lea(scratch, Operand(object, -new_space_start));
and_(scratch, Heap::NewSpaceMask());
j(cc, branch);
}
};
void RecordWriteStub::Generate(MacroAssembler* masm) {
RecordWriteHelper(masm, object_, addr_, scratch_);
masm->ret(0);
}
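Editor's note: the non-serializer branch of the new InNewSpace helper reduces to one subtract-and-mask; a sketch in plain arithmetic (an assumption-laden illustration: it takes the new space to be size-aligned so the mask clears exactly the in-space offset bits):

  // Illustrative only: mirrors lea(scratch, object - new_space_start); and_(scratch, mask).
  function inNewSpace(addr, newSpaceStart, newSpaceMask) {
    return ((addr - newSpaceStart) & newSpaceMask) === 0;   // zero result == "in new space"
  }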
@ -161,22 +141,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
test(value, Immediate(kSmiTagMask));
j(zero, &done);
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
and_(Operand(value), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
int32_t new_space_start = reinterpret_cast<int32_t>(
ExternalReference::new_space_start().address());
lea(value, Operand(object, -new_space_start));
and_(value, Heap::NewSpaceMask());
j(equal, &done);
}
InNewSpace(object, value, equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
@ -209,7 +174,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(this, object, dst, value);
RecordWriteHelper(object, dst, value);
} else {
RecordWriteStub stub(object, dst, value);
CallStub(&stub);
@ -221,9 +186,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
mov(scratch, Immediate(bit_cast<int32_t>(kZapValue)));
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
}
}
@ -397,6 +362,12 @@ void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
}
void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
test(object, Immediate(kSmiTagMask));
Assert(equal, msg);
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, Operand(esp));

15
deps/v8/src/ia32/macro-assembler-ia32.h

@ -48,6 +48,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
void RecordWriteHelper(Register object,
Register addr,
Register scratch);
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@ -179,6 +191,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object, const char* msg);
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object, const char* msg);
// ---------------------------------------------------------------------------
// Exception handling

217
deps/v8/src/ia32/stub-cache-ia32.cc

@ -549,9 +549,8 @@ class CallOptimization BASE_EMBEDDED {
// fast api call builtin.
void AnalyzePossibleApiFunction(JSFunction* function) {
SharedFunctionInfo* sfi = function->shared();
if (sfi->function_data()->IsUndefined()) return;
FunctionTemplateInfo* info =
FunctionTemplateInfo::cast(sfi->function_data());
if (!sfi->IsApiFunction()) return;
FunctionTemplateInfo* info = sfi->get_api_func_data();
// Require a C++ callback.
if (info->call_code()->IsUndefined()) return;
@ -698,8 +697,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CallOptimization optimization(lookup);
if (optimization.is_constant_call() &&
!Top::CanHaveSpecialFunctions(holder)) {
if (optimization.is_constant_call()) {
CompileCacheable(masm,
object,
receiver,
@ -1211,6 +1209,195 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
Object* CallStubCompiler::CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss;
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, name, &miss);
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
// Check that the elements are in fast mode (not dictionary).
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(Operand(eax), Immediate(argc << 1));
// Get the elements array's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiTag(ecx);
// Check if we could survive without allocation, go to builtin otherwise.
__ cmp(eax, Operand(ecx));
__ j(greater, &call_builtin);
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Push the element.
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_rset_update);
__ InNewSpace(ebx, ecx, equal, &exit);
RecordWriteStub stub(ebx, edx, ecx);
__ CallStub(&stub);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
}
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* CallStubCompiler::CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss, empty_array, call_builtin;
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, name, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
// Check that the elements are in fast mode (not dictionary).
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &miss);
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
__ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
__ j(negative, &empty_array);
// Get the last element.
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
// Fill with the hole.
__ mov(FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
Immediate(Factory::the_hole_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&empty_array);
__ mov(eax, Immediate(Factory::undefined_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
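Editor's note: seen from JavaScript, the new stubs keep the common cases inline and defer everything else to the existing builtins (illustrative only):

  // Illustrative only: which calls the generated stubs can keep inline.
  var a = [1, 2, 3];
  a.push(4);       // one argument + fast elements: handled inline when the backing
                   // store has spare capacity, otherwise falls back to ArrayPush
  a.pop();         // fast elements, non-empty: handled inline (a hole falls back to ArrayPop)
  a.push(5, 6);    // more than one argument: always the ArrayPush builtin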
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@ -1223,6 +1410,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
return generator(this, object, holder, function, name, check);
}
Label miss_in_smi_check;
// Get the receiver from the stack.
@ -1333,18 +1528,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, eax, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &miss, not_taken);
break;
default:
UNREACHABLE();
}

12
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -1171,11 +1171,17 @@ void VirtualFrame::Push(Expression* expr) {
}
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->is_this()) {
PushParameterAt(-1);
if (proxy != NULL) {
Slot* slot = proxy->var()->slot();
if (slot->type() == Slot::LOCAL) {
PushLocalAt(slot->index());
return;
}
if (slot->type() == Slot::PARAMETER) {
PushParameterAt(slot->index());
return;
}
}
UNREACHABLE();
}

3
deps/v8/src/ia32/virtual-frame-ia32.h

@ -422,6 +422,9 @@ class VirtualFrame: public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
// Update the type information of a local variable frame element directly.
inline void SetTypeForLocalAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;

13
deps/v8/src/ic.cc

@ -150,7 +150,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
int index = map->IndexInCodeCache(String::cast(name), target);
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
// For keyed load/store, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
@ -458,17 +458,6 @@ Object* CallIC::LoadFunction(State state,
ASSERT(result != Heap::the_hole_value());
if (result->IsJSFunction()) {
// Check if there is an optimized (builtin) version of the function.
// Ignored this will degrade performance for some Array functions.
// Please note we only return the optimized function iff
// the JSObject has FastElements.
if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
Object* opt = Top::LookupSpecialFunction(JSObject::cast(*object),
lookup.holder(),
JSFunction::cast(result));
if (opt->IsJSFunction()) return opt;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
if (Debug::StepInActive()) {

3
deps/v8/src/log-utils.cc

@ -196,6 +196,9 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
actual_size = static_cast<int>(end_pos - dest_buf + 1);
// If the assertion below is hit, it means that no line end was
// found --- something has gone wrong.
ASSERT(actual_size > 0);
ASSERT(actual_size <= max_size);
return actual_size;
}

2
deps/v8/src/log-utils.h

@ -115,7 +115,7 @@ class Log : public AllStatic {
}
// Size of buffer used for formatting log messages.
static const int kMessageBufferSize = 2048;
static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
private:
typedef int (*WritePtr)(const char* msg, int length);

5
deps/v8/src/log.cc

@ -1346,10 +1346,9 @@ void Logger::LogCompiledFunctions() {
LOG(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
} else if (shared->function_data()->IsFunctionTemplateInfo()) {
} else if (shared->IsApiFunction()) {
// API function.
FunctionTemplateInfo* fun_data =
FunctionTemplateInfo::cast(shared->function_data());
FunctionTemplateInfo* fun_data = shared->get_api_func_data();
Object* raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);

6
deps/v8/src/macros.py

@ -136,9 +136,9 @@ macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
macro DATE_FROM_TIME(time) = (DateFromTime(time));
macro YEAR_FROM_TIME(time) = (YearFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));

11
deps/v8/src/math.js

@ -45,7 +45,8 @@ $Math.__proto__ = global.Object.prototype;
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_abs(x);
if (x === 0) return 0; // To handle -0.
return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
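Editor's note: a quick sanity check of the new zero handling, assuming Math.abs is backed by the MathAbs above (illustrative only):

  // Illustrative only: both zeros come back as +0, other values are flipped as usual.
  1 / Math.abs(0);     // Infinity
  1 / Math.abs(-0);    // Infinity
  Math.abs(-5);        // 5   (smi fast path)
  Math.abs(-5.5);      // 5.5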
@ -84,7 +85,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_cos(x);
return %_MathCos(x);
}
// ECMA 262 - 15.8.2.8
@ -159,7 +160,7 @@ function MathMin(arg1, arg2) { // length == 2
function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
return %_Math_pow(x, y);
return %_MathPow(x, y);
}
// ECMA 262 - 15.8.2.14
@ -176,13 +177,13 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_sin(x);
return %_MathSin(x);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_sqrt(x);
return %_MathSqrt(x);
}
// ECMA 262 - 15.8.2.18

3
deps/v8/src/messages.js

@ -182,7 +182,8 @@ function FormatMessage(message) {
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
illegal_access: "illegal access"
};
}
var format = kMessages[message.type];

10
deps/v8/src/mips/codegen-mips-inl.h

@ -38,16 +38,6 @@ namespace internal {
void DeferredCode::Jump() { __ b(&entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal

18
deps/v8/src/mips/codegen-mips.cc

@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
@ -297,6 +298,16 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@ -335,7 +346,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@ -429,6 +440,11 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; }
#define __ ACCESS_MASM(masm)
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
return Handle<Code>::null();
}
// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
// positive or negative to indicate the result of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {

19
deps/v8/src/mips/codegen-mips.h

@ -154,6 +154,10 @@ class CodeGenerator: public AstVisitor {
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceLength = 6;
// If the name is an inline runtime function call, return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
@ -188,6 +192,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@ -216,7 +221,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
@ -246,15 +251,11 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast support for Math.sqrt().
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {

8
deps/v8/src/mips/simulator-mips.cc

@ -31,7 +31,7 @@
#include "disasm.h"
#include "assembler.h"
#include "globals.h" // Need the bit_cast
#include "globals.h" // Need the BitCast
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
@ -604,7 +604,7 @@ void Simulator::set_fpu_register(int fpureg, int32_t value) {
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
*v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
*v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
}
@ -625,7 +625,7 @@ int32_t Simulator::get_fpu_register(int fpureg) const {
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
return *v8i::bit_cast<double*, int32_t*>(
return *v8i::BitCast<double*, int32_t*>(
const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
@ -901,7 +901,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case MFHC1:
fp_out = get_fpu_register_double(fs_reg);
alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
break;
case MTC1:
case MTHC1:

6
deps/v8/src/mips/virtual-frame-mips.h

@ -71,7 +71,7 @@ class VirtualFrame : public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
NumberInfo::Type info = NumberInfo::kUnknown);
NumberInfo info = NumberInfo::Unknown());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@ -367,7 +367,7 @@ class VirtualFrame : public ZoneObject {
void EmitMultiPushReversed(RegList regs); // higher first
// Push an element on the virtual frame.
inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
@ -391,6 +391,8 @@ class VirtualFrame : public ZoneObject {
// 'a' registers are arguments register a0 to a3.
void EmitArgumentSlots(RegList reglist);
inline void SetTypeForLocalAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;

2
deps/v8/src/objects-debug.cc

@ -789,7 +789,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kExternalReferenceDataOffset);
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kScriptOffset);
VerifyObjectField(kDebugInfoOffset);
}

19
deps/v8/src/objects-inl.h

@ -2361,8 +2361,7 @@ ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object,
kExternalReferenceDataOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
@ -2453,6 +2452,22 @@ bool SharedFunctionInfo::is_compiled() {
}
bool SharedFunctionInfo::IsApiFunction() {
return function_data()->IsFunctionTemplateInfo();
}
FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
ASSERT(IsApiFunction());
return FunctionTemplateInfo::cast(function_data());
}
bool SharedFunctionInfo::HasCustomCallGenerator() {
return function_data()->IsProxy();
}
bool JSFunction::IsBoilerplate() {
return map() == Heap::boilerplate_function_map();
}

21
deps/v8/src/objects.cc

@ -480,7 +480,7 @@ bool JSObject::IsDirty() {
if (!cons_obj->IsJSFunction())
return true;
JSFunction* fun = JSFunction::cast(cons_obj);
if (!fun->shared()->function_data()->IsFunctionTemplateInfo())
if (!fun->shared()->IsApiFunction())
return true;
// If the object is fully fast case and has the same map it was
// created with then no changes can have been made to it.
@ -3055,7 +3055,7 @@ Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
}
int Map::IndexInCodeCache(String* name, Code* code) {
int Map::IndexInCodeCache(Object* name, Code* code) {
// Get the internal index if a code cache exists.
if (!code_cache()->IsFixedArray()) {
return CodeCache::cast(code_cache())->GetIndex(name, code);
@ -3200,12 +3200,11 @@ Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
}
int CodeCache::GetIndex(String* name, Code* code) {
// This is not used for normal load/store/call IC's.
int CodeCache::GetIndex(Object* name, Code* code) {
if (code->type() == NORMAL) {
if (normal_type_cache()->IsUndefined()) return -1;
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->GetIndex(name, code->flags());
return cache->GetIndex(String::cast(name), code->flags());
}
FixedArray* array = default_cache();
@ -3217,11 +3216,11 @@ int CodeCache::GetIndex(String* name, Code* code) {
}
void CodeCache::RemoveByIndex(String* name, Code* code, int index) {
void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
if (code->type() == NORMAL) {
ASSERT(!normal_type_cache()->IsUndefined());
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
ASSERT(cache->GetIndex(name, code->flags()) == index);
ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
cache->RemoveByIndex(index);
} else {
FixedArray* array = default_cache();
@ -6434,9 +6433,9 @@ void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
InterceptorInfo* JSObject::GetNamedInterceptor() {
ASSERT(map()->has_named_interceptor());
JSFunction* constructor = JSFunction::cast(map()->constructor());
Object* template_info = constructor->shared()->function_data();
ASSERT(constructor->shared()->IsApiFunction());
Object* result =
FunctionTemplateInfo::cast(template_info)->named_property_handler();
constructor->shared()->get_api_func_data()->named_property_handler();
return InterceptorInfo::cast(result);
}
@ -6444,9 +6443,9 @@ InterceptorInfo* JSObject::GetNamedInterceptor() {
InterceptorInfo* JSObject::GetIndexedInterceptor() {
ASSERT(map()->has_indexed_interceptor());
JSFunction* constructor = JSFunction::cast(map()->constructor());
Object* template_info = constructor->shared()->function_data();
ASSERT(constructor->shared()->IsApiFunction());
Object* result =
FunctionTemplateInfo::cast(template_info)->indexed_property_handler();
constructor->shared()->get_api_func_data()->indexed_property_handler();
return InterceptorInfo::cast(result);
}

28
deps/v8/src/objects.h

@ -2154,24 +2154,24 @@ class Dictionary: public HashTable<Shape, Key> {
// Returns the value at entry.
Object* ValueAt(int entry) {
return get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
}
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
}
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
return PropertyDetails(
Smi::cast(get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
}
// Set the details for entry.
void DetailsAtPut(int entry, PropertyDetails value) {
set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
}
// Sorting support
@ -2194,7 +2194,7 @@ class Dictionary: public HashTable<Shape, Key> {
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
}
int NextEnumerationIndex() {
@ -2971,7 +2971,7 @@ class Map: public HeapObject {
// Returns the non-negative index of the code object if it is in the
// cache and -1 otherwise.
int IndexInCodeCache(String* name, Code* code);
int IndexInCodeCache(Object* name, Code* code);
// Removes a code object from the code cache at the given index.
void RemoveFromCodeCache(String* name, Code* code, int index);
@ -3186,12 +3186,18 @@ class SharedFunctionInfo: public HeapObject {
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
// [function data]: This field has been added for make benefit the API.
// [function data]: This field holds some additional data for the function.
// Currently it either has FunctionTemplateInfo to make benefit the API
// or Proxy wrapping CustomCallGenerator.
// In the long run we don't want all functions to have this field but
// we can fix that when we have a better model for storing hidden data
// on objects.
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasCustomCallGenerator();
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
@ -3299,9 +3305,9 @@ class SharedFunctionInfo: public HeapObject {
static const int kConstructStubOffset = kCodeOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
static const int kExternalReferenceDataOffset =
static const int kFunctionDataOffset =
kInstanceClassNameOffset + kPointerSize;
static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
@ -3743,10 +3749,10 @@ class CodeCache: public Struct {
// code object is not in that cache. This index can be used to later call
// RemoveByIndex. The cache cannot be modified between a call to GetIndex and
// RemoveByIndex.
int GetIndex(String* name, Code* code);
int GetIndex(Object* name, Code* code);
// Remove an object from the cache with the provided internal index.
void RemoveByIndex(String* name, Code* code, int index);
void RemoveByIndex(Object* name, Code* code, int index);
static inline CodeCache* cast(Object* obj);

65
deps/v8/src/parser.cc

@ -30,6 +30,7 @@
#include "api.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
#include "messages.h"
#include "platform.h"
@ -211,6 +212,7 @@ class Parser {
ZoneList<ObjectLiteral::Property*>* properties,
Handle<FixedArray> constants,
bool* is_simple,
bool* fast_elements,
int* depth);
// Populate the literals fixed array for a materialized array literal.
@ -1962,9 +1964,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Factory::NewFunctionBoilerplate(name, literals, code);
boilerplate->shared()->set_construct_stub(*construct_stub);
// Copy the function data to the boilerplate. Used by
// builtins.cc:HandleApiCall to perform argument type checks and to
// find the right native code to call.
// Copy the function data to the boilerplate.
boilerplate->shared()->set_function_data(fun->shared()->function_data());
int parameters = fun->shared()->formal_parameter_count();
boilerplate->shared()->set_formal_parameter_count(parameters);
@ -3445,7 +3445,11 @@ Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ObjectLiteral* object_literal = expression->AsObjectLiteral();
if (object_literal != NULL) {
ASSERT(object_literal->is_simple());
result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL));
if (object_literal->fast_elements()) {
result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
} else {
result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
}
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
@ -3483,11 +3487,14 @@ void Parser::BuildObjectLiteralConstantProperties(
ZoneList<ObjectLiteral::Property*>* properties,
Handle<FixedArray> constant_properties,
bool* is_simple,
bool* fast_elements,
int* depth) {
int position = 0;
// Accumulate the value in local variables and store it at the end.
bool is_simple_acc = true;
int depth_acc = 1;
uint32_t max_element_index = 0;
uint32_t elements = 0;
for (int i = 0; i < properties->length(); i++) {
ObjectLiteral::Property* property = properties->at(i);
if (!IsBoilerplateProperty(property)) {
@ -3506,11 +3513,31 @@ void Parser::BuildObjectLiteralConstantProperties(
Handle<Object> value = GetBoilerplateValue(property->value());
is_simple_acc = is_simple_acc && !value->IsUndefined();
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
if (key->IsString()
&& Handle<String>::cast(key)->AsArrayIndex(&element_index)
&& element_index > max_element_index) {
max_element_index = element_index;
elements++;
} else if (key->IsSmi()) {
int key_value = Smi::cast(*key)->value();
if (key_value > 0
&& static_cast<uint32_t>(key_value) > max_element_index) {
max_element_index = key_value;
}
elements++;
}
// Add name, value pair to the fixed array.
constant_properties->set(position++, *key);
constant_properties->set(position++, *value);
}
*fast_elements =
(max_element_index <= 32) || ((2 * elements) >= max_element_index);
*is_simple = is_simple_acc;
*depth = depth_acc;
}
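Editor's note: the heuristic in one line is "keep fast elements when indices are small or reasonably dense"; a sketch of the decision from the assignment above (the helper name is invented):

  // Illustrative only: the fast-elements decision for an object literal.
  function useFastElements(maxElementIndex, elementCount) {
    return maxElementIndex <= 32 || 2 * elementCount >= maxElementIndex;
  }
  useFastElements(2, 3);        // true:  {0: 'a', 1: 'b', 2: 'c'} keeps fast elements
  useFastElements(1000000, 1);  // false: {1000000: 'x'} gets dictionary elements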
@ -3608,15 +3635,18 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
bool is_simple = true;
bool fast_elements = true;
int depth = 1;
BuildObjectLiteralConstantProperties(properties.elements(),
constant_properties,
&is_simple,
&fast_elements,
&depth);
return new ObjectLiteral(constant_properties,
properties.elements(),
literal_index,
is_simple,
fast_elements,
depth);
}
@ -3832,7 +3862,27 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
}
// Otherwise we have a runtime call.
// Check that the expected number of arguments are passed to runtime functions.
if (!is_pre_parsing_) {
if (function != NULL
&& function->nargs != -1
&& function->nargs != args->length()) {
ReportMessage("illegal_access", Vector<const char*>::empty());
*ok = false;
return NULL;
} else if (function == NULL && !name.is_null()) {
// If this is not a runtime function implemented in C++ it might be an
// inlined runtime function.
int argc = CodeGenerator::InlineRuntimeCallArgumentsCount(name);
if (argc != -1 && argc != args->length()) {
ReportMessage("illegal_access", Vector<const char*>::empty());
*ok = false;
return NULL;
}
}
}
// Otherwise we have a valid runtime call.
return NEW(CallRuntime(name, function, args));
}
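Editor's note: with the check in place, an arity mismatch on a %-call is rejected while parsing instead of being left for code generation (illustrative only; V8-specific and requires --allow-natives-syntax):

  // Illustrative only: run with --allow-natives-syntax.
  %_IsSmi(1);      // fine: this inline runtime function expects one argument
  %_IsSmi(1, 2);   // now a parse-time "illegal access" error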
@ -4123,15 +4173,18 @@ Expression* Parser::ParseJsonObject(bool* ok) {
Handle<FixedArray> constant_properties =
Factory::NewFixedArray(boilerplate_properties * 2, TENURED);
bool is_simple = true;
bool fast_elements = true;
int depth = 1;
BuildObjectLiteralConstantProperties(properties.elements(),
constant_properties,
&is_simple,
&fast_elements,
&depth);
return new ObjectLiteral(constant_properties,
properties.elements(),
literal_index,
is_simple,
fast_elements,
depth);
}

3
deps/v8/src/parser.h

@ -168,7 +168,8 @@ FunctionLiteral* MakeLazyAST(Handle<Script> script,
class CompileTimeValue: public AllStatic {
public:
enum Type {
OBJECT_LITERAL,
OBJECT_LITERAL_FAST_ELEMENTS,
OBJECT_LITERAL_SLOW_ELEMENTS,
ARRAY_LITERAL
};

2461
deps/v8/src/powers_ten.h

File diff suppressed because it is too large

57
deps/v8/src/regexp-delay.js

@ -95,6 +95,16 @@ function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
%IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
%IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
%IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
// Clear the regexp result cache.
cachedRegexp = 0;
cachedSubject = 0;
cachedLastIndex = 0;
cachedAnswer = 0;
// These are from string.js.
cachedReplaceSubject = 0;
cachedReplaceRegexp = 0;
cachedReplaceReplacement = 0;
cachedReplaceAnswer = 0;
}
// Call internal function to compile the pattern.
@ -140,7 +150,36 @@ function DoRegExpExec(regexp, string, index) {
}
var cachedRegexp;
var cachedSubject;
var cachedLastIndex;
var cachedAnswer;
function CloneRegexpAnswer(array) {
var len = array.length;
var answer = new $Array(len);
for (var i = 0; i < len; i++) {
answer[i] = array[i];
}
answer.index = array.index;
answer.input = array.input;
return answer;
}
function RegExpExec(string) {
if (%_ObjectEquals(cachedLastIndex, this.lastIndex) &&
%_ObjectEquals(cachedRegexp, this) &&
%_ObjectEquals(cachedSubject, string)) {
var last = cachedAnswer;
if (last == null) {
return last;
} else {
return CloneRegexpAnswer(last);
}
}
if (!IS_REGEXP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.exec', this]);
@ -159,6 +198,7 @@ function RegExpExec(string) {
s = ToString(string);
}
var lastIndex = this.lastIndex;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
if (i < 0 || i > s.length) {
@ -172,7 +212,11 @@ function RegExpExec(string) {
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
return matchIndices; // no match
cachedLastIndex = lastIndex;
cachedRegexp = this;
cachedSubject = s;
cachedAnswer = matchIndices; // Null.
return matchIndices; // No match.
}
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
@ -196,11 +240,18 @@ function RegExpExec(string) {
}
}
if (this.global)
this.lastIndex = lastMatchInfo[CAPTURE1];
result.index = lastMatchInfo[CAPTURE0];
result.input = s;
if (this.global) {
this.lastIndex = lastMatchInfo[CAPTURE1];
return result;
} else {
cachedRegexp = this;
cachedSubject = s;
cachedLastIndex = lastIndex;
cachedAnswer = result;
return CloneRegexpAnswer(result);
}
}
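Editor's note: in use, successful matches are only cached for non-global regexps (the global path keeps mutating lastIndex), and every hit is handed out as a fresh clone so callers cannot corrupt the cached array (illustrative only):

  // Illustrative only: repeated exec with identical inputs is served from the cache.
  var re = /(\d+)/;                    // non-global, so lastIndex stays 0
  var first = re.exec("abc 123");      // does the real match; the result is cached
  var second = re.exec("abc 123");     // cache hit, returned as a fresh clone
  first[0] = "clobbered";              // mutating one result leaves the other intact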

474
deps/v8/src/runtime.cc

@ -32,6 +32,7 @@
#include "accessors.h"
#include "api.h"
#include "arguments.h"
#include "codegen.h"
#include "compiler.h"
#include "cpu.h"
#include "dateparser-inl.h"
@ -247,7 +248,8 @@ static Handle<Object> CreateLiteralBoilerplate(
static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties) {
Handle<FixedArray> constant_properties,
bool should_have_fast_elements) {
// Get the global context from the literals array. This is the
// context in which the function was created and we use the object
// function from this context to create the object literal. We do
@ -263,6 +265,10 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
&is_result_from_cache);
Handle<JSObject> boilerplate = Factory::NewJSObjectFromMap(map);
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) NormalizeElements(boilerplate);
{ // Add the constant properties to the boilerplate.
int length = constant_properties->length();
OptimizedObjectForAddingMultipleProperties opt(boilerplate,
@ -344,8 +350,10 @@ static Handle<Object> CreateLiteralBoilerplate(
Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
switch (CompileTimeValue::GetType(array)) {
case CompileTimeValue::OBJECT_LITERAL:
return CreateObjectLiteralBoilerplate(literals, elements);
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
return CreateObjectLiteralBoilerplate(literals, elements, true);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
return CreateObjectLiteralBoilerplate(literals, elements, false);
case CompileTimeValue::ARRAY_LITERAL:
return CreateArrayLiteralBoilerplate(literals, elements);
default:
@ -355,26 +363,6 @@ static Handle<Object> CreateLiteralBoilerplate(
}
static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
// Copy the arguments.
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
Handle<Object> result =
CreateObjectLiteralBoilerplate(literals, constant_properties);
if (result.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *result);
return *result;
}
static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
// Takes a FixedArray of elements containing the literal elements of
// the array literal and produces JSArray with those elements.
@ -398,15 +386,19 @@ static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
static Object* Runtime_CreateObjectLiteral(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
CONVERT_SMI_CHECKED(fast_elements, args[3]);
bool should_have_fast_elements = fast_elements == 1;
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
boilerplate = CreateObjectLiteralBoilerplate(literals,
constant_properties,
should_have_fast_elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
@ -417,15 +409,19 @@ static Object* Runtime_CreateObjectLiteral(Arguments args) {
static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
CONVERT_SMI_CHECKED(fast_elements, args[3]);
bool should_have_fast_elements = fast_elements == 1;
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
boilerplate = CreateObjectLiteralBoilerplate(literals,
constant_properties,
should_have_fast_elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
@ -1242,6 +1238,70 @@ static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) {
}
static void SetCustomCallGenerator(Handle<JSFunction> function,
CustomCallGenerator generator) {
if (function->shared()->function_data()->IsUndefined()) {
function->shared()->set_function_data(*FromCData(generator));
}
}
static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name,
CustomCallGenerator generator = NULL) {
Handle<String> key = Factory::LookupAsciiSymbol(name);
Handle<Code> code(Builtins::builtin(builtin_name));
Handle<JSFunction> optimized = Factory::NewFunction(key,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
false);
optimized->shared()->DontAdaptArguments();
if (generator != NULL) {
SetCustomCallGenerator(optimized, generator);
}
SetProperty(holder, key, optimized, NONE);
return optimized;
}
static Object* CompileArrayPushCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check) {
return compiler->CompileArrayPushCall(object, holder, function, name, check);
}
static Object* CompileArrayPopCall(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check) {
return compiler->CompileArrayPopCall(object, holder, function, name, check);
}
static Object* Runtime_SpecialArrayFunctions(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, holder, 0);
InstallBuiltin(holder, "pop", Builtins::ArrayPop, CompileArrayPopCall);
InstallBuiltin(holder, "push", Builtins::ArrayPush, CompileArrayPushCall);
InstallBuiltin(holder, "shift", Builtins::ArrayShift);
InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
InstallBuiltin(holder, "slice", Builtins::ArraySlice);
InstallBuiltin(holder, "splice", Builtins::ArraySplice);
return *holder;
}
static Object* Runtime_MaterializeRegExpLiteral(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
@ -1376,9 +1436,7 @@ static Object* Runtime_FunctionIsAPIFunction(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
// The function_data field of the shared function info is used exclusively by
// the API.
return !f->shared()->function_data()->IsUndefined() ? Heap::true_value()
return f->shared()->IsApiFunction() ? Heap::true_value()
: Heap::false_value();
}
@ -4847,16 +4905,6 @@ static Object* Runtime_StringCompare(Arguments args) {
}
static Object* Runtime_Math_abs(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Counters::math_abs.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
return Heap::AllocateHeapNumber(fabs(x));
}
static Object* Runtime_Math_acos(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@ -5090,14 +5138,7 @@ static Object* Runtime_Math_tan(Arguments args) {
}
static Object* Runtime_DateMakeDay(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_SMI_CHECKED(year, args[0]);
CONVERT_SMI_CHECKED(month, args[1]);
CONVERT_SMI_CHECKED(date, args[2]);
static int MakeDay(int year, int month, int day) {
static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
181, 212, 243, 273, 304, 334};
static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
@ -5119,7 +5160,7 @@ static Object* Runtime_DateMakeDay(Arguments args) {
// ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
// Jan 1 1970. This is required so that we don't run into integer
// division of negative numbers.
// c) there shouldn't be overflow for 32-bit integers in the following
// c) there shouldn't be an overflow for 32-bit integers in the following
// operations.
static const int year_delta = 399999;
static const int base_day = 365 * (1970 + year_delta) +
@ -5135,10 +5176,327 @@ static Object* Runtime_DateMakeDay(Arguments args) {
base_day;
if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
return Smi::FromInt(day_from_year + day_from_month[month] + date - 1);
return day_from_year + day_from_month[month] + day - 1;
}
return Smi::FromInt(day_from_year + day_from_month_leap[month] + date - 1);
return day_from_year + day_from_month_leap[month] + day - 1;
}
static Object* Runtime_DateMakeDay(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_SMI_CHECKED(year, args[0]);
CONVERT_SMI_CHECKED(month, args[1]);
CONVERT_SMI_CHECKED(date, args[2]);
return Smi::FromInt(MakeDay(year, month, date));
}
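For orientation, the new MakeDay helper uses zero-based months and counts days from 1 January 1970 (day 0). A few spot checks in the cctest style used elsewhere in this commit; this is only a sketch, since MakeDay is file-static in runtime.cc and is not actually exposed to the test suite:

TEST(MakeDayConvention) {
  // Hypothetical checks; assumes MakeDay were visible to cctest.
  CHECK_EQ(0, MakeDay(1970, 0, 1));    // 1 Jan 1970 is day zero.
  CHECK_EQ(59, MakeDay(1970, 2, 1));   // 31 (Jan) + 28 (Feb) days later.
  CHECK_EQ(790, MakeDay(1972, 2, 1));  // 365 + 365 + 31 + 29; 1972 is a leap year.
}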
static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
static const int kDaysIn4Years = 4 * 365 + 1;
static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
static const int kDays1970to2000 = 30 * 365 + 7;
static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
kDays1970to2000;
static const int kYearsOffset = 400000;
static const char kDayInYear[] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
static const char kMonthInYear[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
// This function works for dates from 1970 to 2099.
static inline void DateYMDFromTimeAfter1970(int date,
int &year, int &month, int &day) {
#ifdef DEBUG
int save_date = date; // Need this for ASSERT in the end.
#endif
year = 1970 + (4 * date + 2) / kDaysIn4Years;
date %= kDaysIn4Years;
month = kMonthInYear[date];
day = kDayInYear[date];
ASSERT(MakeDay(year, month, day) == save_date);
}
static inline void DateYMDFromTimeSlow(int date,
int &year, int &month, int &day) {
#ifdef DEBUG
int save_date = date; // Need this for ASSERT in the end.
#endif
date += kDaysOffset;
year = 400 * (date / kDaysIn400Years) - kYearsOffset;
date %= kDaysIn400Years;
ASSERT(MakeDay(year, 0, 1) + date == save_date);
date--;
int yd1 = date / kDaysIn100Years;
date %= kDaysIn100Years;
year += 100 * yd1;
date++;
int yd2 = date / kDaysIn4Years;
date %= kDaysIn4Years;
year += 4 * yd2;
date--;
int yd3 = date / 365;
date %= 365;
year += yd3;
bool is_leap = (!yd1 || yd2) && !yd3;
ASSERT(date >= -1);
ASSERT(is_leap || date >= 0);
ASSERT(date < 365 || is_leap && date < 366);
ASSERT(is_leap == (year % 4 == 0 && (year % 100 || (year % 400 == 0))));
ASSERT(is_leap || MakeDay(year, 0, 1) + date == save_date);
ASSERT(!is_leap || MakeDay(year, 0, 1) + date + 1 == save_date);
if (is_leap) {
day = kDayInYear[2*365 + 1 + date];
month = kMonthInYear[2*365 + 1 + date];
} else {
day = kDayInYear[date];
month = kMonthInYear[date];
}
ASSERT(MakeDay(year, month, day) == save_date);
}
static inline void DateYMDFromTime(int date,
int &year, int &month, int &day) {
if (date >= 0 && date < 32 * kDaysIn4Years) {
DateYMDFromTimeAfter1970(date, year, month, day);
} else {
DateYMDFromTimeSlow(date, year, month, day);
}
}
static Object* Runtime_DateYMDFromTime(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(t, args[0]);
CONVERT_CHECKED(JSArray, res_array, args[1]);
int year, month, day;
DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
res_array->SetElement(0, Smi::FromInt(year));
res_array->SetElement(1, Smi::FromInt(month));
res_array->SetElement(2, Smi::FromInt(day));
return Heap::undefined_value();
}
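A sketch of what the split fast/slow helper computes, again assuming DateYMDFromTime were visible outside runtime.cc (it is file-static); day numbers are days since 1970-01-01 and months come back zero-based:

int year, month, day;
DateYMDFromTime(0, year, month, day);      // 1970-01-01: year 1970, month 0, day 1.
DateYMDFromTime(59, year, month, day);     // 31 + 28 days in: 1 March 1970 (month 2).
DateYMDFromTime(11016, year, month, day);  // 10957 days to 2000-01-01 plus 59:
                                           // 29 February 2000 (month 1, day 29).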
@ -8675,18 +9033,28 @@ static Object* Runtime_ListNatives(Arguments args) {
HandleScope scope;
Handle<JSArray> result = Factory::NewJSArray(0);
int index = 0;
bool inline_runtime_functions = false;
#define ADD_ENTRY(Name, argc, ressize) \
{ \
HandleScope inner; \
Handle<String> name = \
Factory::NewStringFromAscii( \
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
name = Factory::NewStringFromAscii( \
Vector<const char>("_" #Name, StrLength("_" #Name))); \
} else { \
name = Factory::NewStringFromAscii( \
Vector<const char>(#Name, StrLength(#Name))); \
} \
Handle<JSArray> pair = Factory::NewJSArray(0); \
SetElement(pair, 0, name); \
SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \
SetElement(result, index++, pair); \
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
inline_runtime_functions = true;
INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
return *result;
}

8
deps/v8/src/runtime.h

@ -61,6 +61,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(ToSlowProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
\
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
@ -132,7 +133,6 @@ namespace internal {
F(StringCompare, 2, 1) \
\
/* Math */ \
F(Math_abs, 1, 1) \
F(Math_acos, 1, 1) \
F(Math_asin, 1, 1) \
F(Math_atan, 1, 1) \
@ -204,6 +204,7 @@ namespace internal {
F(DateLocalTimeOffset, 0, 1) \
F(DateDaylightSavingsOffset, 1, 1) \
F(DateMakeDay, 3, 1) \
F(DateYMDFromTime, 2, 1) \
\
/* Numbers */ \
F(NumberIsFinite, 1, 1) \
@ -234,11 +235,10 @@ namespace internal {
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateArrayLiteralBoilerplate, 3, 1) \
F(CreateObjectLiteralBoilerplate, 3, 1) \
F(CloneLiteralBoilerplate, 1, 1) \
F(CloneShallowLiteralBoilerplate, 1, 1) \
F(CreateObjectLiteral, 3, 1) \
F(CreateObjectLiteralShallow, 3, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\

1
deps/v8/src/scopes.h

@ -277,7 +277,6 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
// ---------------------------------------------------------------------------
// Debugging.

84
deps/v8/src/string.js

@ -239,14 +239,29 @@ function StringReplace(search, replace) {
}
var cachedReplaceSubject;
var cachedReplaceRegexp;
var cachedReplaceReplacement;
var cachedReplaceAnswer;
// Helper function for regular expressions in String.prototype.replace.
function StringReplaceRegExp(subject, regexp, replace) {
if (%_ObjectEquals(replace, cachedReplaceReplacement) &&
%_ObjectEquals(subject, cachedReplaceSubject) &&
%_ObjectEquals(regexp, cachedReplaceRegexp)) {
return cachedReplaceAnswer;
}
replace = TO_STRING_INLINE(replace);
return %StringReplaceRegExpWithString(subject,
var answer = %StringReplaceRegExpWithString(subject,
regexp,
replace,
lastMatchInfo);
};
cachedReplaceSubject = subject;
cachedReplaceRegexp = regexp;
cachedReplaceReplacement = replace;
cachedReplaceAnswer = answer;
return answer;
}
// Expand the $-expressions in the string and return a new string with
@ -387,19 +402,44 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Unfortunately, that means this code is nearly duplicated, here and in
// jsregexp.cc.
if (regexp.global) {
var numberOfCaptures = NUMBER_OF_CAPTURES(matchInfo) >> 1;
var previous = 0;
var startOfMatch;
if (NUMBER_OF_CAPTURES(matchInfo) == 2) {
// Both branches contain essentially the same loop except for the call
// to the replace function. The branch is put outside of the loop for
// speed
do {
var startOfMatch = matchInfo[CAPTURE0];
startOfMatch = matchInfo[CAPTURE0];
result.addSpecialSlice(previous, startOfMatch);
previous = matchInfo[CAPTURE1];
if (numberOfCaptures == 1) {
var match = SubString(subject, startOfMatch, previous);
// Don't call directly to avoid exposing the built-in global object.
result.add(replace.call(null, match, startOfMatch, subject));
// Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
// Increment previous if we matched an empty string, as per ECMA-262
// 15.5.4.10.
if (previous == startOfMatch) {
// Add the skipped character to the output, if any.
if (previous < subject.length) {
result.addSpecialSlice(previous, previous + 1);
}
previous++;
// Per ECMA-262 15.10.6.2, if the previous index is greater than the
// string length, there is no match
if (previous > subject.length) {
return result.generate();
}
}
matchInfo = DoRegExpExec(regexp, subject, previous);
} while (!IS_NULL(matchInfo));
} else {
do {
startOfMatch = matchInfo[CAPTURE0];
result.addSpecialSlice(previous, startOfMatch);
previous = matchInfo[CAPTURE1];
result.add(ApplyReplacementFunction(replace, matchInfo, subject));
}
// Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
@ -411,19 +451,19 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
result.addSpecialSlice(previous, previous + 1);
}
previous++;
}
// Per ECMA-262 15.10.6.2, if the previous index is greater than the
// string length, there is no match
matchInfo = (previous > subject.length)
? null
: DoRegExpExec(regexp, subject, previous);
if (previous > subject.length) {
return result.generate();
}
}
matchInfo = DoRegExpExec(regexp, subject, previous);
} while (!IS_NULL(matchInfo));
}
// Tack on the final right substring after the last match, if necessary.
if (previous < subject.length) {
// Tack on the final right substring after the last match.
result.addSpecialSlice(previous, subject.length);
}
} else { // Not a global regexp, no need to loop.
result.addSpecialSlice(0, matchInfo[CAPTURE0]);
var endOfMatch = matchInfo[CAPTURE1];
@ -719,16 +759,26 @@ function StringTrimRight() {
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
var static_charcode_array = new $Array(4);
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
if (n == 1) return %_CharFromCode(ToNumber(code) & 0xffff)
if (n == 1) {
if (!%_IsSmi(code)) code = ToNumber(code);
return %_CharFromCode(code & 0xffff);
}
// NOTE: This is not super-efficient, but it is necessary because we
// want to avoid converting to numbers from within the virtual
// machine. Maybe we can find another way of doing this?
var codes = new $Array(n);
for (var i = 0; i < n; i++) codes[i] = ToNumber(%_Arguments(i));
var codes = static_charcode_array;
for (var i = 0; i < n; i++) {
var code = %_Arguments(i);
if (!%_IsSmi(code)) code = ToNumber(code);
codes[i] = code;
}
codes.length = n;
return %StringFromCharCodeArray(codes);
}

8
deps/v8/src/stub-cache.cc

@ -435,14 +435,6 @@ Object* StubCache::ComputeCallConstant(int argc,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
if (object->IsJSObject()) {
Object* opt =
Top::LookupSpecialFunction(JSObject::cast(object), holder, function);
if (opt->IsJSFunction()) {
check = StubCompiler::JSARRAY_HAS_FAST_ELEMENTS_CHECK;
function = JSFunction::cast(opt);
}
}
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
// internal error which will make sure we do not update any

25
deps/v8/src/stub-cache.h

@ -326,8 +326,7 @@ class StubCompiler BASE_EMBEDDED {
RECEIVER_MAP_CHECK,
STRING_CHECK,
NUMBER_CHECK,
BOOLEAN_CHECK,
JSARRAY_HAS_FAST_ELEMENTS_CHECK
BOOLEAN_CHECK
};
StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
@ -549,7 +548,7 @@ class KeyedStoreStubCompiler: public StubCompiler {
class CallStubCompiler: public StubCompiler {
public:
explicit CallStubCompiler(int argc, InLoopFlag in_loop)
CallStubCompiler(int argc, InLoopFlag in_loop)
: arguments_(argc), in_loop_(in_loop) { }
Object* CompileCallField(JSObject* object,
@ -570,6 +569,18 @@ class CallStubCompiler: public StubCompiler {
JSFunction* function,
String* name);
Object* CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check);
Object* CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check);
private:
const ParameterCount arguments_;
const InLoopFlag in_loop_;
@ -591,6 +602,14 @@ class ConstructStubCompiler: public StubCompiler {
};
typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
JSFunction* function,
String* name,
StubCompiler::CheckType check);
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_

48
deps/v8/src/top.cc

@ -92,15 +92,15 @@ void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
v->VisitPointer(&(thread->pending_exception_));
v->VisitPointer(&(thread->pending_message_obj_));
v->VisitPointer(
bit_cast<Object**, Script**>(&(thread->pending_message_script_)));
v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_)));
BitCast<Object**, Script**>(&(thread->pending_message_script_)));
v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
v->VisitPointer(&(thread->scheduled_exception_));
for (v8::TryCatch* block = thread->TryCatchHandler();
block != NULL;
block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_)));
v->VisitPointer(bit_cast<Object**, void**>(&(block->message_)));
v->VisitPointer(BitCast<Object**, void**>(&(block->exception_)));
v->VisitPointer(BitCast<Object**, void**>(&(block->message_)));
}
// Iterate over pointers on native execution stack.
@ -439,10 +439,9 @@ void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
// Get the data object from access check info.
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
Object* info = constructor->shared()->function_data();
if (info == Heap::undefined_value()) return;
Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
if (!constructor->shared()->IsApiFunction()) return;
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
if (data_obj == Heap::undefined_value()) return;
HandleScope scope;
@ -502,10 +501,10 @@ bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
// Get named access check callback
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
Object* info = constructor->shared()->function_data();
if (info == Heap::undefined_value()) return false;
if (!constructor->shared()->IsApiFunction()) return false;
Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
if (data_obj == Heap::undefined_value()) return false;
Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
@ -547,10 +546,10 @@ bool Top::MayIndexedAccess(JSObject* receiver,
// Get indexed access check callback
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
Object* info = constructor->shared()->function_data();
if (info == Heap::undefined_value()) return false;
if (!constructor->shared()->IsApiFunction()) return false;
Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
Object* data_obj =
constructor->shared()->get_api_func_data()->access_check_info();
if (data_obj == Heap::undefined_value()) return false;
Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
@ -950,27 +949,6 @@ Handle<Context> Top::GetCallingGlobalContext() {
}
bool Top::CanHaveSpecialFunctions(JSObject* object) {
return object->IsJSArray();
}
Object* Top::LookupSpecialFunction(JSObject* receiver,
JSObject* prototype,
JSFunction* function) {
if (CanHaveSpecialFunctions(receiver)) {
FixedArray* table = context()->global_context()->special_function_table();
for (int index = 0; index < table->length(); index +=3) {
if ((prototype == table->get(index)) &&
(function == table->get(index+1))) {
return table->get(index+2);
}
}
}
return Heap::undefined_value();
}
char* Top::ArchiveThread(char* to) {
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
InitializeThreadLocal();

5
deps/v8/src/top.h

@ -342,11 +342,6 @@ class Top {
return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
}
static bool CanHaveSpecialFunctions(JSObject* object);
static Object* LookupSpecialFunction(JSObject* receiver,
JSObject* prototype,
JSFunction* value);
static void RegisterTryCatchHandler(v8::TryCatch* that);
static void UnregisterTryCatchHandler(v8::TryCatch* that);

40
deps/v8/src/utils.h

@ -29,6 +29,7 @@
#define V8_UTILS_H_
#include <stdlib.h>
#include <string.h>
namespace v8 {
namespace internal {
@ -396,7 +397,7 @@ class EmbeddedVector : public Vector<T> {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
this->set_start(buffer_);
return *this;
}
@ -599,6 +600,43 @@ static inline void MemsetPointer(T** dest, T* value, int counter) {
// Calculate 10^exponent.
int TenToThe(int exponent);
// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++-style casts makes no difference; however, a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// BitCast uses the memcpy exception to move the bits from a variable of one
// type to a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize BitCast away.
//
// There is an additional use for BitCast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use BitCast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another, thus avoiding the warning.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
} } // namespace v8::internal
#endif // V8_UTILS_H_
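The BitCast comment above describes the pattern this snippet demonstrates; it is a standalone illustration, not V8 code, with the template repeated so it compiles on its own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Same shape as v8::internal::BitCast, copied so the example is self-contained.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}

int main() {
  double f = 1.0;
  // Well-defined: copies the 8 bytes of f into an integer, unlike
  // *(uint64_t*)(&f), which violates the aliasing rule described above.
  uint64_t fbits = BitCast<uint64_t>(f);
  printf("%016llx\n", static_cast<unsigned long long>(fbits));  // 3ff0000000000000
  return 0;
}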

1
deps/v8/src/v8-counters.h

@ -170,7 +170,6 @@ namespace internal {
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
SC(number_to_string_runtime, V8.NumberToStringRuntime) \
SC(math_abs, V8.MathAbs) \
SC(math_acos, V8.MathAcos) \
SC(math_asin, V8.MathAsin) \
SC(math_atan, V8.MathAtan) \

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
#define BUILD_NUMBER 3
#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

6
deps/v8/src/virtual-frame-inl.h

@ -119,6 +119,12 @@ bool VirtualFrame::Equals(VirtualFrame* other) {
return true;
}
void VirtualFrame::SetTypeForLocalAt(int index, NumberInfo info) {
elements_[local0_index() + index].set_number_info(info);
}
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_INL_H_

8
deps/v8/src/x64/codegen-x64.cc

@ -2424,11 +2424,13 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->Push(Smi::FromInt(node->literal_index()));
// Constant properties.
frame_->Push(node->constant_properties());
// Should the object literal have fast elements?
frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
Result clone;
if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
frame_->Push(&clone);
@ -3600,7 +3602,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in rdx and the formal

13
deps/v8/src/x64/codegen-x64.h

@ -337,6 +337,10 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
// If the name is an inline runtime function call, return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@ -506,6 +510,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
@ -537,7 +542,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
@ -575,14 +580,10 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
// Fast case for sqrt
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.

5
deps/v8/src/x64/full-codegen-x64.cc

@ -903,10 +903,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_properties());
__ Push(Smi::FromInt(expr->fast_elements() ? 1 : 0));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
// If result_saved is true the result is on top of the stack. If

12
deps/v8/src/x64/macro-assembler-x64.cc

@ -197,9 +197,9 @@ void MacroAssembler::RecordWrite(Register object,
// avoid having the fast case for smis leave the registers
// unchanged.
if (FLAG_debug_code) {
movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
@ -270,9 +270,9 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}

131
deps/v8/src/x64/stub-cache-x64.cc

@ -655,6 +655,118 @@ class CallInterceptorCompiler BASE_EMBEDDED {
#define __ ACCESS_MASM((masm()))
Object* CallStubCompiler::CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// TODO(639): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss;
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
rbx, rax, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* CallStubCompiler::CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
// ...
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// TODO(642): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
Label miss;
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
rbx, rax, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
// Handle call cache miss.
__ bind(&miss);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@ -670,6 +782,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
return generator(this, object, holder, function, name, check);
}
Label miss;
// Get the receiver from the stack.
@ -759,18 +878,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), rdx, holder,
rbx, rax, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
Factory::fixed_array_map());
__ j(not_equal, &miss);
break;
default:
UNREACHABLE();
}

2
deps/v8/src/x64/virtual-frame-x64.h

@ -416,6 +416,8 @@ class VirtualFrame : public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
inline void SetTypeForLocalAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;

4
deps/v8/test/cctest/SConscript

@ -34,6 +34,7 @@ Import('context object_files')
SOURCES = {
'all': [
'gay_shortest.cc',
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
@ -43,8 +44,11 @@ SOURCES = {
'test-dataflow.cc',
'test-debug.cc',
'test-decls.cc',
'test-diy_fp.cc',
'test-double.cc',
'test-flags.cc',
'test-func-name-inference.cc',
'test-grisu3.cc',
'test-hashmap.cc',
'test-heap.cc',
'test-heap-profiler.cc',

100048
deps/v8/test/cctest/gay_shortest.cc

File diff suppressed because it is too large

44
deps/v8/test/cctest/gay_shortest.h

@ -0,0 +1,44 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GAY_SHORTEST_H_
#define GAY_SHORTEST_H_
namespace v8 {
namespace internal {
struct GayShortest {
double v;
const char* representation;
int decimal_point;
};
Vector<const GayShortest> PrecomputedShortestRepresentations();
} } // namespace v8::internal
#endif // GAY_SHORTEST_H_

67
deps/v8/test/cctest/test-diy_fp.cc

@ -0,0 +1,67 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
using namespace v8::internal;
TEST(Subtract) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(1, 0);
DiyFp diff = DiyFp::Minus(diy_fp1, diy_fp2);
CHECK(2 == diff.f()); // NOLINT
CHECK_EQ(0, diff.e());
diy_fp1.Subtract(diy_fp2);
CHECK(2 == diy_fp1.f()); // NOLINT
CHECK_EQ(0, diy_fp1.e());
}
TEST(Multiply) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(2, 0);
DiyFp product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(64, product.e());
diy_fp1.Multiply(diy_fp2);
CHECK(0 == diy_fp1.f()); // NOLINT
CHECK_EQ(64, diy_fp1.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000000), 11);
diy_fp2 = DiyFp(2, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Test rounding.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000001), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7fffffff, ffffffff), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Halfway cases are allowed to round either way. So don't check for it.
// Big numbers.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 11);
diy_fp2 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 13);
// 128bit result: 0xfffffffffffffffe0000000000000001
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFe) == product.f());
CHECK_EQ(11 + 13 + 64, product.e());
}

204
deps/v8/test/cctest/test-double.cc

@ -0,0 +1,204 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
#include "double.h"
using namespace v8::internal;
TEST(Uint64Conversions) {
// Start by checking the byte-order.
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
CHECK_EQ(3512700564088504e-318, Double(ordered).value());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(5e-324, Double(min_double64).value());
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
CHECK_EQ(1.7976931348623157e308, Double(max_double64).value());
}
TEST(AsDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52, diy_fp.e());
// The 52 mantissa bits, plus the implicit 1 in bit 52 as a UINT64.
CHECK(V8_2PART_UINT64_C(0x00134567, 89ABCDEF) == diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsDiyFp();
CHECK_EQ(-0x3FF - 52 + 1, diy_fp.e());
// This is a denormal; so no hidden bit.
CHECK(1 == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52, diy_fp.e());
CHECK(V8_2PART_UINT64_C(0x001fffff, ffffffff) == diy_fp.f()); // NOLINT
}
TEST(AsNormalizedDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsNormalizedDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x00134567, 89ABCDEF) << 11) ==
diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
CHECK_EQ(-0x3FF - 52 + 1 - 63, diy_fp.e());
// This is a denormal; so no hidden bit.
CHECK(V8_2PART_UINT64_C(0x80000000, 00000000) == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x001fffff, ffffffff) << 11) ==
diy_fp.f()); // NOLINT
}
TEST(IsDenormal) {
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(Double(min_double64).IsDenormal());
uint64_t bits = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
CHECK(Double(bits).IsDenormal());
bits = V8_2PART_UINT64_C(0x00100000, 00000000);
CHECK(!Double(bits).IsDenormal());
}
TEST(IsSpecial) {
CHECK(Double(V8_INFINITY).IsSpecial());
CHECK(Double(-V8_INFINITY).IsSpecial());
CHECK(Double(OS::nan_value()).IsSpecial());
uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
CHECK(Double(bits).IsSpecial());
// Denormals are not special:
CHECK(!Double(5e-324).IsSpecial());
CHECK(!Double(-5e-324).IsSpecial());
// And some random numbers:
CHECK(!Double(0.0).IsSpecial());
CHECK(!Double(-0.0).IsSpecial());
CHECK(!Double(1.0).IsSpecial());
CHECK(!Double(-1.0).IsSpecial());
CHECK(!Double(1000000.0).IsSpecial());
CHECK(!Double(-1000000.0).IsSpecial());
CHECK(!Double(1e23).IsSpecial());
CHECK(!Double(-1e23).IsSpecial());
CHECK(!Double(1.7976931348623157e308).IsSpecial());
CHECK(!Double(-1.7976931348623157e308).IsSpecial());
}
TEST(IsInfinite) {
CHECK(Double(V8_INFINITY).IsInfinite());
CHECK(Double(-V8_INFINITY).IsInfinite());
CHECK(!Double(OS::nan_value()).IsInfinite());
CHECK(!Double(0.0).IsInfinite());
CHECK(!Double(-0.0).IsInfinite());
CHECK(!Double(1.0).IsInfinite());
CHECK(!Double(-1.0).IsInfinite());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsInfinite());
}
TEST(IsNan) {
CHECK(Double(OS::nan_value()).IsNan());
uint64_t other_nan = V8_2PART_UINT64_C(0xFFFFFFFF, 00000001);
CHECK(Double(other_nan).IsNan());
CHECK(!Double(V8_INFINITY).IsNan());
CHECK(!Double(-V8_INFINITY).IsNan());
CHECK(!Double(0.0).IsNan());
CHECK(!Double(-0.0).IsNan());
CHECK(!Double(1.0).IsNan());
CHECK(!Double(-1.0).IsNan());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsNan());
}
TEST(Sign) {
CHECK_EQ(1, Double(1.0).Sign());
CHECK_EQ(1, Double(V8_INFINITY).Sign());
CHECK_EQ(-1, Double(-V8_INFINITY).Sign());
CHECK_EQ(1, Double(0.0).Sign());
CHECK_EQ(-1, Double(-0.0).Sign());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(1, Double(min_double64).Sign());
}
TEST(NormalizedBoundaries) {
DiyFp boundary_plus;
DiyFp boundary_minus;
DiyFp diy_fp = Double(1.5).AsNormalizedDiyFp();
Double(1.5).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.5 does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
diy_fp = Double(1.0).AsNormalizedDiyFp();
Double(1.0).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.0 does have a significand of the form 2^p (for some p).
// Therefore its lower boundary is twice as close as the upper boundary.
CHECK_GT(boundary_plus.f() - diy_fp.f(), diy_fp.f() - boundary_minus.f());
CHECK((1 << 9) == diy_fp.f() - boundary_minus.f()); // NOLINT
CHECK((1 << 10) == boundary_plus.f() - diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
Double(min_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// min-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
// Denormals have their boundaries much closer.
CHECK((static_cast<uint64_t>(1) << 62) ==
diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
diy_fp = Double(smallest_normal64).AsNormalizedDiyFp();
Double(smallest_normal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// Even though the significand is of the form 2^p (for some p), its boundaries
// are at the same distance. (This is the only exception).
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
diy_fp = Double(largest_denormal64).AsNormalizedDiyFp();
Double(largest_denormal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 11) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
Double(max_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// max-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
}
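The factor-of-two asymmetry that the 1.0 case above checks follows directly from the neighbouring doubles; a short derivation using standard IEEE-754 double parameters, nothing specific to this patch. The boundaries are the midpoints to the adjacent doubles, and the next double above 1.0 is 1 + 2^-52 while the next one below is 1 - 2^-53, so for v = 1.0

\[ m^{+} - v = \tfrac{1}{2}(v^{+} - v) = 2^{-53}, \qquad v - m^{-} = \tfrac{1}{2}(v - v^{-}) = 2^{-54}. \]

At the normalized exponent e = -63 these distances correspond to 2^10 and 2^9 units of the significand, which is exactly what the CHECKs on boundary_plus and boundary_minus assert.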

116
deps/v8/test/cctest/test-grisu3.cc

@ -0,0 +1,116 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
#include "double.h"
#include "gay_shortest.h"
#include "grisu3.h"
using namespace v8::internal;
static const int kBufferSize = 100;
TEST(GrisuVariousDoubles) {
char buffer[kBufferSize];
int sign;
int length;
int point;
int status;
double min_double = 5e-324;
status = grisu3(min_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5", buffer);
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = grisu3(max_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("17976931348623157", buffer);
CHECK_EQ(309, point);
status = grisu3(4294967272.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4294967272", buffer);
CHECK_EQ(10, point);
status = grisu3(4.1855804968213567e298, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4185580496821357", buffer);
CHECK_EQ(299, point);
status = grisu3(5.5626846462680035e-309, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5562684646268003", buffer);
CHECK_EQ(-308, point);
status = grisu3(2147483648.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("2147483648", buffer);
CHECK_EQ(10, point);
status = grisu3(3.5844466002796428e+298, buffer, &sign, &length, &point);
if (status) { // Not all grisu3 variants manage to compute this number.
CHECK_EQ("35844466002796428", buffer);
CHECK_EQ(0, sign);
CHECK_EQ(299, point);
}
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = grisu3(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("22250738585072014", buffer);
CHECK_EQ(-307, point);
}
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
status = grisu3(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("2225073858507201", buffer);
CHECK_EQ(-307, point);
}
}
TEST(GrisuGayShortest) {
char buffer[kBufferSize];
bool status;
int sign;
int length;
int point;
int succeeded = 0;
int total = 0;
bool needed_max_length = false;
Vector<const GayShortest> precomputed = PrecomputedShortestRepresentations();
for (int i = 0; i < precomputed.length(); ++i) {
const GayShortest current_test = precomputed[i];
total++;
double v = current_test.v;
status = grisu3(v, buffer, &sign, &length, &point);
CHECK_GE(kGrisu3MaximalLength, length);
if (!status) continue;
if (length == kGrisu3MaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
CHECK_EQ(current_test.representation, buffer);
}
CHECK_GT(succeeded*1.0/total, 0.99);
CHECK(needed_max_length);
}
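
The test above captures the contract being verified: grisu3 may bail out on a small fraction of inputs (callers then fall back to a slower algorithm), but whenever it succeeds it must emit the shortest digit string that parses back to exactly the original double, matching Gay's precomputed representations. A brute-force sketch of that shortest-round-trip property (not grisu3 itself, just the property it guarantees):

// Brute force, for illustration only: find the fewest significant decimal
// digits whose printed form parses back to exactly v.
#include <cstdio>
#include <cstdlib>

static int ShortestDigits(double v) {
  for (int digits = 1; digits <= 17; ++digits) {
    char buffer[32];
    std::snprintf(buffer, sizeof(buffer), "%.*e", digits - 1, v);
    if (std::strtod(buffer, NULL) == v) return digits;  // round-trips exactly
  }
  return 17;  // 17 significant digits always suffice for IEEE-754 doubles
}

int main() {
  std::printf("%d\n", ShortestDigits(0.5));                     // 1
  std::printf("%d\n", ShortestDigits(4294967272.0));            // 10, matching the test above
  std::printf("%d\n", ShortestDigits(1.7976931348623157e308));  // 17
  return 0;
}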

8
deps/v8/test/cctest/test-log-stack-tracer.cc

@@ -232,10 +232,12 @@ class CodeGeneratorPatcher {
 public:
 CodeGeneratorPatcher() {
 CodeGenerator::InlineRuntimeLUT genGetFramePointer =
-{&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer"};
-// _FastCharCodeAt is not used in our tests.
+{&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer", 0};
+// _RandomPositiveSmi is not used in our tests. The one we replace needs to
+// have the same number of arguments as the one we put in, which is zero in
+// this case.
 bool result = CodeGenerator::PatchInlineRuntimeEntry(
-NewString("_FastCharCodeAt"),
+NewString("_RandomPositiveSmi"),
 genGetFramePointer, &oldInlineEntry);
 CHECK(result);
 }
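
The new comment states the constraint behind this change: a table entry can only be patched with a generator that expects the same number of arguments, which is why the _GetFramePointer stand-in now carries an explicit argument count of 0 and replaces the zero-argument _RandomPositiveSmi rather than _FastCharCodeAt. A generic sketch of such an arity-checked table patch (names and types are illustrative only, not V8's CodeGenerator interface):

// Illustrative only: a name -> generator table in which an entry may be
// swapped solely for a replacement with the same argument count.
#include <cstdio>
#include <cstring>

struct InlineEntry {
  void (*generator)();  // stand-in for the code-generation callback
  const char* name;     // e.g. "_RandomPositiveSmi"
  int nargs;            // number of arguments the generator expects
};

static bool PatchEntry(InlineEntry* table, int size, const char* name,
                       const InlineEntry& replacement, InlineEntry* old_entry) {
  for (int i = 0; i < size; i++) {
    if (std::strcmp(table[i].name, name) != 0) continue;
    if (table[i].nargs != replacement.nargs) return false;  // arity must match
    *old_entry = table[i];
    table[i] = replacement;
    return true;
  }
  return false;  // no entry with that name
}

static void GenA() {}
static void GenB() {}

int main() {
  InlineEntry table[] = {{GenA, "_RandomPositiveSmi", 0}, {GenA, "_TakesOneArg", 1}};
  InlineEntry replacement = {GenB, "_GetFramePointer", 0};
  InlineEntry old_entry;
  std::printf("%d\n", PatchEntry(table, 2, "_RandomPositiveSmi", replacement, &old_entry));  // 1: patched
  std::printf("%d\n", PatchEntry(table, 2, "_TakesOneArg", replacement, &old_entry));        // 0: arity differs
  return 0;
}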

6
deps/v8/test/cctest/test-log.cc

@@ -52,13 +52,9 @@ TEST(GetMessages) {
 CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
 char log_lines[100];
 memset(log_lines, 0, sizeof(log_lines));
-// Requesting data size which is smaller than first log message length.
-CHECK_EQ(0, Logger::GetLogLines(0, log_lines, 3));
 // See Logger::StringEvent.
 const char* line_1 = "aaa,\"bbb\"\n";
 const int line_1_len = StrLength(line_1);
-// Still smaller than log message length.
-CHECK_EQ(0, Logger::GetLogLines(0, log_lines, line_1_len - 1));
 // The exact size.
 CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len));
 CHECK_EQ(line_1, log_lines);
@@ -72,8 +68,6 @@ TEST(GetMessages) {
 const int line_2_len = StrLength(line_2);
 // Now start with line_2 beginning.
 CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 0));
-CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 3));
-CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, line_2_len - 1));
 CHECK_EQ(line_2_len, Logger::GetLogLines(line_1_len, log_lines, line_2_len));
 CHECK_EQ(line_2, log_lines);
 memset(log_lines, 0, sizeof(log_lines));

1
deps/v8/test/cctest/test-serialize.cc

@@ -283,7 +283,6 @@ static void SanityCheck() {
 #endif
 CHECK(Top::global()->IsJSObject());
 CHECK(Top::global_context()->IsContext());
-CHECK(Top::special_function_table()->IsFixedArray());
 CHECK(Heap::symbol_table()->IsSymbolTable());
 CHECK(!Factory::LookupAsciiSymbol("Empty")->IsFailure());
 }

48
deps/v8/test/mjsunit/abs.js

@@ -0,0 +1,48 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test Math.abs.
assertEquals(1, Math.abs(1)); // Positive SMI.
assertEquals(1, Math.abs(-1)); // Negative SMI.
assertEquals(0.5, Math.abs(0.5)); // Positive double.
assertEquals(0.5, Math.abs(-0.5)); // Negative double.
assertEquals('Infinity', Math.abs(Number('+Infinity').toString()));
assertEquals('Infinity', Math.abs(Number('-Infinity').toString()));
assertEquals('NaN', Math.abs(NaN).toString());
assertEquals('NaN', Math.abs(-NaN).toString());
var minusZero = 1 / (-1 / 0);
function isMinusZero(x) {
return x === 0 && 1 / x < 0;
}
assertTrue(!isMinusZero(0));
assertTrue(isMinusZero(minusZero));
assertEquals(0, Math.abs(minusZero));
assertTrue(!isMinusZero(Math.abs(minusZero)));
assertTrue(!isMinusZero(Math.abs(0.0)));

61
deps/v8/test/mjsunit/array-pop.js

@@ -0,0 +1,61 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Check pops with various numbers of arguments.
(function() {
var a = [];
for (var i = 0; i < 7; i++) {
a = [7, 6, 5, 4, 3, 2, 1];
assertEquals(1, a.pop(), "1st pop");
assertEquals(6, a.length, "length 1st pop");
assertEquals(2, a.pop(1), "2nd pop");
assertEquals(5, a.length, "length 2nd pop");
assertEquals(3, a.pop(1, 2), "3rd pop");
assertEquals(4, a.length, "length 3rd pop");
assertEquals(4, a.pop(1, 2, 3), "4th pop");
assertEquals(3, a.length, "length 4th pop");
assertEquals(5, a.pop(), "5th pop");
assertEquals(2, a.length, "length 5th pop");
assertEquals(6, a.pop(), "6th pop");
assertEquals(1, a.length, "length 6th pop");
assertEquals(7, a.pop(), "7th pop");
assertEquals(0, a.length, "length 7th pop");
assertEquals(undefined, a.pop(), "8th pop");
assertEquals(0, a.length, "length 8th pop");
assertEquals(undefined, a.pop(1, 2, 3), "9th pop");
assertEquals(0, a.length, "length 9th pop");
}
})();

68
deps/v8/test/mjsunit/array-push.js

@@ -0,0 +1,68 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Check pushes with various numbers of arguments.
(function() {
var a = [];
for (var i = 0; i < 7; i++) {
a = [];
assertEquals(0, a.push());
assertEquals([], a, "after .push()");
assertEquals(1, a.push(1), "length after .push(1)");
assertEquals([1], a, "after .push(1)");
assertEquals(3, a.push(2, 3), "length after .push(2, 3)");
assertEquals([1, 2, 3], a, "after .push(2, 3)");
assertEquals(6, a.push(4, 5, 6),
"length after .push(4, 5, 6)");
assertEquals([1, 2, 3, 4, 5, 6], a,
"after .push(4, 5, 5)");
assertEquals(10, a.push(7, 8, 9, 10),
"length after .push(7, 8, 9, 10)");
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], a,
"after .push(7, 8, 9, 10)");
assertEquals(15, a.push(11, 12, 13, 14, 15),
"length after .push(11, 12, 13, 14, 15)");
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], a,
"after .push(11, 12, 13, 14, 15)");
assertEquals(21, a.push(16, 17, 18, 19, 20, 21),
"length after .push(16, 17, 18, 19, 20, 21)");
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], a,
"after .push(16, 17, 18, 19, 20, 21)");
assertEquals(28, a.push(22, 23, 24, 25, 26, 27, 28),
"length hafter .push(22, 23, 24, 25, 26, 27, 28)");
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], a,
"after .push(22, 23, 24, 25, 26, 27, 28)");
}
})();

14
deps/v8/test/mjsunit/fuzz-natives.js

@@ -57,9 +57,17 @@ function makeFunction(name, argc) {
 return new Function(args.join(", "), "return %" + name + "(" + argsStr + ");");
 }
-function testArgumentCount(name) {
+function testArgumentCount(name, argc) {
 for (var i = 0; i < 10; i++) {
-var func = makeFunction(name, i);
+var func = null;
+try {
+func = makeFunction(name, i);
+} catch (e) {
+if (e != "SyntaxError: illegal access") throw e;
+}
+if (func === null && i == argc) {
+throw "unexpected exception";
+}
 var args = [ ];
 for (var j = 0; j < i; j++)
 args.push(0);
@@ -176,7 +184,7 @@ function testNatives() {
 continue;
 print(name);
 var argc = nativeInfo[1];
-testArgumentCount(name);
+testArgumentCount(name, argc);
 testArgumentTypes(name, argc);
 }
 }

42
deps/v8/test/mjsunit/regexp-compile.js

@@ -0,0 +1,42 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that we don't cache the result of a regexp match across a
// compile event.
var re = /x/;
assertEquals("a.yb", "axyb".replace(re, "."));
re.compile("y")
assertEquals("ax.b", "axyb".replace(re, "."));
re.compile("(x)");
assertEquals("x,x", re.exec("axyb"));
re.compile("(y)");
assertEquals("y,y", re.exec("axyb"));

35
deps/v8/test/mjsunit/regress/regress-641.js

@@ -0,0 +1,35 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Regression test for http://code.google.com/p/v8/issues/detail?id=641.
function f(){
while (window + 1) {
const window=[,];
}
}
f()

Some files were not shown because too many files changed in this diff
