
Upgrade V8 to 2.1.10

v0.7.4-release
Ryan Dahl 15 years ago
parent commit 6192b8659a
  1. deps/v8/ChangeLog (7)
  2. deps/v8/SConstruct (6)
  3. deps/v8/include/v8.h (2)
  4. deps/v8/src/SConscript (1)
  5. deps/v8/src/arguments.h (11)
  6. deps/v8/src/arm/codegen-arm.cc (27)
  7. deps/v8/src/arm/codegen-arm.h (3)
  8. deps/v8/src/arm/ic-arm.cc (141)
  9. deps/v8/src/arm/stub-cache-arm.cc (7)
  10. deps/v8/src/code-stubs.cc (10)
  11. deps/v8/src/codegen.h (2)
  12. deps/v8/src/compiler.cc (93)
  13. deps/v8/src/compiler.h (15)
  14. deps/v8/src/conversions.cc (512)
  15. deps/v8/src/data-flow.cc (1709)
  16. deps/v8/src/data-flow.h (341)
  17. deps/v8/src/flow-graph.cc (588)
  18. deps/v8/src/flow-graph.h (379)
  19. deps/v8/src/handles.cc (2)
  20. deps/v8/src/heap.cc (2)
  21. deps/v8/src/heap.h (2)
  22. deps/v8/src/ia32/codegen-ia32.cc (17)
  23. deps/v8/src/ia32/codegen-ia32.h (3)
  24. deps/v8/src/ia32/ic-ia32.cc (24)
  25. deps/v8/src/ia32/stub-cache-ia32.cc (29)
  26. deps/v8/src/oprofile-agent.cc (58)
  27. deps/v8/src/oprofile-agent.h (10)
  28. deps/v8/src/platform.h (2)
  29. deps/v8/src/stub-cache.cc (36)
  30. deps/v8/src/utils.h (9)
  31. deps/v8/src/version.cc (4)
  32. deps/v8/src/x64/codegen-x64.cc (20)
  33. deps/v8/src/x64/codegen-x64.h (3)
  34. deps/v8/src/x64/ic-x64.cc (26)
  35. deps/v8/src/x64/stub-cache-x64.cc (7)
  36. deps/v8/test/cctest/test-api.cc (62)
  37. deps/v8/test/cctest/test-conversions.cc (87)
  38. deps/v8/test/cctest/test-heap.cc (112)
  39. deps/v8/test/mjsunit/str-to-num.js (22)
  40. deps/v8/tools/gyp/v8.gyp (2)
  41. deps/v8/tools/visual_studio/v8_base.vcproj (8)
  42. deps/v8/tools/visual_studio/v8_base_arm.vcproj (8)
  43. deps/v8/tools/visual_studio/v8_base_x64.vcproj (8)

7
deps/v8/ChangeLog

@@ -1,3 +1,10 @@
+2010-03-26: Version 2.1.10
+
+        Fixed scons build issues.
+
+        Fixed a couple of minor bugs.
+
+
 2010-03-25: Version 2.1.9

         Added API support for reattaching a global object to a context.

6
deps/v8/SConstruct

@@ -275,6 +275,7 @@ V8_EXTRA_FLAGS = {
  'gcc': {
    'all': {
      'WARNINGFLAGS': ['-Wall',
+                      '-Werror',
                       '-W',
                       '-Wno-unused-parameter',
                       '-Wnon-virtual-dtor']
@@ -1008,7 +1009,6 @@ def BuildSpecific(env, mode, env_overrides):
  # Link the object files into a library.
  env.Replace(**context.flags['v8'])
-  env.Prepend(LIBS=[library_name])
  context.ApplyEnvOverrides(env)
  if context.options['library'] == 'static':
@@ -1043,7 +1043,9 @@ def BuildSpecific(env, mode, env_overrides):
    sample_env.Depends(sample_program, library)
    context.sample_targets.append(sample_program)
-  cctest_program = env.SConscript(
+  cctest_env = env.Copy()
+  cctest_env.Prepend(LIBS=[library_name])
+  cctest_program = cctest_env.SConscript(
    join('test', 'cctest', 'SConscript'),
    build_dir=join('obj', 'test', target_id),
    exports='context object_files',

2
deps/v8/include/v8.h

@@ -3361,7 +3361,7 @@ External* External::Cast(v8::Value* value) {
Local<Value> AccessorInfo::Data() const {
-  return Local<Value>(reinterpret_cast<Value*>(&args_[-3]));
+  return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
}

1
deps/v8/src/SConscript

@@ -61,6 +61,7 @@ SOURCES = {
    execution.cc
    factory.cc
    flags.cc
+   flow-graph.cc
    frame-element.cc
    frames.cc
    full-codegen.cc

11
deps/v8/src/arguments.h

@@ -72,7 +72,7 @@ class Arguments BASE_EMBEDDED {
};

-// Cursom arguments replicate a small segment of stack that can be
+// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
class CustomArguments : public Relocatable {
@@ -80,15 +80,14 @@ class CustomArguments : public Relocatable {
  inline CustomArguments(Object* data,
                         JSObject* self,
                         JSObject* holder) {
-    values_[3] = self;
-    values_[2] = holder;
-    values_[1] = Smi::FromInt(0);
+    values_[2] = self;
+    values_[1] = holder;
    values_[0] = data;
  }
  void IterateInstance(ObjectVisitor* v);
-  Object** end() { return values_ + 3; }
+  Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
 private:
-  Object* values_[4];
+  Object* values_[3];
};
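The shrunken values_ array goes hand in hand with the AccessorInfo::Data() index change in include/v8.h and the kStackSpace change in codegen.h above/below. A tiny, purely illustrative sketch (CustomArgumentsSketch and ARRAY_SIZE_SKETCH are stand-ins, not the real V8 definitions) of why end() now derives from the array size instead of a literal:

```cpp
#include <cassert>

// Illustrative stand-in for V8's ARRAY_SIZE macro.
#define ARRAY_SIZE_SKETCH(a) (sizeof(a) / sizeof((a)[0]))

struct CustomArgumentsSketch {
  void* values_[3];
  // Points at the last slot, mirroring the new end() above: the bound now
  // tracks the declared array size, so resizing values_ cannot desynchronise it.
  void** end() { return values_ + ARRAY_SIZE_SKETCH(values_) - 1; }
};

int main() {
  CustomArgumentsSketch args;
  assert(args.end() == args.values_ + 2);
  return 0;
}
```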

27
deps/v8/src/arm/codegen-arm.cc

@@ -3996,14 +3996,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
}

-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = node->op();
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
@@ -4015,8 +4008,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.
-  if (op == Token::AND) {
+  if (node->op() == Token::AND) {
    JumpTarget is_true;
    LoadConditionAndSpill(node->left(),
                          &is_true,
@@ -4062,7 +4054,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }
-  } else if (op == Token::OR) {
+  } else {
+    ASSERT(node->op() == Token::OR);
    JumpTarget is_false;
    LoadConditionAndSpill(node->left(),
                          true_target(),
@@ -4107,7 +4100,19 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }
+  }
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ BinaryOperation");
+  if (node->op() == Token::AND || node->op() == Token::OR) {
+    GenerateLogicalBooleanOperation(node);
  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.

3
deps/v8/src/arm/codegen-arm.h

@@ -306,6 +306,9 @@ class CodeGenerator: public AstVisitor {
  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);

+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
  void GenericBinaryOperation(Token::Value op,
                              OverwriteMode overwrite_mode,
                              int known_rhs = kUnknownIntValue);

141
deps/v8/src/arm/ic-arm.cc

@@ -65,11 +65,11 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
  // Check for the absence of an interceptor.
  // Load the map into t0.
  __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
-  // Test the has_named_interceptor bit in the map.
-  __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
-  __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
-  // Jump to miss if the interceptor bit is set.
-  __ b(ne, miss);
+  // Bail out if the receiver has a named interceptor.
+  __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+  __ b(nz, miss);
  // Bail out if we have a JS global proxy object.
  __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -144,6 +144,95 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register t0,
Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ mvn(t1, Operand(t0));
__ add(t0, t1, Operand(t0, LSL, 15));
// hash = hash ^ (hash >> 12);
__ eor(t0, t0, Operand(t0, LSR, 12));
// hash = hash + (hash << 2);
__ add(t0, t0, Operand(t0, LSL, 2));
// hash = hash ^ (hash >> 4);
__ eor(t0, t0, Operand(t0, LSR, 4));
// hash = hash * 2057;
__ mov(t1, Operand(2057));
__ mul(t0, t0, t1);
// hash = hash ^ (hash >> 16);
__ eor(t0, t0, Operand(t0, LSR, 16));
// Compute the capacity mask.
__ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
__ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
__ sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
__ mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
}
__ and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
__ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
__ cmp(key, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
} else {
__ b(ne, miss);
}
}
__ bind(&done);
// Check that the value is a normal property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
__ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
__ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(t0, FieldMemOperand(t2, kValueOffset));
}
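For reference, the comment above says this hash must stay in sync with ComputeIntegerHash in utils.h. A standalone C++ sketch of the same step sequence, with the constants taken directly from the instructions emitted here (the function name is illustrative):

```cpp
#include <cassert>
#include <stdint.h>

// Mirrors the mvn/add/eor/mul sequence in GenerateNumberDictionaryLoad.
static inline uint32_t ComputeIntegerHashSketch(uint32_t hash) {
  hash = ~hash + (hash << 15);  // hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);   // hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);    // hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash * 2057;
  hash = hash ^ (hash >> 16);   // hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // Nearby keys should scatter before the capacity mask is applied.
  assert(ComputeIntegerHashSketch(1) != ComputeIntegerHashSketch(2));
  return 0;
}
```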
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
@@ -530,7 +619,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  //  -- sp[0] : key
  //  -- sp[4] : receiver
  // -----------------------------------
-  Label slow, fast, check_pixel_array;
+  Label slow, fast, check_pixel_array, check_number_dictionary;

  // Get the key and receiver object from the stack.
  __ ldm(ia, sp, r0.bit() | r1.bit());
@@ -554,6 +643,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // Check that the key is a smi.
  __ BranchOnNotSmi(r0, &slow);
+  // Save key in r2 in case we want it for the number dictionary case.
+  __ mov(r2, r0);
  __ mov(r0, Operand(r0, ASR, kSmiTagSize));

  // Get the elements array of the object.
@@ -562,17 +653,26 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r3, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
  // Check that the key (index) is within bounds.
  __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
  __ cmp(r0, Operand(r3));
-  __ b(lo, &fast);
+  __ b(ge, &slow);
// Fast case: Do the load.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ Ret();
  // Check whether the elements is a pixel array.
  __ bind(&check_pixel_array);
  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
  __ cmp(r3, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_number_dictionary);
  __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
  __ cmp(r0, ip);
  __ b(hs, &slow);
@@ -581,22 +681,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Tag result as smi.
  __ Ret();
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: untagged index
// r1: elements
// r2: key
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
__ Ret();
  // Slow case: Push extra copies of the arguments (2).
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
  GenerateRuntimeGetProperty(masm);
// Fast case: Do the load.
__ bind(&fast);
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ Ret();
}

7
deps/v8/src/arm/stub-cache-arm.cc

@@ -396,15 +396,14 @@ static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register holder,
                                     Register name,
                                     JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
  __ push(name);
  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
  ASSERT(!Heap::InNewSpace(interceptor));
-  Register scratch = receiver;
+  Register scratch = name;
  __ mov(scratch, Operand(Handle<Object>(interceptor)));
  __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
  __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
}

10
deps/v8/src/code-stubs.cc

@@ -61,13 +61,9 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
  code->set_major_key(MajorKey());

-#ifdef ENABLE_OPROFILE_AGENT
-  // Register the generated stub with the OPROFILE agent.
-  OProfileAgent::CreateNativeCodeRegion(GetName(),
-                                        code->instruction_start(),
-                                        code->instruction_size());
-#endif
+  OPROFILE(CreateNativeCodeRegion(GetName(),
+                                  code->instruction_start(),
+                                  code->instruction_size()));
  LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
  Counters::total_stubs_code_size.Increment(code->instruction_size());
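The OPROFILE(...) wrapper used here (and again in compiler.cc below) replaces the per-call-site #ifdef ENABLE_OPROFILE_AGENT blocks. A hedged sketch of the idea, assuming the macro simply routes to OProfileAgent:: when the agent is compiled in and expands to nothing otherwise (the real definition lives in deps/v8/src/oprofile-agent.h, which this commit also touches):

```cpp
// Sketch only; see oprofile-agent.h for the actual definition.
#ifdef ENABLE_OPROFILE_AGENT
#define OPROFILE(call) OProfileAgent::call
#else
#define OPROFILE(call) ((void) 0)
#endif

// Call sites then stay unconditional, e.g.:
//   OPROFILE(CreateNativeCodeRegion(GetName(),
//                                   code->instruction_start(),
//                                   code->instruction_size()));
```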

2
deps/v8/src/codegen.h

@@ -450,7 +450,7 @@ class ApiGetterEntryStub : public CodeStub {
  virtual bool GetCustomCache(Code** code_out);
  virtual void SetCustomCache(Code* value);

-  static const int kStackSpace = 6;
+  static const int kStackSpace = 5;
  static const int kArgc = 4;
 private:
  Handle<AccessorInfo> info() { return info_; }

93
deps/v8/src/compiler.cc

@@ -34,6 +34,7 @@
#include "data-flow.h"
#include "debug.h"
#include "fast-codegen.h"
+#include "flow-graph.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "oprofile-agent.h"
@@ -235,27 +236,19 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
    return Handle<SharedFunctionInfo>::null();
  }

-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation for the script. Check explicit whether logging is
-  // to avoid allocating when not required.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    if (script->name()->IsString()) {
-      SmartPointer<char> data =
-          String::cast(script->name())->ToCString(DISALLOW_NULLS);
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, *data));
-      OProfileAgent::CreateNativeCodeRegion(*data,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, ""));
-      OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
-  }
-#endif
+  if (script->name()->IsString()) {
+    LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                        *code, String::cast(script->name())));
+    OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
+                                    code->instruction_start(),
+                                    code->instruction_size()));
+  } else {
+    LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                        *code, ""));
+    OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+                                    code->instruction_start(),
+                                    code->instruction_size()));
+  }

  // Allocate function.
  Handle<SharedFunctionInfo> result =
@@ -443,14 +436,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
    return false;
  }

-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
-                     name,
-                     Handle<String>(shared->inferred_name()),
-                     start_position,
-                     info->script(),
-                     code);
-#endif
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+                            name,
+                            Handle<String>(shared->inferred_name()),
+                            start_position,
+                            info->script(),
+                            code);

  // Update the shared function info with the compiled code.
  shared->set_code(*code);
@@ -578,15 +569,12 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
    }

    // Function compilation complete.
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-    LogCodeCreateEvent(Logger::FUNCTION_TAG,
-                       literal->name(),
-                       literal->inferred_name(),
-                       literal->start_position(),
-                       script,
-                       code);
-#endif
+    RecordFunctionCompilation(Logger::FUNCTION_TAG,
+                              literal->name(),
+                              literal->inferred_name(),
+                              literal->start_position(),
+                              script,
+                              code);
  }

  // Create a boilerplate function.
@@ -628,13 +616,12 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
}

-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Handle<String> name,
-                                  Handle<String> inferred_name,
-                                  int start_position,
-                                  Handle<Script> script,
-                                  Handle<Code> code) {
+void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                         Handle<String> name,
+                                         Handle<String> inferred_name,
+                                         int start_position,
+                                         Handle<Script> script,
+                                         Handle<Code> code) {
  // Log the code generation. If source information is available
  // include script name and line number. Check explicitly whether
  // logging is enabled as finding the line number is not free.
@@ -642,21 +629,21 @@ void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
    Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
    if (script->name()->IsString()) {
      int line_num = GetScriptLineNumber(script, start_position) + 1;
+      USE(line_num);
      LOG(CodeCreateEvent(tag, *code, *func_name,
                          String::cast(script->name()), line_num));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            String::cast(script->name()),
-                                            line_num,
-                                            code->instruction_start(),
-                                            code->instruction_size());
+      OPROFILE(CreateNativeCodeRegion(*func_name,
+                                      String::cast(script->name()),
+                                      line_num,
+                                      code->instruction_start(),
+                                      code->instruction_size()));
    } else {
      LOG(CodeCreateEvent(tag, *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            code->instruction_start(),
-                                            code->instruction_size());
+      OPROFILE(CreateNativeCodeRegion(*func_name,
+                                      code->instruction_start(),
+                                      code->instruction_size()));
    }
  }
}
-#endif

} }  // namespace v8::internal

15
deps/v8/src/compiler.h

@@ -266,15 +266,12 @@ class Compiler : public AllStatic {
                                              Handle<Script> script);

 private:
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
-                                 Handle<String> name,
-                                 Handle<String> inferred_name,
-                                 int start_position,
-                                 Handle<Script> script,
-                                 Handle<Code> code);
-#endif
+  static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                        Handle<String> name,
+                                        Handle<String> inferred_name,
+                                        int start_position,
+                                        Handle<Script> script,
+                                        Handle<Code> code);
};

512
deps/v8/src/conversions.cc

@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdarg.h>
+#include <limits.h>

#include "v8.h"
@@ -92,80 +93,80 @@ static inline const char* GetCString(String* str, int index) {
}
-static inline void ReleaseCString(const char* original, const char* str) {
-}
-
-
-static inline void ReleaseCString(String* original, const char* str) {
-  DeleteArray(const_cast<char *>(str));
-}
-
-
-static inline bool IsSpace(const char* str, int index) {
-  ASSERT(index >= 0 && index < StrLength(str));
-  return Scanner::kIsWhiteSpace.get(str[index]);
-}
-
-
-static inline bool IsSpace(String* str, int index) {
-  return Scanner::kIsWhiteSpace.get(str->Get(index));
-}
-
-
-static inline bool SubStringEquals(const char* str,
-                                   int index,
-                                   const char* other) {
-  return strncmp(str + index, other, strlen(other)) != 0;
-}
-
-
-static inline bool SubStringEquals(String* str, int index, const char* other) {
-  HandleScope scope;
-  int str_length = str->length();
-  int other_length = StrLength(other);
-  int end = index + other_length < str_length ?
-      index + other_length :
-      str_length;
-  Handle<String> substring =
-      Factory::NewSubString(Handle<String>(str), index, end);
-  return substring->IsEqualTo(Vector<const char>(other, other_length));
-}
-
-
-// Check if a string should be parsed as an octal number. The string
-// can be either a char* or a String*.
-template<class S>
-static bool ShouldParseOctal(S* s, int i) {
-  int index = i;
-  int len = GetLength(s);
-  if (index < len && GetChar(s, index) != '0') return false;
-
-  // If the first real character (following '0') is not an octal
-  // digit, bail out early. This also takes care of numbers of the
-  // forms 0.xxx and 0exxx by not allowing the first 0 to be
-  // interpreted as an octal.
-  index++;
-  if (index < len) {
-    int d = GetChar(s, index) - '0';
-    if (d < 0 || d > 7) return false;
-  } else {
-    return false;
-  }
-
-  // Traverse all digits (including the first). If there is an octal
-  // prefix which is not a part of a longer decimal prefix, we return
-  // true. Otherwise, false is returned.
-  while (index < len) {
-    int d = GetChar(s, index++) - '0';
-    if (d == 8 || d == 9) return false;
-    if (d < 0 || d > 7) return true;
-  }
-  return true;
-}
+namespace {
+
+// C++-style iterator adaptor for StringInputBuffer
+// (unlike C++ iterators the end-marker has different type).
+class StringInputBufferIterator {
+ public:
+  class EndMarker {};
+
+  explicit StringInputBufferIterator(StringInputBuffer* buffer);
+
+  int operator*() const;
+  void operator++();
+  bool operator==(EndMarker const&) const { return end_; }
+  bool operator!=(EndMarker const& m) const { return !end_; }
+
+ private:
+  StringInputBuffer* const buffer_;
+  int current_;
+  bool end_;
+};
+
+
+StringInputBufferIterator::StringInputBufferIterator(
+    StringInputBuffer* buffer) : buffer_(buffer) {
+  ++(*this);
+}
+
+
+int StringInputBufferIterator::operator*() const {
+  return current_;
+}
+
+
+void StringInputBufferIterator::operator++() {
+  end_ = !buffer_->has_more();
+  if (!end_) {
+    current_ = buffer_->GetNext();
+  }
+}
+}
+
+
+static inline void ReleaseCString(const char* original, const char* str) {
+}
+
+
+static inline void ReleaseCString(String* original, const char* str) {
+  DeleteArray(const_cast<char *>(str));
+}
+
+
+template <class Iterator, class EndMark>
+static bool SubStringEquals(Iterator* current,
+                            EndMark end,
+                            const char* substring) {
+  ASSERT(**current == *substring);
+  for (substring++; *substring != '\0'; substring++) {
+    ++*current;
+    if (*current == end || **current != *substring) return false;
+  }
+  ++*current;
+  return true;
+}
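A minimal, self-contained illustration of the Iterator/EndMark protocol the new template relies on (only ==/!= against the end marker, unary * and prefix ++ are needed), using a plain const char* range; SubStringEqualsSketch simply mirrors the template above:

```cpp
#include <cassert>

template <class Iterator, class EndMark>
static bool SubStringEqualsSketch(Iterator* current, EndMark end,
                                  const char* substring) {
  // Same shape as the new SubStringEquals: advance *current while it keeps
  // matching substring, leaving it just past the match on success.
  assert(**current == *substring);
  for (substring++; *substring != '\0'; substring++) {
    ++*current;
    if (*current == end || **current != *substring) return false;
  }
  ++*current;
  return true;
}

int main() {
  const char text[] = "Infinity and beyond";
  const char* it = text;
  const char* end = text + sizeof(text) - 1;  // exclude the terminating '\0'
  assert(SubStringEqualsSketch(&it, end, "Infinity"));
  assert(*it == ' ');  // the iterator now points just past the match
  return 0;
}
```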
extern "C" double gay_strtod(const char* s00, const char** se); extern "C" double gay_strtod(const char* s00, const char** se);
// Maximum number of significant digits in decimal representation.
// The longest possible double in decimal representation is
// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
// (768 digits). If we parse a number whose first digits are equal to a
// mean of 2 adjacent doubles (that could have up to 769 digits) the result
// must be rounded to the bigger one unless the tail consists of zeros, so
// we don't need to preserve all the digits.
const int kMaxSignificantDigits = 772;
// Parse an int from a string starting a given index and in a given
// radix. The string can be either a char* or a String*.
@@ -262,95 +263,372 @@ int StringToInt(const char* str, int index, int radix, double* value) {
static const double JUNK_STRING_VALUE = OS::nan_value();

-// Convert a string to a double value. The string can be either a
-// char* or a String*.
-template<class S>
-static double InternalStringToDouble(S* str,
+// Returns true if a nonspace found and false if the end has reached.
+template <class Iterator, class EndMark>
+static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
+  while (*current != end) {
+    if (!Scanner::kIsWhiteSpace.get(**current)) return true;
+    ++*current;
+  }
+  return false;
+}
template <class Iterator, class EndMark>
static double InternalHexadecimalStringToDouble(Iterator current,
EndMark end,
char* buffer,
bool allow_trailing_junk) {
ASSERT(current != end);
const int max_hex_significant_digits = 52 / 4 + 2;
// We reuse the buffer of InternalStringToDouble. Since hexadecimal
// numbers may have much less digits than decimal the buffer won't overflow.
ASSERT(max_hex_significant_digits < kMaxSignificantDigits);
int significant_digits = 0;
int insignificant_digits = 0;
bool leading_zero = false;
// A double has a 53bit significand (once the hidden bit has been added).
// Halfway cases thus have at most 54bits. Therefore 54/4 + 1 digits are
// sufficient to represent halfway cases. By adding another digit we can keep
// track of dropped digits.
int buffer_pos = 0;
bool nonzero_digit_dropped = false;
// Skip leading 0s.
while (*current == '0') {
leading_zero = true;
++current;
if (current == end) return 0;
}
int begin_pos = buffer_pos;
while ((*current >= '0' && *current <= '9')
|| (*current >= 'a' && *current <= 'f')
|| (*current >= 'A' && *current <= 'F')) {
if (significant_digits <= max_hex_significant_digits) {
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
} else {
insignificant_digits++;
nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
}
++current;
if (current == end) break;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
if (significant_digits == 0) {
return leading_zero ? 0 : JUNK_STRING_VALUE;
}
if (nonzero_digit_dropped) {
ASSERT(insignificant_digits > 0);
insignificant_digits--;
buffer[buffer_pos++] = '1';
}
buffer[buffer_pos] = '\0';
double result;
StringToInt(buffer, begin_pos, 16, &result);
if (insignificant_digits > 0) {
// Multiplying by a power of 2 doesn't cause a loss of precision.
result *= pow(16.0, insignificant_digits);
}
return result;
}
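The pow(16.0, insignificant_digits) compensation above works because a power of two never loses precision in binary floating point; a tiny standalone check of that claim:

```cpp
#include <cassert>
#include <cmath>

int main() {
  // Two trailing hex digits were dropped, so scale the parsed prefix by 16^2.
  double significant = static_cast<double>(0x123);
  double result = significant * std::pow(16.0, 2);
  assert(result == static_cast<double>(0x12300));
  return 0;
}
```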
// Converts a string to a double value. Assumes the Iterator supports
// the following operations:
// 1. current == end (other ops are not allowed), current != end.
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
static double InternalStringToDouble(Iterator current,
EndMark end,
                                     int flags,
                                     double empty_string_val) {
-  double result = 0.0;
-  int index = 0;
+  // To make sure that iterator dereferencing is valid the following
+  // convention is used:
// 1. Each '++current' statement is followed by check for equality to 'end'.
// 2. If AdvanceToNonspace returned false then current == end.
// 3. If 'current' becomes be equal to 'end' the function returns or goes to
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
// The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
const int kBufferSize = kMaxSignificantDigits + 10;
char buffer[kBufferSize]; // NOLINT: size is known at compile time.
int buffer_pos = 0;
// Exponent will be adjusted if insignificant digits of the integer part
// or insignificant leading zeros of the fractional part are dropped.
int exponent = 0;
int significant_digits = 0;
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
double signed_zero = 0.0;
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
} else if (*current == '-') {
buffer[buffer_pos++] = '-';
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
signed_zero = -0.0;
}
int len = GetLength(str); static const char kInfinitySymbol[] = "Infinity";
if (*current == kInfinitySymbol[0]) {
if (!SubStringEquals(&current, end, kInfinitySymbol)) {
return JUNK_STRING_VALUE;
}
-  // Skip leading spaces.
-  while ((index < len) && IsSpace(str, index)) index++;
-  // Is the string empty?
-  if (index >= len) return empty_string_val;
-  // Get the first character.
-  uint16_t first = GetChar(str, index);
+    if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+      return JUNK_STRING_VALUE;
+    }
+    ASSERT(buffer_pos == 0 || buffer[0] == '-');
+    return buffer_pos > 0 ? -V8_INFINITY : V8_INFINITY;
+  }
+  bool leading_zero = false;
+  if (*current == '0') {
++current;
if (current == end) return signed_zero;
-  // Numbers can only start with '-', '+', '.', 'I' (Infinity), or a digit.
-  if (first != '-' && first != '+' && first != '.' && first != 'I' &&
-      (first > '9' || first < '0')) {
-    return JUNK_STRING_VALUE;
+    leading_zero = true;
+    // It could be hexadecimal value.
+    if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
if (current == end) return JUNK_STRING_VALUE; // "0x".
double result = InternalHexadecimalStringToDouble(current,
end,
buffer + buffer_pos,
allow_trailing_junk);
return (buffer_pos > 0 && buffer[0] == '-') ? -result : result;
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
if (current == end) return signed_zero;
}
} }
-  // Compute sign of result based on first character.
-  int sign = 1;
-  if (first == '-') {
-    sign = -1;
-    index++;
-    // String only containing a '-' are junk chars.
-    if (index == len) return JUNK_STRING_VALUE;
-  }
+  bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+  // Copy significant digits of the integer part (if any) to the buffer.
+  while (*current >= '0' && *current <= '9') {
+    if (significant_digits < kMaxSignificantDigits) {
+      ASSERT(buffer_pos < kBufferSize);
+      buffer[buffer_pos++] = static_cast<char>(*current);
+      significant_digits++;
// Will later check if it's an octal in the buffer.
// do we have a hex number?
// (since the string is 0-terminated, it's ok to look one char beyond the end)
if ((flags & ALLOW_HEX) != 0 &&
(index + 1) < len &&
GetChar(str, index) == '0' &&
(GetChar(str, index + 1) == 'x' || GetChar(str, index + 1) == 'X')) {
index += 2;
index = StringToInt(str, index, 16, &result);
} else if ((flags & ALLOW_OCTALS) != 0 && ShouldParseOctal(str, index)) {
// NOTE: We optimistically try to parse the number as an octal (if
// we're allowed to), even though this is not as dictated by
// ECMA-262. The reason for doing this is compatibility with IE and
// Firefox.
index = StringToInt(str, index, 8, &result);
} else {
const char* cstr = GetCString(str, index);
const char* end;
// Optimistically parse the number and then, if that fails,
// check if it might have been {+,-,}Infinity.
result = gay_strtod(cstr, &end);
ReleaseCString(str, cstr);
if (result != 0.0 || end != cstr) {
// It appears that strtod worked
index += static_cast<int>(end - cstr);
    } else {
-      // Check for {+,-,}Infinity
-      bool is_negative = (GetChar(str, index) == '-');
-      if (GetChar(str, index) == '+' || GetChar(str, index) == '-')
-        index++;
-      if (!SubStringEquals(str, index, "Infinity"))
+      insignificant_digits++;  // Move the digit into the exponential part.
+      nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+    }
+    octal = octal && *current < '8';
+    ++current;
+    if (current == end) goto parsing_done;
}
if (significant_digits == 0) {
octal = false;
}
if (*current == '.') {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = '.';
++current;
if (current == end) {
if (significant_digits == 0 && !leading_zero) {
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
result = is_negative ? -V8_INFINITY : V8_INFINITY; } else {
index += 8; goto parsing_done;
}
}
if (significant_digits == 0) {
// octal = false;
// Integer part consists of 0 or is absent. Significant digits start after
// leading zeros (if any).
while (*current == '0') {
++current;
if (current == end) return signed_zero;
exponent--; // Move this 0 into the exponent.
}
}
// There is the fractional part.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
} else {
// Ignore insignificant digits in the fractional part.
nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
}
++current;
if (current == end) goto parsing_done;
} }
} }
if ((flags & ALLOW_TRAILING_JUNK) == 0) { if (!leading_zero && exponent == 0 && significant_digits == 0) {
// skip trailing spaces // If leading_zeros is true then the string contains zeros.
while ((index < len) && IsSpace(str, index)) index++; // If exponent < 0 then string was [+-]\.0*...
// string ending with junk? // If significant_digits != 0 the string is not equal to 0.
if (index < len) return JUNK_STRING_VALUE; // Otherwise there are no digits in the string.
return JUNK_STRING_VALUE;
} }
return sign * result; // Parse exponential part.
} if (*current == 'e' || *current == 'E') {
if (octal) return JUNK_STRING_VALUE;
++current;
if (current == end) {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
}
}
char sign = '+';
if (*current == '+' || *current == '-') {
sign = static_cast<char>(*current);
++current;
if (current == end) {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
}
}
}
if (current == end || *current < '0' || *current > '9') {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
}
}
const int max_exponent = INT_MAX / 2;
ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
int num = 0;
do {
// Check overflow.
int digit = *current - '0';
if (num >= max_exponent / 10
&& !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
num = max_exponent;
} else {
num = num * 10 + digit;
}
++current;
} while (current != end && *current >= '0' && *current <= '9');
exponent += (sign == '-' ? -num : num);
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
parsing_done:
exponent += insignificant_digits;
if (octal) {
buffer[buffer_pos] = '\0';
// ALLOW_OCTALS is set and there is no '8' or '9' in insignificant
// digits. Check significant digits now.
char sign = '+';
const char* s = buffer;
if (*s == '-' || *s == '+') sign = *s++;
double result;
s += StringToInt(s, 0, 8, &result);
if (!allow_trailing_junk && *s != '\0') return JUNK_STRING_VALUE;
if (sign == '-') result = -result;
if (insignificant_digits > 0) {
result *= pow(8.0, insignificant_digits);
}
return result;
}
if (nonzero_digit_dropped) {
if (insignificant_digits) buffer[buffer_pos++] = '.';
buffer[buffer_pos++] = '1';
}
if (exponent != 0) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = 'e';
if (exponent < 0) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = '-';
exponent = -exponent;
}
if (exponent > 999) exponent = 999; // Result will be Infinity or 0 or -0.
const int exp_digits = 3;
for (int i = 0; i < exp_digits; i++) {
buffer[buffer_pos + exp_digits - 1 - i] = '0' + exponent % 10;
exponent /= 10;
}
ASSERT(exponent == 0);
buffer_pos += exp_digits;
}
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
return gay_strtod(buffer, NULL);
}
double StringToDouble(String* str, int flags, double empty_string_val) {
-  return InternalStringToDouble(str, flags, empty_string_val);
+  StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToDouble(begin, end, flags, empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToDouble(begin, end, flags, empty_string_val);
} else {
StringInputBuffer buffer(str);
return InternalStringToDouble(StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
flags,
empty_string_val);
}
}


double StringToDouble(const char* str, int flags, double empty_string_val) {
-  return InternalStringToDouble(str, flags, empty_string_val);
+  const char* end = str + StrLength(str);
return InternalStringToDouble(str, end, flags, empty_string_val);
}
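A standalone illustration (not V8 code) of what the rewritten InternalStringToDouble ultimately hands to gay_strtod: at most kMaxSignificantDigits digits are kept in the buffer and dropped integer digits are folded into an explicit exponent, so the final binary rounding happens on a short string:

```cpp
#include <cassert>
#include <cstdio>
#include <cstdlib>

int main() {
  // Pretend only "123" was kept and ten trailing integer digits were dropped:
  // the parser emits "123e010"-style text instead of the full literal.
  char buffer[32];
  std::snprintf(buffer, sizeof(buffer), "%se%03d", "123", 10);
  double via_buffer = std::strtod(buffer, NULL);
  double direct = std::strtod("1230000000000", NULL);
  assert(via_buffer == direct);
  std::printf("%s -> %g\n", buffer, via_buffer);
  return 0;
}
```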

1709
deps/v8/src/data-flow.cc

File diff suppressed because it is too large

341
deps/v8/src/data-flow.h

@@ -37,6 +37,9 @@
namespace v8 {
namespace internal {

+// Forward declarations.
+class Node;
+
class BitVector: public ZoneObject {
 public:
  explicit BitVector(int length)
@@ -205,344 +208,6 @@ struct ReachingDefinitionsData BASE_EMBEDDED {
};
// Flow-graph nodes.
class Node: public ZoneObject {
public:
Node() : number_(-1), mark_(false) {}
virtual ~Node() {}
virtual bool IsExitNode() { return false; }
virtual bool IsBlockNode() { return false; }
virtual bool IsBranchNode() { return false; }
virtual bool IsJoinNode() { return false; }
virtual void AddPredecessor(Node* predecessor) = 0;
virtual void AddSuccessor(Node* successor) = 0;
bool IsMarkedWith(bool mark) { return mark_ == mark; }
void MarkWith(bool mark) { mark_ = mark; }
// Perform a depth first search and record preorder and postorder
// traversal orders.
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) = 0;
int number() { return number_; }
void set_number(int number) { number_ = number; }
// Functions used by data-flow analyses.
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result) = 0;
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
// Functions used by dead-code elimination.
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
#ifdef DEBUG
void AssignNodeNumber();
void PrintReachingDefinitions();
virtual void PrintText() = 0;
#endif
protected:
ReachingDefinitionsData rd_;
private:
int number_;
bool mark_;
DISALLOW_COPY_AND_ASSIGN(Node);
};
// An exit node has a arbitrarily many predecessors and no successors.
class ExitNode: public Node {
public:
ExitNode() : predecessors_(4) {}
virtual bool IsExitNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
ZoneList<Node*> predecessors_;
DISALLOW_COPY_AND_ASSIGN(ExitNode);
};
// Block nodes have a single successor and predecessor and a list of
// instructions.
class BlockNode: public Node {
public:
BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
static BlockNode* cast(Node* node) {
ASSERT(node->IsBlockNode());
return reinterpret_cast<BlockNode*>(node);
}
virtual bool IsBlockNode() { return true; }
bool is_empty() { return instructions_.is_empty(); }
ZoneList<AstNode*>* instructions() { return &instructions_; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
void AddInstruction(AstNode* instruction) {
instructions_.Add(instruction);
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
Node* predecessor_;
Node* successor_;
ZoneList<AstNode*> instructions_;
DISALLOW_COPY_AND_ASSIGN(BlockNode);
};
// Branch nodes have a single predecessor and a pair of successors.
class BranchNode: public Node {
public:
BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
virtual bool IsBranchNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor1_ == NULL && successor != NULL);
if (successor0_ == NULL) {
successor0_ = successor;
} else {
successor1_ = successor;
}
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
Node* predecessor_;
Node* successor0_;
Node* successor1_;
DISALLOW_COPY_AND_ASSIGN(BranchNode);
};
// Join nodes have arbitrarily many predecessors and a single successor.
class JoinNode: public Node {
public:
JoinNode() : predecessors_(2), successor_(NULL) {}
static JoinNode* cast(Node* node) {
ASSERT(node->IsJoinNode());
return reinterpret_cast<JoinNode*>(node);
}
virtual bool IsJoinNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
ZoneList<Node*> predecessors_;
Node* successor_;
DISALLOW_COPY_AND_ASSIGN(JoinNode);
};
// Flow graphs have a single entry and single exit. The empty flowgraph is
// represented by both entry and exit being NULL.
class FlowGraph BASE_EMBEDDED {
public:
static FlowGraph Empty() {
FlowGraph graph;
graph.entry_ = new BlockNode();
graph.exit_ = graph.entry_;
return graph;
}
bool is_empty() const {
return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
}
Node* entry() const { return entry_; }
Node* exit() const { return exit_; }
// Add a single instruction to the end of this flowgraph.
void AppendInstruction(AstNode* instruction);
// Add a single node to the end of this flow graph.
void AppendNode(Node* node);
// Add a flow graph fragment to the end of this one.
void AppendGraph(FlowGraph* graph);
// Concatenate an if-then-else flow-graph to this one. Control is split
// and merged, so the graph remains single-entry, single-exit.
void Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge);
// Concatenate a forward loop (e.g., while or for loop) flow-graph to this
// one. Control is split by the condition and merged back from the back
// edge at end of the body to the beginning of the condition. The single
// (free) exit of the result graph is the right (false) arm of the branch
// node.
void Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body);
#ifdef DEBUG
void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
#endif
private:
FlowGraph() : entry_(NULL), exit_(NULL) {}
Node* entry_;
Node* exit_;
};
// Construct a flow graph from a function literal. Build pre- and postorder
// traversal orders as a byproduct.
class FlowGraphBuilder: public AstVisitor {
public:
explicit FlowGraphBuilder(int variable_count)
: graph_(FlowGraph::Empty()),
global_exit_(NULL),
preorder_(4),
postorder_(4),
variable_count_(variable_count),
body_definitions_(4) {
}
void Build(FunctionLiteral* lit);
FlowGraph* graph() { return &graph_; }
ZoneList<Node*>* preorder() { return &preorder_; }
ZoneList<Node*>* postorder() { return &postorder_; }
ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
private:
ExitNode* global_exit() { return global_exit_; }
// Helpers to allow tranforming the ast during flow graph construction.
void VisitStatements(ZoneList<Statement*>* stmts);
Statement* ProcessStatement(Statement* stmt);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
FlowGraph graph_;
ExitNode* global_exit_;
ZoneList<Node*> preorder_;
ZoneList<Node*> postorder_;
// The flow graph builder collects a list of explicit definitions
// (assignments and count operations) to stack-allocated variables to use
// for reaching definitions analysis. It does not count the implicit
// definition at function entry. AST node numbers in the AST are used to
// refer into this list.
int variable_count_;
ZoneList<Expression*> body_definitions_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
// This class is used to number all expressions in the AST according to
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {

588
deps/v8/src/flow-graph.cc

@@ -0,0 +1,588 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "flow-graph.h"
namespace v8 {
namespace internal {
void FlowGraph::AppendInstruction(AstNode* instruction) {
// Add a (non-null) AstNode to the end of the graph fragment.
ASSERT(instruction != NULL);
if (exit()->IsExitNode()) return;
if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
BlockNode::cast(exit())->AddInstruction(instruction);
}
void FlowGraph::AppendNode(Node* node) {
// Add a node to the end of the graph. An empty block is added to
// maintain edge-split form (that no join nodes or exit nodes as
// successors to branch nodes).
ASSERT(node != NULL);
if (exit()->IsExitNode()) return;
if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
AppendNode(new BlockNode());
}
exit()->AddSuccessor(node);
node->AddPredecessor(exit());
exit_ = node;
}
void FlowGraph::AppendGraph(FlowGraph* graph) {
// Add a flow graph fragment to the end of this one. An empty block is
// added to maintain edge-split form (that no join nodes or exit nodes as
// successors to branch nodes).
ASSERT(graph != NULL);
if (exit()->IsExitNode()) return;
Node* node = graph->entry();
if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
AppendNode(new BlockNode());
}
exit()->AddSuccessor(node);
node->AddPredecessor(exit());
exit_ = graph->exit();
}
void FlowGraph::Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* join) {
// Add the branch node, left flowgraph, join node.
AppendNode(branch);
AppendGraph(left);
AppendNode(join);
// Splice in the right flowgraph.
right->AppendNode(join);
branch->AddSuccessor(right->entry());
right->entry()->AddPredecessor(branch);
}
void FlowGraph::Loop(JoinNode* join,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body) {
// Add the join, condition and branch. Add join's predecessors in
// left-to-right order.
AppendNode(join);
body->AppendNode(join);
AppendGraph(condition);
AppendNode(branch);
// Splice in the body flowgraph.
branch->AddSuccessor(body->entry());
body->entry()->AddPredecessor(branch);
}
void ExitNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
preorder->Add(this);
postorder->Add(this);
}
void BlockNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void BranchNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor0_ != NULL && successor1_ != NULL);
preorder->Add(this);
if (!successor1_->IsMarkedWith(mark)) {
successor1_->MarkWith(mark);
successor1_->Traverse(mark, preorder, postorder);
}
if (!successor0_->IsMarkedWith(mark)) {
successor0_->MarkWith(mark);
successor0_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void JoinNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void FlowGraphBuilder::Build(FunctionLiteral* lit) {
global_exit_ = new ExitNode();
VisitStatements(lit->body());
if (HasStackOverflow()) return;
// The graph can end with a branch node (if the function ended with a
// loop). Maintain edge-split form (no join nodes or exit nodes as
// successors to branch nodes).
if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
graph_.AppendNode(global_exit_);
// Build preorder and postorder traversal orders. All the nodes in
// the graph have the same mark flag. For the traversal, use that
// flag's negation. Traversal will flip all the flags.
bool mark = graph_.entry()->IsMarkedWith(false);
graph_.entry()->MarkWith(mark);
graph_.entry()->Traverse(mark, &preorder_, &postorder_);
}
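A standalone sketch (not V8 code) of the mark-flipping idea Build() describes: every node keeps a bool mark, each traversal targets the negation of the current value, so the next traversal can reuse the flags without a separate reset pass.

```cpp
#include <cstdio>
#include <vector>

struct NodeSketch {
  bool mark = false;
  std::vector<NodeSketch*> successors;
};

static void Traverse(NodeSketch* n, bool mark, std::vector<NodeSketch*>* post) {
  n->mark = mark;                      // mark on entry, like MarkWith()
  for (NodeSketch* s : n->successors) {
    if (s->mark != mark) Traverse(s, mark, post);
  }
  post->push_back(n);                  // postorder position
}

int main() {
  NodeSketch a, b, c;
  a.successors = {&b, &c};
  b.successors = {&c};
  for (int pass = 0; pass < 2; pass++) {   // two traversals, no reset needed
    std::vector<NodeSketch*> postorder;
    bool mark = !a.mark;                   // use the negation of the current flag
    Traverse(&a, mark, &postorder);
    std::printf("pass %d visited %zu nodes\n", pass, postorder.size());
  }
  return 0;
}
```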
// This function peels off one iteration of a for-loop. The return value
// is either a block statement containing the peeled loop or NULL in case
// there is a stack overflow.
static Statement* PeelForLoop(ForStatement* stmt) {
// Mark this for-statement as processed.
stmt->set_peel_this_loop(false);
// Create new block containing the init statement of the for-loop and
// an if-statement containing the peeled iteration and the original
// loop without the init-statement.
Block* block = new Block(NULL, 2, false);
if (stmt->init() != NULL) {
Statement* init = stmt->init();
// The init statement gets the statement position of the for-loop
// to make debugging of peeled loops possible.
init->set_statement_pos(stmt->statement_pos());
block->AddStatement(init);
}
// Copy the condition.
CopyAstVisitor copy_visitor;
Expression* cond_copy = stmt->cond() != NULL
? copy_visitor.DeepCopyExpr(stmt->cond())
: new Literal(Factory::true_value());
if (copy_visitor.HasStackOverflow()) return NULL;
// Construct a block with the peeled body and the rest of the for-loop.
Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
if (copy_visitor.HasStackOverflow()) return NULL;
Statement* next_copy = stmt->next() != NULL
? copy_visitor.DeepCopyStmt(stmt->next())
: new EmptyStatement();
if (copy_visitor.HasStackOverflow()) return NULL;
Block* peeled_body = new Block(NULL, 3, false);
peeled_body->AddStatement(body_copy);
peeled_body->AddStatement(next_copy);
peeled_body->AddStatement(stmt);
// Remove the duplicated init statement from the for-statement.
stmt->set_init(NULL);
// Create new test at the top and add it to the newly created block.
IfStatement* test = new IfStatement(cond_copy,
peeled_body,
new EmptyStatement());
block->AddStatement(test);
return block;
}
void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0, len = stmts->length(); i < len; i++) {
stmts->at(i) = ProcessStatement(stmts->at(i));
}
}
Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
if (FLAG_loop_peeling &&
stmt->AsForStatement() != NULL &&
stmt->AsForStatement()->peel_this_loop()) {
Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
if (tmp_stmt == NULL) {
SetStackOverflow();
} else {
stmt = tmp_stmt;
}
}
Visit(stmt);
return stmt;
}
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FlowGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
// Nothing to do.
}
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
Visit(stmt->condition());
BranchNode* branch = new BranchNode();
FlowGraph original = graph_;
graph_ = FlowGraph::Empty();
stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
FlowGraph left = graph_;
graph_ = FlowGraph::Empty();
stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
if (HasStackOverflow()) return;
JoinNode* join = new JoinNode();
original.Split(branch, &left, &graph_, join);
graph_ = original;
}
void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
JoinNode* join = new JoinNode();
FlowGraph original = graph_;
graph_ = FlowGraph::Empty();
if (stmt->cond() != NULL) Visit(stmt->cond());
BranchNode* branch = new BranchNode();
FlowGraph condition = graph_;
graph_ = FlowGraph::Empty();
stmt->set_body(ProcessStatement(stmt->body()));
if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
if (HasStackOverflow()) return;
original.Loop(join, &condition, branch, &graph_);
graph_ = original;
}
void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitConditional(Conditional* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitLiteral(Literal* expr) {
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
// Left-hand side can be a variable or property (or reference error) but
// not both.
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
if (expr->is_compound()) Visit(expr->target());
Visit(expr->value());
if (var->IsStackAllocated()) {
// The first definition in the body is numbered n, where n is the
// number of parameters and stack-allocated locals.
expr->set_num(body_definitions_.length() + variable_count_);
body_definitions_.Add(expr);
}
} else if (prop != NULL) {
Visit(prop->obj());
if (!prop->key()->IsPropertyName()) Visit(prop->key());
Visit(expr->value());
}
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
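// Numbering sketch: with, say, two parameters and three stack-allocated
// locals (variable_count_ == 5), the first assignment recorded in
// body_definitions_ gets num == 5, the second num == 6, and so on; count
// operations in VisitCountOperation are numbered from the same sequence.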
void FlowGraphBuilder::VisitThrow(Throw* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitProperty(Property* expr) {
Visit(expr->obj());
if (!expr->key()->IsPropertyName()) Visit(expr->key());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitCall(Call* expr) {
Visit(expr->expression());
ZoneList<Expression*>* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
Visit(arguments->at(i));
}
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::BIT_NOT:
case Token::DELETE:
case Token::TYPEOF:
case Token::VOID:
SetStackOverflow();
break;
case Token::ADD:
case Token::SUB:
Visit(expr->expression());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
Visit(expr->expression());
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (var != NULL && var->IsStackAllocated()) {
// The first definition in the body is numbered n, where n is the number
// of parameters and stack-allocated locals.
expr->set_num(body_definitions_.length() + variable_count_);
body_definitions_.Add(expr);
}
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
case Token::OR:
case Token::AND:
SetStackOverflow();
break;
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
case Token::SHL:
case Token::SHR:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::SAR:
Visit(expr->left());
Visit(expr->right());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
switch (expr->op()) {
case Token::EQ:
case Token::NE:
case Token::EQ_STRICT:
case Token::NE_STRICT:
case Token::INSTANCEOF:
case Token::IN:
SetStackOverflow();
break;
case Token::LT:
case Token::GT:
case Token::LTE:
case Token::GTE:
Visit(expr->left());
Visit(expr->right());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
SetStackOverflow();
}
} } // namespace v8::internal

379
deps/v8/src/flow-graph.h

@ -0,0 +1,379 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FLOW_GRAPH_H_
#define V8_FLOW_GRAPH_H_
#include "v8.h"
#include "data-flow.h"
#include "zone.h"
namespace v8 {
namespace internal {
// Flow-graph nodes.
class Node: public ZoneObject {
public:
Node() : number_(-1), mark_(false) {}
virtual ~Node() {}
virtual bool IsExitNode() { return false; }
virtual bool IsBlockNode() { return false; }
virtual bool IsBranchNode() { return false; }
virtual bool IsJoinNode() { return false; }
virtual void AddPredecessor(Node* predecessor) = 0;
virtual void AddSuccessor(Node* successor) = 0;
bool IsMarkedWith(bool mark) { return mark_ == mark; }
void MarkWith(bool mark) { mark_ = mark; }
// Perform a depth first search and record preorder and postorder
// traversal orders.
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) = 0;
int number() { return number_; }
void set_number(int number) { number_ = number; }
// Functions used by data-flow analyses.
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result) = 0;
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
// Functions used by dead-code elimination.
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
#ifdef DEBUG
void AssignNodeNumber();
void PrintReachingDefinitions();
virtual void PrintText() = 0;
#endif
protected:
ReachingDefinitionsData rd_;
private:
int number_;
bool mark_;
DISALLOW_COPY_AND_ASSIGN(Node);
};
// An exit node has arbitrarily many predecessors and no successors.
class ExitNode: public Node {
public:
ExitNode() : predecessors_(4) {}
virtual bool IsExitNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
ZoneList<Node*> predecessors_;
DISALLOW_COPY_AND_ASSIGN(ExitNode);
};
// Block nodes have a single successor and predecessor and a list of
// instructions.
class BlockNode: public Node {
public:
BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
static BlockNode* cast(Node* node) {
ASSERT(node->IsBlockNode());
return reinterpret_cast<BlockNode*>(node);
}
virtual bool IsBlockNode() { return true; }
bool is_empty() { return instructions_.is_empty(); }
ZoneList<AstNode*>* instructions() { return &instructions_; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
void AddInstruction(AstNode* instruction) {
instructions_.Add(instruction);
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
Node* predecessor_;
Node* successor_;
ZoneList<AstNode*> instructions_;
DISALLOW_COPY_AND_ASSIGN(BlockNode);
};
// Branch nodes have a single predecessor and a pair of successors.
class BranchNode: public Node {
public:
BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
virtual bool IsBranchNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor1_ == NULL && successor != NULL);
if (successor0_ == NULL) {
successor0_ = successor;
} else {
successor1_ = successor;
}
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
Node* predecessor_;
Node* successor0_;
Node* successor1_;
DISALLOW_COPY_AND_ASSIGN(BranchNode);
};
// Join nodes have arbitrarily many predecessors and a single successor.
class JoinNode: public Node {
public:
JoinNode() : predecessors_(2), successor_(NULL) {}
static JoinNode* cast(Node* node) {
ASSERT(node->IsJoinNode());
return reinterpret_cast<JoinNode*>(node);
}
virtual bool IsJoinNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
ZoneList<Node*> predecessors_;
Node* successor_;
DISALLOW_COPY_AND_ASSIGN(JoinNode);
};
// Flow graphs have a single entry and single exit. The empty flowgraph is
// represented by both entry and exit being NULL.
class FlowGraph BASE_EMBEDDED {
public:
static FlowGraph Empty() {
FlowGraph graph;
graph.entry_ = new BlockNode();
graph.exit_ = graph.entry_;
return graph;
}
bool is_empty() const {
return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
}
Node* entry() const { return entry_; }
Node* exit() const { return exit_; }
// Add a single instruction to the end of this flowgraph.
void AppendInstruction(AstNode* instruction);
// Add a single node to the end of this flow graph.
void AppendNode(Node* node);
// Add a flow graph fragment to the end of this one.
void AppendGraph(FlowGraph* graph);
// Concatenate an if-then-else flow-graph to this one. Control is split
// and merged, so the graph remains single-entry, single-exit.
void Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge);
// Concatenate a forward loop (e.g., while or for loop) flow-graph to this
// one. Control is split by the condition and merged back from the back
// edge at the end of the body to the beginning of the condition. The single
// (free) exit of the result graph is the right (false) arm of the branch
// node.
void Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body);
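// Shape sketch of the two helpers above, mirroring how
// FlowGraphBuilder::VisitIfStatement and VisitForStatement use them:
//
//   Split(branch, left, right, merge):
//     ... -> branch --> left  --> merge -> ...
//                   \-> right -/
//
//   Loop(merge, condition, branch, body):
//     ... -> merge -> condition -> branch --> body --> (back to merge)
//                                        \--> (free exit of the graph)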
#ifdef DEBUG
void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
#endif
private:
FlowGraph() : entry_(NULL), exit_(NULL) {}
Node* entry_;
Node* exit_;
};
// Construct a flow graph from a function literal. Build pre- and postorder
// traversal orders as a byproduct.
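// A sketch of typical use, assuming the caller already knows the function's
// variable count:
//   FlowGraphBuilder builder(variable_count);
//   builder.Build(function_literal);
//   if (!builder.HasStackOverflow()) {
//     FlowGraph* graph = builder.graph();
//     ZoneList<Node*>* order = builder.postorder();
//     // ... run data-flow passes over the graph / traversal order ...
//   }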
class FlowGraphBuilder: public AstVisitor {
public:
explicit FlowGraphBuilder(int variable_count)
: graph_(FlowGraph::Empty()),
global_exit_(NULL),
preorder_(4),
postorder_(4),
variable_count_(variable_count),
body_definitions_(4) {
}
void Build(FunctionLiteral* lit);
FlowGraph* graph() { return &graph_; }
ZoneList<Node*>* preorder() { return &preorder_; }
ZoneList<Node*>* postorder() { return &postorder_; }
ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
private:
ExitNode* global_exit() { return global_exit_; }
// Helpers to allow transforming the AST during flow graph construction.
void VisitStatements(ZoneList<Statement*>* stmts);
Statement* ProcessStatement(Statement* stmt);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
FlowGraph graph_;
ExitNode* global_exit_;
ZoneList<Node*> preorder_;
ZoneList<Node*> postorder_;
// The flow graph builder collects a list of explicit definitions
// (assignments and count operations) to stack-allocated variables to use
// for reaching definitions analysis. It does not count the implicit
// definition at function entry. AST node numbers are used to refer into
// this list.
int variable_count_;
ZoneList<Expression*> body_definitions_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
} } // namespace v8::internal
#endif // V8_FLOW_GRAPH_H_

2
deps/v8/src/handles.cc

@ -541,7 +541,7 @@ int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
void CustomArguments::IterateInstance(ObjectVisitor* v) { void CustomArguments::IterateInstance(ObjectVisitor* v) {
v->VisitPointers(values_, values_ + 4); v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
} }

2
deps/v8/src/heap.cc

@ -2663,7 +2663,7 @@ Object* Heap::CopyJSObject(JSObject* source) {
FixedArray* elements = FixedArray::cast(source->elements()); FixedArray* elements = FixedArray::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties()); FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary. // Update elements if necessary.
if (elements->length()> 0) { if (elements->length() > 0) {
Object* elem = CopyFixedArray(elements); Object* elem = CopyFixedArray(elements);
if (elem->IsFailure()) return elem; if (elem->IsFailure()) return elem;
JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); JSObject::cast(clone)->set_elements(FixedArray::cast(elem));

2
deps/v8/src/heap.h

@ -938,6 +938,8 @@ class Heap : public AllStatic {
static void RecordStats(HeapStats* stats); static void RecordStats(HeapStats* stats);
static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
private: private:
static int reserved_semispace_size_; static int reserved_semispace_size_;
static int max_semispace_size_; static int max_semispace_size_;

17
deps/v8/src/ia32/codegen-ia32.cc

@ -7424,10 +7424,8 @@ void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
} }
} }
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// According to ECMA-262 section 11.11, page 58, the binary logical // According to ECMA-262 section 11.11, page 58, the binary logical
// operators must yield the result of one of the two expressions // operators must yield the result of one of the two expressions
// before any ToBoolean() conversions. This means that the value // before any ToBoolean() conversions. This means that the value
@ -7437,7 +7435,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// control flow), we force the right hand side to do the same. This // control flow), we force the right hand side to do the same. This
// is necessary because we assume that if we get control flow on the // is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths. // last path out of an expression we got it on all paths.
if (op == Token::AND) { if (node->op() == Token::AND) {
ASSERT(!in_safe_int32_mode()); ASSERT(!in_safe_int32_mode());
JumpTarget is_true; JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true); ControlDestination dest(&is_true, destination()->false_target(), true);
@ -7501,7 +7499,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
exit.Bind(); exit.Bind();
} }
} else if (op == Token::OR) { } else {
ASSERT(node->op() == Token::OR);
ASSERT(!in_safe_int32_mode()); ASSERT(!in_safe_int32_mode());
JumpTarget is_false; JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false); ControlDestination dest(destination()->true_target(), &is_false, false);
@ -7563,7 +7562,15 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// Exit (always with a materialized value). // Exit (always with a materialized value).
exit.Bind(); exit.Bind();
} }
}
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
if (node->op() == Token::AND || node->op() == Token::OR) {
GenerateLogicalBooleanOperation(node);
} else if (in_safe_int32_mode()) { } else if (in_safe_int32_mode()) {
Visit(node->left()); Visit(node->left());
Visit(node->right()); Visit(node->right());

3
deps/v8/src/ia32/codegen-ia32.h

@ -489,6 +489,9 @@ class CodeGenerator: public AstVisitor {
// control destination. // control destination.
void ToBoolean(ControlDestination* destination); void ToBoolean(ControlDestination* destination);
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
void GenericBinaryOperation( void GenericBinaryOperation(
Token::Value op, Token::Value op,
StaticType* type, StaticType* type,

24
deps/v8/src/ia32/ic-ia32.cc

@ -73,11 +73,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check for the absence of an interceptor. // Check for the absence of an interceptor.
// Load the map into r0. // Load the map into r0.
__ mov(r0, FieldOperand(receiver, JSObject::kMapOffset)); __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map.
__ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
// Jump to miss if the interceptor bit is set. // Bail out if the receiver has a named interceptor.
__ test(FieldOperand(r0, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label, not_taken); __ j(not_zero, miss_label, not_taken);
// Bail out if we have a JS global proxy object. // Bail out if we have a JS global proxy object.
@ -202,17 +201,10 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ xor_(r0, Operand(r1)); __ xor_(r0, Operand(r1));
// Compute capacity mask. // Compute capacity mask.
const int kCapacityOffset = __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
NumberDictionary::kHeaderSize +
NumberDictionary::kCapacityIndex * kPointerSize;
__ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int __ shr(r1, kSmiTagSize); // convert smi to int
__ dec(r1); __ dec(r1);
const int kElementsStartOffset =
NumberDictionary::kHeaderSize +
NumberDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before giving up. // Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4; const int kProbes = 4;
for (int i = 0; i < kProbes; i++) { for (int i = 0; i < kProbes; i++) {
@ -232,7 +224,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ cmp(key, FieldOperand(elements, __ cmp(key, FieldOperand(elements,
r2, r2,
times_pointer_size, times_pointer_size,
kElementsStartOffset)); NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) { if (i != (kProbes - 1)) {
__ j(equal, &done, taken); __ j(equal, &done, taken);
} else { } else {
@ -242,14 +234,16 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ bind(&done); __ bind(&done);
// Check that the value is a normal propety. // Check that the value is a normal propety.
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0); ASSERT_EQ(NORMAL, 0);
__ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize)); Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss); __ j(not_zero, miss);
// Get the value at the masked, scaled index. // Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize; const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); __ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
} }

29
deps/v8/src/ia32/stub-cache-ia32.cc

@ -276,14 +276,15 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder, Register holder,
Register name, Register name,
JSObject* holder_obj) { JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
__ push(name); __ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor(); InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
ASSERT(!Heap::InNewSpace(interceptor)); ASSERT(!Heap::InNewSpace(interceptor));
__ mov(receiver, Immediate(Handle<Object>(interceptor))); Register scratch = name;
__ mov(scratch, Immediate(Handle<Object>(interceptor)));
__ push(scratch);
__ push(receiver); __ push(receiver);
__ push(FieldOperand(receiver, InterceptorInfo::kDataOffset)); __ push(holder);
__ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
} }
@ -1045,17 +1046,16 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver); // receiver __ push(receiver); // receiver
__ push(reg); // holder __ push(reg); // holder
__ mov(other, Immediate(callback_handle)); __ mov(other, Immediate(callback_handle));
__ push(other);
__ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name __ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer. // Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const Arguments& to the C++ callback. // This will be passed as the const AccessorInfo& to the C++ callback.
__ mov(eax, esp); __ mov(eax, esp);
__ add(Operand(eax), Immediate(5 * kPointerSize)); __ add(Operand(eax), Immediate(4 * kPointerSize));
__ mov(ebx, esp); __ mov(ebx, esp);
// Do call through the api. // Do call through the api.
ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace); ASSERT_EQ(5, ApiGetterEntryStub::kStackSpace);
Address getter_address = v8::ToCData<Address>(callback->getter()); Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address); ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun); ApiGetterEntryStub stub(callback_handle, &fun);
@ -1251,8 +1251,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss); __ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin. if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update, Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
attempt_to_grow_elements, finish_push;
// Get the array's length into eax and calculate new length. // Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1278,8 +1277,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(ecx, Operand(esp, argc * kPointerSize)); __ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx); __ mov(Operand(edx, 0), ecx);
__ bind(&finish_push);
// Check if value is a smi. // Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask)); __ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update); __ j(not_zero, &with_rset_update);
@ -1318,10 +1315,13 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// We fit and could grow elements. // We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx); __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
__ mov(ecx, Operand(esp, argc * kPointerSize)); __ mov(ecx, Operand(esp, argc * kPointerSize));
// Push the argument...
__ mov(Operand(edx, 0), ecx); __ mov(Operand(edx, 0), ecx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) { for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize), __ mov(Operand(edx, i * kPointerSize),
Immediate(Factory::undefined_value())); Immediate(Factory::the_hole_value()));
} }
// Restore receiver to edx as finish sequence assumes it's here. // Restore receiver to edx as finish sequence assumes it's here.
@ -1332,7 +1332,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
Immediate(kAllocationDelta)); Immediate(kAllocationDelta));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ jmp(&finish_push); // Elements are in new space, so no remembered set updates are necessary.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin); __ bind(&call_builtin);
} }

58
deps/v8/src/oprofile-agent.cc

@ -32,10 +32,6 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
#ifdef ENABLE_OPROFILE_AGENT
op_agent_t OProfileAgent::handle_ = NULL;
#endif
bool OProfileAgent::Initialize() { bool OProfileAgent::Initialize() {
#ifdef ENABLE_OPROFILE_AGENT #ifdef ENABLE_OPROFILE_AGENT
@ -70,47 +66,43 @@ void OProfileAgent::TearDown() {
} }
#ifdef ENABLE_OPROFILE_AGENT
op_agent_t OProfileAgent::handle_ = NULL;
void OProfileAgent::CreateNativeCodeRegion(const char* name, void OProfileAgent::CreateNativeCodeRegion(const char* name,
const void* ptr, unsigned int size) { const void* ptr, unsigned int size) {
#ifdef ENABLE_OPROFILE_AGENT
if (handle_ == NULL) return;
op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size); op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
#endif
} }
void OProfileAgent::CreateNativeCodeRegion(String* name, void OProfileAgent::CreateNativeCodeRegion(String* name,
const void* ptr, unsigned int size) { const void* ptr, unsigned int size) {
#ifdef ENABLE_OPROFILE_AGENT const char* func_name;
if (handle_ != NULL) { SmartPointer<char> str =
const char* func_name; name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> str = func_name = name->length() > 0 ? *str : "<anonymous>";
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); CreateNativeCodeRegion(func_name, ptr, size);
func_name = name->length() > 0 ? *str : "<anonymous>";
CreateNativeCodeRegion(func_name, ptr, size);
}
#endif
} }
void OProfileAgent::CreateNativeCodeRegion(String* name, String* source, void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
int line_num, const void* ptr, unsigned int size) { int line_num, const void* ptr, unsigned int size) {
#ifdef ENABLE_OPROFILE_AGENT Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
if (handle_ != NULL) { const char* func_name;
Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize); SmartPointer<char> str =
const char* func_name; name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> str = func_name = name->length() > 0 ? *str : "<anonymous>";
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); SmartPointer<char> source_str =
func_name = name->length() > 0 ? *str : "<anonymous>"; source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> source_str = if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); func_name, *source_str, line_num) != -1) {
if (v8::internal::OS::SNPrintF(buf, "%s %s:%d", CreateNativeCodeRegion(buf.start(), ptr, size);
func_name, *source_str, line_num) != -1) { } else {
CreateNativeCodeRegion(buf.start(), ptr, size); CreateNativeCodeRegion("<script/func name too long>", ptr, size);
} else {
CreateNativeCodeRegion("<script/func name too long>", ptr, size);
}
} }
#endif
} }
} }
#endif // ENABLE_OPROFILE_AGENT
} } // namespace v8::internal

10
deps/v8/src/oprofile-agent.h

@ -37,6 +37,14 @@
// system headers (they have __uint64_t), but is defined // system headers (they have __uint64_t), but is defined
// in V8's headers. // in V8's headers.
#include <opagent.h> // NOLINT #include <opagent.h> // NOLINT
#define OPROFILE(Call) \
do { \
if (v8::internal::OProfileAgent::is_enabled()) \
v8::internal::OProfileAgent::Call; \
} while (false)
#else
#define OPROFILE(Call) ((void) 0)
#endif #endif
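// Usage sketch (hypothetical call site, not part of this diff): with the
// macro above a profiling hook can be written as
//   OPROFILE(CreateNativeCodeRegion(name, code_start, code_size));
// which expands to an is_enabled()-guarded call when ENABLE_OPROFILE_AGENT
// is defined and to a no-op otherwise.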
namespace v8 { namespace v8 {
@ -46,13 +54,13 @@ class OProfileAgent {
public: public:
static bool Initialize(); static bool Initialize();
static void TearDown(); static void TearDown();
#ifdef ENABLE_OPROFILE_AGENT
static void CreateNativeCodeRegion(const char* name, static void CreateNativeCodeRegion(const char* name,
const void* ptr, unsigned int size); const void* ptr, unsigned int size);
static void CreateNativeCodeRegion(String* name, static void CreateNativeCodeRegion(String* name,
const void* ptr, unsigned int size); const void* ptr, unsigned int size);
static void CreateNativeCodeRegion(String* name, String* source, int line_num, static void CreateNativeCodeRegion(String* name, String* source, int line_num,
const void* ptr, unsigned int size); const void* ptr, unsigned int size);
#ifdef ENABLE_OPROFILE_AGENT
static bool is_enabled() { return handle_ != NULL; } static bool is_enabled() { return handle_ != NULL; }
private: private:

2
deps/v8/src/platform.h

@ -505,7 +505,6 @@ class Socket {
}; };
#ifdef ENABLE_LOGGING_AND_PROFILING
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Sampler // Sampler
// //
@ -533,6 +532,7 @@ class TickSample {
int frames_count; // Number of captured frames. int frames_count; // Number of captured frames.
}; };
#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler { class Sampler {
public: public:
// Initialize sampler. // Initialize sampler.

36
deps/v8/src/stub-cache.cc

@ -782,6 +782,10 @@ Object* StoreCallbackProperty(Arguments args) {
return *value; return *value;
} }
static const int kAccessorInfoOffsetInInterceptorArgs = 2;
/** /**
* Attempts to load a property with an interceptor (which must be present), * Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain. * but doesn't search the prototype chain.
@ -790,11 +794,12 @@ Object* StoreCallbackProperty(Arguments args) {
* provide any value for the given name. * provide any value for the given name.
*/ */
Object* LoadPropertyWithInterceptorOnly(Arguments args) { Object* LoadPropertyWithInterceptorOnly(Arguments args) {
JSObject* receiver_handle = JSObject::cast(args[0]); Handle<String> name_handle = args.at<String>(0);
JSObject* holder_handle = JSObject::cast(args[1]); Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
Handle<String> name_handle = args.at<String>(2); ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3); ASSERT(args[2]->IsJSObject()); // Receiver.
Object* data_handle = args[4]; ASSERT(args[3]->IsJSObject()); // Holder.
ASSERT(args.length() == 5); // Last arg is data object.
Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter = v8::NamedPropertyGetter getter =
@ -803,8 +808,8 @@ Object* LoadPropertyWithInterceptorOnly(Arguments args) {
{ {
// Use the interceptor getter. // Use the interceptor getter.
CustomArguments args(data_handle, receiver_handle, holder_handle); v8::AccessorInfo info(args.arguments() -
v8::AccessorInfo info(args.end()); kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope; HandleScope scope;
v8::Handle<v8::Value> r; v8::Handle<v8::Value> r;
{ {
@ -842,11 +847,12 @@ static Object* ThrowReferenceError(String* name) {
static Object* LoadWithInterceptor(Arguments* args, static Object* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) { PropertyAttributes* attrs) {
Handle<JSObject> receiver_handle = args->at<JSObject>(0); Handle<String> name_handle = args->at<String>(0);
Handle<JSObject> holder_handle = args->at<JSObject>(1); Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
Handle<String> name_handle = args->at<String>(2); ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3); Handle<JSObject> receiver_handle = args->at<JSObject>(2);
Handle<Object> data_handle = args->at<Object>(4); Handle<JSObject> holder_handle = args->at<JSObject>(3);
ASSERT(args->length() == 5); // Last arg is data object.
Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter = v8::NamedPropertyGetter getter =
@ -855,8 +861,8 @@ static Object* LoadWithInterceptor(Arguments* args,
{ {
// Use the interceptor getter. // Use the interceptor getter.
CustomArguments args(*data_handle, *receiver_handle, *holder_handle); v8::AccessorInfo info(args->arguments() -
v8::AccessorInfo info(args.end()); kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope; HandleScope scope;
v8::Handle<v8::Value> r; v8::Handle<v8::Value> r;
{ {
@ -891,7 +897,7 @@ Object* LoadPropertyWithInterceptorForLoad(Arguments args) {
// If the property is present, return it. // If the property is present, return it.
if (attr != ABSENT) return result; if (attr != ABSENT) return result;
return ThrowReferenceError(String::cast(args[2])); return ThrowReferenceError(String::cast(args[0]));
} }

9
deps/v8/src/utils.h

@ -581,11 +581,12 @@ static inline void MemsetPointer(T** dest, T* value, int counter) {
#endif #endif
#if defined(__GNUC__) && defined(STOS) #if defined(__GNUC__) && defined(STOS)
asm("cld;" asm volatile(
"cld;"
"rep ; " STOS "rep ; " STOS
: /* no output */ : "+&c" (counter), "+&D" (dest)
: "c" (counter), "a" (value), "D" (dest) : "a" (value)
: /* no clobbered list as all inputs are considered clobbered */); : "memory", "cc");
#else #else
for (int i = 0; i < counter; i++) { for (int i = 0; i < counter; i++) {
dest[i] = value; dest[i] = value;

4
deps/v8/src/version.cc

@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2 #define MAJOR_VERSION 2
#define MINOR_VERSION 1 #define MINOR_VERSION 1
#define BUILD_NUMBER 9 #define BUILD_NUMBER 10
#define PATCH_LEVEL 1 #define PATCH_LEVEL 0
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the // Define SONAME to have the SCons build the put a specific SONAME into the

20
deps/v8/src/x64/codegen-x64.cc

@ -3281,13 +3281,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
} }
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// TODO(X64): This code was copied verbatim from codegen-ia32.
// Either find a reason to change it or move it to a shared location.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
// According to ECMA-262 section 11.11, page 58, the binary logical // According to ECMA-262 section 11.11, page 58, the binary logical
// operators must yield the result of one of the two expressions // operators must yield the result of one of the two expressions
// before any ToBoolean() conversions. This means that the value // before any ToBoolean() conversions. This means that the value
@ -3297,7 +3291,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// control flow), we force the right hand side to do the same. This // control flow), we force the right hand side to do the same. This
// is necessary because we assume that if we get control flow on the // is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths. // last path out of an expression we got it on all paths.
if (op == Token::AND) { if (node->op() == Token::AND) {
JumpTarget is_true; JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true); ControlDestination dest(&is_true, destination()->false_target(), true);
LoadCondition(node->left(), &dest, false); LoadCondition(node->left(), &dest, false);
@ -3360,7 +3354,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
exit.Bind(); exit.Bind();
} }
} else if (op == Token::OR) { } else {
ASSERT(node->op() == Token::OR);
JumpTarget is_false; JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false); ControlDestination dest(destination()->true_target(), &is_false, false);
LoadCondition(node->left(), &dest, false); LoadCondition(node->left(), &dest, false);
@ -3421,7 +3416,14 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// Exit (always with a materialized value). // Exit (always with a materialized value).
exit.Bind(); exit.Bind();
} }
}
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
if (node->op() == Token::AND || node->op() == Token::OR) {
GenerateLogicalBooleanOperation(node);
} else { } else {
// NOTE: The code below assumes that the slow cases (calls to runtime) // NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object. // never return a constant/immutable object.

3
deps/v8/src/x64/codegen-x64.h

@ -454,6 +454,9 @@ class CodeGenerator: public AstVisitor {
// control destination. // control destination.
void ToBoolean(ControlDestination* destination); void ToBoolean(ControlDestination* destination);
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
void GenericBinaryOperation( void GenericBinaryOperation(
Token::Value op, Token::Value op,
StaticType* type, StaticType* type,

26
deps/v8/src/x64/ic-x64.cc

@ -72,11 +72,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check for the absence of an interceptor. // Check for the absence of an interceptor.
// Load the map into r0. // Load the map into r0.
__ movq(r0, FieldOperand(r1, JSObject::kMapOffset)); __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map.
__ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
// Jump to miss if the interceptor bit is set. // Bail out if the receiver has a named interceptor.
__ testl(FieldOperand(r0, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label); __ j(not_zero, miss_label);
// Bail out if we have a JS global proxy object. // Bail out if we have a JS global proxy object.
@ -201,17 +200,10 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ xorl(r0, r1); __ xorl(r0, r1);
// Compute capacity mask. // Compute capacity mask.
const int kCapacityOffset = __ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ movq(r1, FieldOperand(elements, kCapacityOffset));
__ SmiToInteger32(r1, r1); __ SmiToInteger32(r1, r1);
__ decl(r1); __ decl(r1);
const int kElementsStartOffset =
NumberDictionary::kHeaderSize +
NumberDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before giving up. // Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4; const int kProbes = 4;
for (int i = 0; i < kProbes; i++) { for (int i = 0; i < kProbes; i++) {
@ -231,7 +223,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ cmpq(key, FieldOperand(elements, __ cmpq(key, FieldOperand(elements,
r2, r2,
times_pointer_size, times_pointer_size,
kElementsStartOffset)); NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) { if (i != (kProbes - 1)) {
__ j(equal, &done); __ j(equal, &done);
} else { } else {
@ -241,14 +233,16 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ bind(&done); __ bind(&done);
// Check that the value is a normal propety. // Check that the value is a normal propety.
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0); ASSERT_EQ(NORMAL, 0);
__ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::mask())); Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss); __ j(not_zero, miss);
// Get the value at the masked, scaled index. // Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize; const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); __ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
} }
@ -1404,7 +1398,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Check for non-global object that requires access check. // Check for non-global object that requires access check.
__ testl(FieldOperand(rbx, Map::kBitFieldOffset), __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded)); Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss); __ j(not_zero, &miss);
// Search the dictionary placing the result in rax. // Search the dictionary placing the result in rax.

7
deps/v8/src/x64/stub-cache-x64.cc

@ -138,14 +138,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder, Register holder,
Register name, Register name,
JSObject* holder_obj) { JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
__ push(name); __ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor(); InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
ASSERT(!Heap::InNewSpace(interceptor)); ASSERT(!Heap::InNewSpace(interceptor));
__ movq(kScratchRegister, Handle<Object>(interceptor), __ Move(kScratchRegister, Handle<Object>(interceptor));
RelocInfo::EMBEDDED_OBJECT);
__ push(kScratchRegister); __ push(kScratchRegister);
__ push(receiver);
__ push(holder);
__ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset)); __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
} }

62
deps/v8/test/cctest/test-api.cc

@ -2644,6 +2644,36 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
} }
THREADED_TEST(NamedInterceptorDictionaryIC) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
// Create an object with a named interceptor.
context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
Local<Script> script = Script::Compile(v8_str("interceptor_obj.x"));
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
}
// Create a slow case object and a function accessing a property in
// that slow case object (with dictionary probing in generated
// code). Then force object with a named interceptor into slow-case,
// pass it to the function, and check that the interceptor is called
// instead of accessing the local property.
Local<Value> result =
CompileRun("function get_x(o) { return o.x; };"
"var obj = { x : 42, y : 0 };"
"delete obj.y;"
"for (var i = 0; i < 10; i++) get_x(obj);"
"interceptor_obj.x = 42;"
"interceptor_obj.y = 10;"
"delete interceptor_obj.y;"
"get_x(interceptor_obj)");
CHECK_EQ(result, v8_str("x"));
}
static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property, static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,
const AccessorInfo& info) { const AccessorInfo& info) {
// Set x on the prototype object and do not handle the get request. // Set x on the prototype object and do not handle the get request.
@ -5974,6 +6004,38 @@ THREADED_TEST(InterceptorLoadICInvalidatedField) {
} }
static int interceptor_load_not_handled_calls = 0;
static v8::Handle<Value> InterceptorLoadNotHandled(Local<String> name,
const AccessorInfo& info) {
++interceptor_load_not_handled_calls;
return v8::Handle<v8::Value>();
}
// Test how post-interceptor lookups are done in the non-cacheable
// case: the interceptor should not be invoked during this lookup.
THREADED_TEST(InterceptorLoadICPostInterceptor) {
interceptor_load_not_handled_calls = 0;
CheckInterceptorLoadIC(InterceptorLoadNotHandled,
"receiver = new Object();"
"receiver.__proto__ = o;"
"proto = new Object();"
"/* Make proto a slow-case object. */"
"for (var i = 0; i < 1000; i++) {"
" proto[\"xxxxxxxx\" + i] = [];"
"}"
"proto.x = 17;"
"o.__proto__ = proto;"
"var result = 0;"
"for (var i = 0; i < 1000; i++) {"
" result += receiver.x;"
"}"
"result;",
17 * 1000);
CHECK_EQ(1000, interceptor_load_not_handled_calls);
}
// Test the case when we stored field into // Test the case when we stored field into
// a stub, but it got invalidated later on due to override on // a stub, but it got invalidated later on due to override on
// global object which is between interceptor and fields' holders. // global object which is between interceptor and fields' holders.

87
deps/v8/test/cctest/test-conversions.cc

@ -41,6 +41,10 @@ TEST(Octal) {
CHECK_EQ(7.0, StringToDouble("07", ALLOW_HEX)); CHECK_EQ(7.0, StringToDouble("07", ALLOW_HEX));
CHECK_EQ(10.0, StringToDouble("010", ALLOW_HEX)); CHECK_EQ(10.0, StringToDouble("010", ALLOW_HEX));
CHECK_EQ(77.0, StringToDouble("077", ALLOW_HEX)); CHECK_EQ(77.0, StringToDouble("077", ALLOW_HEX));
const double x = 010000000000; // Power of 2, no rounding errors.
CHECK_EQ(x * x * x * x * x, StringToDouble("01" "0000000000" "0000000000"
"0000000000" "0000000000" "0000000000", ALLOW_OCTALS));
} }
@ -80,6 +84,8 @@ TEST(MalformedOctal) {
TEST(TrailingJunk) { TEST(TrailingJunk) {
CHECK_EQ(8.0, StringToDouble("8q", ALLOW_TRAILING_JUNK)); CHECK_EQ(8.0, StringToDouble("8q", ALLOW_TRAILING_JUNK));
CHECK_EQ(63.0, StringToDouble("077qqq", ALLOW_OCTALS | ALLOW_TRAILING_JUNK)); CHECK_EQ(63.0, StringToDouble("077qqq", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
CHECK_EQ(10.0, StringToDouble("10e", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
CHECK_EQ(10.0, StringToDouble("10e-", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
} }
@ -91,6 +97,87 @@ TEST(NonStrDecimalLiteral) {
CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS)); CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS));
} }
TEST(IntegerStrLiteral) {
CHECK_EQ(0.0, StringToDouble("0.0", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble("0", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble("00", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble("000", NO_FLAGS));
CHECK_EQ(1.0, StringToDouble("1", NO_FLAGS));
CHECK_EQ(-1.0, StringToDouble("-1", NO_FLAGS));
CHECK_EQ(-1.0, StringToDouble(" - 1 ", NO_FLAGS));
CHECK_EQ(1.0, StringToDouble(" + 1 ", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble("0e0", ALLOW_HEX | ALLOW_OCTALS));
CHECK_EQ(0.0, StringToDouble("0e1", ALLOW_HEX | ALLOW_OCTALS));
CHECK_EQ(0.0, StringToDouble("0e-1", ALLOW_HEX | ALLOW_OCTALS));
CHECK_EQ(0.0, StringToDouble("0e-100000", ALLOW_HEX | ALLOW_OCTALS));
CHECK_EQ(0.0, StringToDouble("0e+100000", ALLOW_HEX | ALLOW_OCTALS));
CHECK_EQ(0.0, StringToDouble("0.", ALLOW_HEX | ALLOW_OCTALS));
}
TEST(LongNumberStr) {
CHECK_EQ(1e10, StringToDouble("1" "0000000000", NO_FLAGS));
CHECK_EQ(1e20, StringToDouble("1" "0000000000" "0000000000", NO_FLAGS));
CHECK_EQ(1e60, StringToDouble("1" "0000000000" "0000000000" "0000000000"
"0000000000" "0000000000" "0000000000", NO_FLAGS));
CHECK_EQ(1e-2, StringToDouble("." "0" "1", NO_FLAGS));
CHECK_EQ(1e-11, StringToDouble("." "0000000000" "1", NO_FLAGS));
CHECK_EQ(1e-21, StringToDouble("." "0000000000" "0000000000" "1", NO_FLAGS));
CHECK_EQ(1e-61, StringToDouble("." "0000000000" "0000000000" "0000000000"
"0000000000" "0000000000" "0000000000" "1", NO_FLAGS));
// x = 24414062505131248.0 and y = 24414062505131252.0 are representable in
// double. Check that z = (x + y) / 2 is rounded to x...
CHECK_EQ(24414062505131248.0,
StringToDouble("24414062505131250.0", NO_FLAGS));
// ... and z = (x + y) / 2 + delta is rounded to y.
CHECK_EQ(24414062505131252.0,
StringToDouble("24414062505131250.000000001", NO_FLAGS));
}
extern "C" double gay_strtod(const char* s00, const char** se);
TEST(MaximumSignificantDigits) {
char num[] =
"4.4501477170144020250819966727949918635852426585926051135169509"
"122872622312493126406953054127118942431783801370080830523154578"
"251545303238277269592368457430440993619708911874715081505094180"
"604803751173783204118519353387964161152051487413083163272520124"
"606023105869053620631175265621765214646643181420505164043632222"
"668006474326056011713528291579642227455489682133472873831754840"
"341397809846934151055619529382191981473003234105366170879223151"
"087335413188049110555339027884856781219017754500629806224571029"
"581637117459456877330110324211689177656713705497387108207822477"
"584250967061891687062782163335299376138075114200886249979505279"
"101870966346394401564490729731565935244123171539810221213221201"
"847003580761626016356864581135848683152156368691976240370422601"
"6998291015625000000000000000000000000000000000e-308";
CHECK_EQ(gay_strtod(num, NULL), StringToDouble(num, NO_FLAGS));
// Changes the result of strtod (at least in glibc implementation).
num[sizeof(num) - 8] = '1';
CHECK_EQ(gay_strtod(num, NULL), StringToDouble(num, NO_FLAGS));
}
TEST(ExponentNumberStr) {
CHECK_EQ(1e1, StringToDouble("1e1", NO_FLAGS));
CHECK_EQ(1e1, StringToDouble("1e+1", NO_FLAGS));
CHECK_EQ(1e-1, StringToDouble("1e-1", NO_FLAGS));
CHECK_EQ(1e100, StringToDouble("1e+100", NO_FLAGS));
CHECK_EQ(1e-100, StringToDouble("1e-100", NO_FLAGS));
CHECK_EQ(1e-106, StringToDouble(".000001e-100", NO_FLAGS));
}
class OneBit1: public BitField<uint32_t, 0, 1> {}; class OneBit1: public BitField<uint32_t, 0, 1> {};
class OneBit2: public BitField<uint32_t, 7, 1> {}; class OneBit2: public BitField<uint32_t, 7, 1> {};
class EightBit1: public BitField<uint32_t, 0, 8> {}; class EightBit1: public BitField<uint32_t, 0, 8> {};

112
deps/v8/test/cctest/test-heap.cc

@ -852,3 +852,115 @@ TEST(LargeObjectSpaceContains) {
CHECK(Heap::new_space()->Contains(addr));
CHECK(!Heap::lo_space()->Contains(addr));
}
TEST(EmptyHandleEscapeFrom) {
InitializeVM();
v8::HandleScope scope;
Handle<JSObject> runaway;
{
v8::HandleScope nested;
Handle<JSObject> empty;
runaway = empty.EscapeFrom(&nested);
}
CHECK(runaway.is_null());
}
static int LenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
TEST(Regression39128) {
// Test case for crbug.com/39128.
InitializeVM();
// Increase the chance of 'bump-the-pointer' allocation in old space.
bool force_compaction = true;
Heap::CollectAllGarbage(force_compaction);
v8::HandleScope scope;
// The plan: create a JSObject that references objects in new space.
// Then clone this object (forcing it into old space) and check that only
// the bits pertaining to the object are updated in the remembered set.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(Top::global_context()->object_function());
CHECK(object_ctor->has_initial_map());
Handle<Map> object_map(object_ctor->initial_map());
// Create a map with single inobject property.
Handle<Map> my_map = Factory::CopyMap(object_map, 1);
int n_properties = my_map->inobject_properties();
CHECK_GT(n_properties, 0);
int object_size = my_map->instance_size();
// Step 2: allocate a lot of objects so as to almost fill new space: we need
// just enough room left to allocate a JSObject and thereby fill new space.
int allocation_amount = Min(FixedArray::kMaxSize,
Heap::MaxObjectSizeInNewSpace());
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = Heap::new_space();
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
CHECK(!Heap::always_allocate());
Object* array = Heap::AllocateFixedArray(allocation_len);
CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
}
// Step 3: now allocate a fixed array and a JSObject to fill the whole new space.
int to_fill = *limit_addr - *top_addr - object_size;
int fixed_array_len = LenFromSize(to_fill);
CHECK(fixed_array_len < FixedArray::kMaxLength);
CHECK(!Heap::always_allocate());
Object* array = Heap::AllocateFixedArray(fixed_array_len);
CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
Object* object = Heap::AllocateJSObjectFromMap(*my_map);
CHECK(!object->IsFailure());
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, jsobject->elements()->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to an object in new space from jsobject.
jsobject->FastPropertyAtPut(-1, array);
CHECK_EQ(0L, (*limit_addr - *top_addr));
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
Address old_pointer_space_top = Heap::old_pointer_space()->top();
AlwaysAllocateScope aa_scope;
Object* clone_obj = Heap::CopyJSObject(jsobject);
CHECK(!clone_obj->IsFailure());
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_pointer_space_top) {
// Alas, the clone got allocated from the free list, so we cannot do the checks.
return;
}
CHECK(Heap::old_pointer_space()->Contains(clone->address()));
// Step 5: verify validity of remembered set.
Address clone_addr = clone->address();
Page* page = Page::FromAddress(clone_addr);
// Check that remembered set tracks a reference from inobject property 1.
CHECK(page->IsRSetSet(clone_addr, object_size - kPointerSize));
// Probe several addresses after the object.
for (int i = 0; i < 7; i++) {
int offset = object_size + i * kPointerSize;
if (clone_addr + offset >= page->ObjectAreaEnd()) {
break;
}
CHECK(!page->IsRSetSet(clone_addr, offset));
}
}
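(Editorial note: the test above probes per-slot bits on the clone's page. The general idea is that each old-space page keeps a remembered set with one bit per pointer-sized slot, set when that slot stores a pointer into new space, so the scavenger only scans marked slots. A hedged, self-contained sketch of that idea follows; PageRSet and kPageSize are made-up names for illustration, not V8's actual Page/remembered-set code.)

#include <assert.h>
#include <stdint.h>
#include <string.h>

enum { kPointerSize = sizeof(void*), kPageSize = 8192 };

struct PageRSet {
  uint8_t bits[kPageSize / kPointerSize / 8];
  void Set(size_t slot_offset) {
    size_t slot = slot_offset / kPointerSize;
    bits[slot / 8] |= static_cast<uint8_t>(1u << (slot % 8));
  }
  bool IsSet(size_t slot_offset) const {
    size_t slot = slot_offset / kPointerSize;
    return (bits[slot / 8] >> (slot % 8)) & 1;
  }
};

int main() {
  PageRSet rset;
  memset(rset.bits, 0, sizeof(rset.bits));
  // Record a store of a new-space pointer at slot offset 5 * kPointerSize.
  rset.Set(5 * kPointerSize);
  assert(rset.IsSet(5 * kPointerSize));
  assert(!rset.IsSet(6 * kPointerSize));  // Neighbouring slots stay clear.
  return 0;
}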

22
deps/v8/test/mjsunit/str-to-num.js

@ -29,6 +29,19 @@ function toNumber(val) {
return Number(val);
}
function repeat(s, num) {
var result = '';
while (num > 0) {
if ((num & 1) != 0) result += s;
s += s;
num >>= 1;
}
return result;
}
assertEquals('0000000000', repeat('0', 10));
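(Editorial note: repeat() builds the string by binary decomposition of num, doubling s and appending it whenever the current low bit is set, so it needs only O(log num) concatenations. An equivalent C++ sketch of the same trick, for illustration only; Repeat is a hypothetical helper.)

#include <assert.h>
#include <string>

static std::string Repeat(std::string s, int num) {
  std::string result;
  while (num > 0) {
    if (num & 1) result += s;  // Append s for each set bit of num.
    s += s;                    // Double s for the next bit position.
    num >>= 1;
  }
  return result;
}

int main() {
  assert(Repeat("0", 10) == "0000000000");
  assert(Repeat("ab", 3) == "ababab");
  return 0;
}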
// assertEquals(, toNumber());
@ -61,6 +74,7 @@ assertEquals(Infinity, toNumber("+Infinity "), "+Infinity");
assertEquals(0, toNumber("0")); assertEquals(0, toNumber("0"));
assertEquals(0, toNumber("+0")); assertEquals(0, toNumber("+0"));
assertEquals(-0, toNumber("-0")); assertEquals(-0, toNumber("-0"));
assertEquals(-Infinity, 1 / toNumber("-0"));
assertEquals(1, toNumber("1")); assertEquals(1, toNumber("1"));
assertEquals(1, toNumber("+1")); assertEquals(1, toNumber("+1"));
@ -130,11 +144,16 @@ assertEquals(15, toNumber("0Xf"));
assertEquals(15, toNumber("0XF")); assertEquals(15, toNumber("0XF"));
assertEquals(0, toNumber("0x000")); assertEquals(0, toNumber("0x000"));
assertEquals(-Infinity, 1 / toNumber("-0x000"));
assertEquals(0, toNumber("0x000" + repeat('0', 1000)));
assertEquals(9, toNumber("0x009")); assertEquals(9, toNumber("0x009"));
assertEquals(10, toNumber("0x00a")); assertEquals(10, toNumber("0x00a"));
assertEquals(10, toNumber("0x00A")); assertEquals(10, toNumber("0x00A"));
assertEquals(15, toNumber("0x00f")); assertEquals(15, toNumber("0x00f"));
assertEquals(15, toNumber("0x00F")); assertEquals(15, toNumber("0x00F"));
assertEquals(Infinity, toNumber("0x" + repeat('0', 1000) + '1'
+ repeat('0', 1000)));
assertEquals(-Infinity, toNumber("-0x1" + repeat('0', 1000)));
assertEquals(0, toNumber("00")); assertEquals(0, toNumber("00"));
assertEquals(1, toNumber("01")); assertEquals(1, toNumber("01"));
@ -156,3 +175,6 @@ assertTrue(isNaN(toNumber("0x100 junk")), "0x100 junk");
assertTrue(isNaN(toNumber("100.0 junk")), "100.0 junk"); assertTrue(isNaN(toNumber("100.0 junk")), "100.0 junk");
assertTrue(isNaN(toNumber(".1e4 junk")), ".1e4 junk"); assertTrue(isNaN(toNumber(".1e4 junk")), ".1e4 junk");
assertTrue(isNaN(toNumber("Infinity junk")), "Infinity junk"); assertTrue(isNaN(toNumber("Infinity junk")), "Infinity junk");
assertTrue(isNaN(toNumber("1e")), "1e");
assertTrue(isNaN(toNumber("1e ")), "1e_");
assertTrue(isNaN(toNumber("1" + repeat('0', 1000) + 'junk')), "1e1000 junk");

2
deps/v8/tools/gyp/v8.gyp

@ -282,6 +282,8 @@
'../../src/flag-definitions.h',
'../../src/flags.cc',
'../../src/flags.h',
'../../src/flow-graph.cc',
'../../src/flow-graph.h',
'../../src/frame-element.cc',
'../../src/frame-element.h',
'../../src/frames-inl.h',

8
deps/v8/tools/visual_studio/v8_base.vcproj

@ -464,6 +464,14 @@
RelativePath="..\..\src\flags.h" RelativePath="..\..\src\flags.h"
> >
</File> </File>
<File
RelativePath="..\..\src\flow-graph.cc"
>
</File>
<File
RelativePath="..\..\src\flow-graph.h"
>
</File>
<File
RelativePath="..\..\src\frame-element.cc"
>

8
deps/v8/tools/visual_studio/v8_base_arm.vcproj

@ -448,6 +448,14 @@
RelativePath="..\..\src\flags.h" RelativePath="..\..\src\flags.h"
> >
</File> </File>
<File
RelativePath="..\..\src\flow-graph.cc"
>
</File>
<File
RelativePath="..\..\src\flow-graph.h"
>
</File>
<File
RelativePath="..\..\src\frame-element.cc"
>

8
deps/v8/tools/visual_studio/v8_base_x64.vcproj

@ -440,6 +440,14 @@
RelativePath="..\..\src\flags.h" RelativePath="..\..\src\flags.h"
> >
</File> </File>
<File
RelativePath="..\..\src\flow-graph.cc"
>
</File>
<File
RelativePath="..\..\src\flow-graph.h"
>
</File>
<File
RelativePath="..\..\src\frame-element.cc"
>
