From 60040a4f366d436d759729530f4d19e7e1958e51 Mon Sep 17 00:00:00 2001
From: Ryan Dahl
Date: Mon, 16 Jan 2012 14:37:57 -0800
Subject: [PATCH] Upgrade V8 to 3.8.6

---
 deps/v8/ChangeLog | 19 +
 deps/v8/include/v8.h | 20 +-
 deps/v8/src/api.cc | 34 +-
 deps/v8/src/arm/assembler-arm-inl.h | 9 +-
 deps/v8/src/arm/assembler-arm.cc | 6 +-
 deps/v8/src/arm/assembler-arm.h | 10 +-
 deps/v8/src/arm/builtins-arm.cc | 13 +-
 deps/v8/src/arm/code-stubs-arm.cc | 44 +-
 deps/v8/src/arm/cpu-arm.cc | 2 +-
 deps/v8/src/arm/deoptimizer-arm.cc | 7 +-
 deps/v8/src/arm/full-codegen-arm.cc | 2 +-
 deps/v8/src/arm/lithium-arm.cc | 42 +-
 deps/v8/src/arm/lithium-arm.h | 14 +-
 deps/v8/src/arm/lithium-codegen-arm.cc | 78 ++-
 deps/v8/src/arm/lithium-codegen-arm.h | 10 +-
 deps/v8/src/arm/lithium-gap-resolver-arm.cc | 21 +-
 deps/v8/src/arm/macro-assembler-arm.cc | 119 +++--
 deps/v8/src/arm/macro-assembler-arm.h | 34 +-
 deps/v8/src/arm/simulator-arm.cc | 8 +-
 deps/v8/src/arm/stub-cache-arm.cc | 44 +-
 deps/v8/src/assembler.h | 2 +-
 deps/v8/src/atomicops_internals_x86_macosx.h | 44 +-
 deps/v8/src/bootstrapper.cc | 152 +++---
 deps/v8/src/bootstrapper.h | 2 +-
 deps/v8/src/builtins.cc | 2 +-
 deps/v8/src/builtins.h | 2 +-
 deps/v8/src/code-stubs.cc | 6 +-
 deps/v8/src/compiler.cc | 2 +-
 deps/v8/src/cpu-profiler.cc | 4 +-
 deps/v8/src/cpu-profiler.h | 6 +-
 deps/v8/src/cpu.h | 2 +-
 deps/v8/src/d8-debug.cc | 2 +-
 deps/v8/src/d8.cc | 189 +++++--
 deps/v8/src/d8.h | 3 +-
 deps/v8/src/debug-debugger.js | 2 +-
 deps/v8/src/debug.cc | 12 +-
 deps/v8/src/debug.h | 6 +-
 deps/v8/src/elements.cc | 19 +-
 deps/v8/src/execution.cc | 4 +-
 deps/v8/src/execution.h | 2 +-
 deps/v8/src/factory.cc | 48 +-
 deps/v8/src/factory.h | 17 +-
 deps/v8/src/flag-definitions.h | 8 +-
 deps/v8/src/frames.cc | 5 +-
 deps/v8/src/full-codegen.cc | 4 +-
 deps/v8/src/gdb-jit.cc | 44 +-
 deps/v8/src/handles.cc | 162 ------
 deps/v8/src/handles.h | 65 ---
 deps/v8/src/heap-inl.h | 2 +-
 deps/v8/src/heap-profiler.cc | 2 +-
 deps/v8/src/heap-profiler.h | 2 +-
 deps/v8/src/heap.cc | 103 ++--
 deps/v8/src/heap.h | 31 +-
 deps/v8/src/hydrogen-instructions.cc | 25 +-
 deps/v8/src/hydrogen-instructions.h | 45 +-
 deps/v8/src/hydrogen.cc | 99 +++-
 deps/v8/src/hydrogen.h | 2 +-
 deps/v8/src/ia32/assembler-ia32.cc | 6 +-
 deps/v8/src/ia32/builtins-ia32.cc | 2 +-
 deps/v8/src/ia32/code-stubs-ia32.cc | 37 +-
 deps/v8/src/ia32/cpu-ia32.cc | 2 +-
 deps/v8/src/ia32/deoptimizer-ia32.cc | 2 +-
 deps/v8/src/ia32/full-codegen-ia32.cc | 2 +-
 deps/v8/src/ia32/lithium-codegen-ia32.cc | 94 +++-
 deps/v8/src/ia32/lithium-codegen-ia32.h | 9 +-
 deps/v8/src/ia32/lithium-ia32.cc | 55 +-
 deps/v8/src/ia32/lithium-ia32.h | 19 +-
 deps/v8/src/ia32/macro-assembler-ia32.cc | 124 +++--
 deps/v8/src/ia32/macro-assembler-ia32.h | 20 +-
 deps/v8/src/ia32/stub-cache-ia32.cc | 42 +-
 deps/v8/src/ic.cc | 6 +-
 deps/v8/src/incremental-marking.cc | 2 +-
 deps/v8/src/incremental-marking.h | 5 +-
 deps/v8/src/inspector.cc | 4 +-
 deps/v8/src/inspector.h | 8 +-
 deps/v8/src/isolate.cc | 59 +-
 deps/v8/src/isolate.h | 7 +
 deps/v8/src/json-parser.h | 5 +-
 deps/v8/src/jsregexp.cc | 2 +-
 deps/v8/src/lithium-allocator.cc | 4 +-
 deps/v8/src/lithium.h | 14 +-
 deps/v8/src/liveedit.cc | 4 +-
 deps/v8/src/liveobjectlist-inl.h | 4 +-
 deps/v8/src/liveobjectlist.cc | 48 +-
 deps/v8/src/liveobjectlist.h | 10 +-
 deps/v8/src/log.cc | 6 +-
 deps/v8/src/log.h | 6 +-
 deps/v8/src/mark-compact.cc | 1 +
 deps/v8/src/mips/assembler-mips-inl.h | 2 +-
 deps/v8/src/mips/assembler-mips.cc | 6 +-
 deps/v8/src/mips/builtins-mips.cc | 13 +-
 deps/v8/src/mips/code-stubs-mips.cc | 37 +-
 deps/v8/src/mips/constants-mips.h | 4 +-
 deps/v8/src/mips/cpu-mips.cc | 2 +-
 deps/v8/src/mips/deoptimizer-mips.cc | 7 +-
 deps/v8/src/mips/full-codegen-mips.cc | 2 +-
 deps/v8/src/mips/lithium-codegen-mips.cc | 25 +-
 deps/v8/src/mips/lithium-codegen-mips.h | 8 +-
 deps/v8/src/mips/lithium-gap-resolver-mips.cc | 4 +-
 deps/v8/src/mips/lithium-mips.cc | 40 +-
 deps/v8/src/mips/lithium-mips.h | 14 +-
 deps/v8/src/mips/macro-assembler-mips.cc | 92 ++--
 deps/v8/src/mips/macro-assembler-mips.h | 18 +-
 deps/v8/src/mips/simulator-mips.cc | 10 +-
 deps/v8/src/mips/stub-cache-mips.cc | 4 +-
 deps/v8/src/objects-debug.cc | 9 +-
 deps/v8/src/objects-inl.h | 38 +-
 deps/v8/src/objects-printer.cc | 9 +
 deps/v8/src/objects.cc | 502 +++++++++++++-----
 deps/v8/src/objects.h | 232 +++++++-
 deps/v8/src/parser.cc | 22 +-
 deps/v8/src/platform-cygwin.cc | 4 +-
 deps/v8/src/platform-freebsd.cc | 2 +-
 deps/v8/src/platform-linux.cc | 4 +-
 deps/v8/src/platform-macos.cc | 2 +-
 deps/v8/src/platform-nullos.cc | 2 +-
 deps/v8/src/platform-openbsd.cc | 4 +-
 deps/v8/src/platform-posix.cc | 2 +-
 deps/v8/src/platform-solaris.cc | 2 +-
 deps/v8/src/platform-win32.cc | 4 +-
 deps/v8/src/platform.h | 8 +-
 deps/v8/src/preparser.h | 2 +-
 deps/v8/src/profile-generator.cc | 54 +-
 deps/v8/src/profile-generator.h | 14 +-
 deps/v8/src/runtime-profiler.cc | 10 +-
 deps/v8/src/runtime-profiler.h | 6 +-
 deps/v8/src/runtime.cc | 253 +++++----
 deps/v8/src/scopes.cc | 2 +-
 deps/v8/src/spaces.cc | 29 +-
 deps/v8/src/spaces.h | 45 +-
 deps/v8/src/store-buffer.cc | 2 +-
 deps/v8/src/store-buffer.h | 2 +-
 deps/v8/src/stub-cache.cc | 35 +-
 deps/v8/src/type-info.cc | 6 +-
 deps/v8/src/type-info.h | 2 +-
 deps/v8/src/utils.h | 8 +-
 deps/v8/src/v8.cc | 12 +-
 deps/v8/src/v8.h | 2 +-
 deps/v8/src/v8globals.h | 3 +-
 deps/v8/src/version.cc | 2 +-
 deps/v8/src/win32-headers.h | 2 +
 deps/v8/src/x64/assembler-x64-inl.h | 2 +-
 deps/v8/src/x64/assembler-x64.cc | 6 +-
 deps/v8/src/x64/builtins-x64.cc | 4 +-
 deps/v8/src/x64/code-stubs-x64.cc | 29 +-
 deps/v8/src/x64/cpu-x64.cc | 2 +-
 deps/v8/src/x64/deoptimizer-x64.cc | 2 +-
 deps/v8/src/x64/full-codegen-x64.cc | 2 +-
 deps/v8/src/x64/lithium-codegen-x64.cc | 98 +++-
 deps/v8/src/x64/lithium-codegen-x64.h | 10 +-
 deps/v8/src/x64/lithium-x64.cc | 49 +-
 deps/v8/src/x64/lithium-x64.h | 14 +-
 deps/v8/src/x64/macro-assembler-x64.cc | 120 +++--
 deps/v8/src/x64/macro-assembler-x64.h | 24 +-
 deps/v8/src/x64/stub-cache-x64.cc | 40 +-
 deps/v8/test/cctest/SConscript | 1 +
 deps/v8/test/cctest/cctest.h | 2 +-
 deps/v8/test/cctest/test-alloc.cc | 4 +-
 deps/v8/test/cctest/test-api.cc | 118 +++-
 deps/v8/test/cctest/test-assembler-x64.cc | 14 +-
 deps/v8/test/cctest/test-cpu-profiler.cc | 4 +-
 deps/v8/test/cctest/test-debug.cc | 16 +-
 deps/v8/test/cctest/test-disasm-arm.cc | 14 +-
 deps/v8/test/cctest/test-disasm-mips.cc | 6 +-
 deps/v8/test/cctest/test-hashing.cc | 153 ++++--
 deps/v8/test/cctest/test-heap.cc | 96 ++++
 deps/v8/test/cctest/test-platform-linux.cc | 2 +-
 deps/v8/test/cctest/test-platform-win32.cc | 2 +-
 deps/v8/test/cctest/test-sockets.cc | 2 +-
 deps/v8/test/cctest/test-spaces.cc | 18 +-
 deps/v8/test/cctest/test-utils.cc | 2 +-
 deps/v8/test/mjsunit/external-array.js | 46 +-
 deps/v8/test/mjsunit/math-min-max.js | 64 +++
 .../v8/test/mjsunit/regress/regress-109195.js | 65 +++
 deps/v8/test/mjsunit/regress/regress-1898.js | 37 ++
 deps/v8/tools/grokdump.py | 59 +-
 deps/v8/tools/ll_prof.py | 8 +-
 177 files changed, 3329 insertions(+), 1703 deletions(-)
 create mode 100644 deps/v8/test/mjsunit/regress/regress-109195.js
 create mode 100644
deps/v8/test/mjsunit/regress/regress-1898.js diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 059b69252e..1c1bddda82 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,22 @@ +2012-01-16: Version 3.8.6 + + Add primitive WebGL array support to d8. + + Improve heap size estimation (issue 1893). + + Hash collision DOS workaround extended from string keys + to numeric keys. + + Provide an API for iterating through all external strings referenced + from the JS heap. + + Adjust position recorded for call expressions. http://crbug.com/109195 + + Fix GC crash related to instanceof. http://crbug.com/109448 + + Performance improvements and bug fixes. + + 2012-01-05: Version 3.8.5 Fix broken test that assumes that no GC can clear the regexp cache (GC diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 1819943580..294b14d386 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -2848,6 +2848,17 @@ class V8EXPORT StartupDataDecompressor { // NOLINT */ typedef bool (*EntropySource)(unsigned char* buffer, size_t length); + +/** + * Interface for iterating though all external resources in the heap. + */ +class V8EXPORT ExternalResourceVisitor { // NOLINT + public: + virtual ~ExternalResourceVisitor() {} + virtual void VisitExternalString(Handle string) {} +}; + + /** * Container class for static utility functions. */ @@ -3203,6 +3214,13 @@ class V8EXPORT V8 { */ static void GetHeapStatistics(HeapStatistics* heap_statistics); + /** + * Iterates through all external resources referenced from current isolate + * heap. This method is not expected to be used except for debugging purposes + * and may be quite slow. + */ + static void VisitExternalResources(ExternalResourceVisitor* visitor); + /** * Optional notification that the embedder is idle. * V8 uses the notification to reduce memory footprint. @@ -3816,7 +3834,7 @@ class Internals { static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; - static const int kJSObjectType = 0xa6; + static const int kJSObjectType = 0xa7; static const int kFirstNonstringType = 0x80; static const int kForeignType = 0x85; diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 3bd5a3180f..bac3069308 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -2165,6 +2165,11 @@ bool Value::IsInt32() const { if (obj->IsSmi()) return true; if (obj->IsNumber()) { double value = obj->Number(); + static const i::DoubleRepresentation minus_zero(-0.0); + i::DoubleRepresentation rep(value); + if (rep.bits == minus_zero.bits) { + return false; + } return i::FastI2D(i::FastD2I(value)) == value; } return false; @@ -2177,6 +2182,11 @@ bool Value::IsUint32() const { if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0; if (obj->IsNumber()) { double value = obj->Number(); + static const i::DoubleRepresentation minus_zero(-0.0); + i::DoubleRepresentation rep(value); + if (rep.bits == minus_zero.bits) { + return false; + } return i::FastUI2D(i::FastD2UI(value)) == value; } return false; @@ -2739,7 +2749,7 @@ bool v8::Object::Set(uint32_t index, v8::Handle value) { i::Handle self = Utils::OpenHandle(this); i::Handle value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); - i::Handle obj = i::SetElement( + i::Handle obj = i::JSObject::SetElement( self, index, value_obj, @@ -2845,7 +2855,7 @@ Local v8::Object::GetPrototype() { return Local()); ENTER_V8(isolate); i::Handle self = Utils::OpenHandle(this); - i::Handle result = i::GetPrototype(self); + i::Handle result(self->GetPrototype()); return Utils::ToLocal(result); } @@ -2999,7 +3009,7 @@ bool v8::Object::Delete(v8::Handle key) { i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); - return i::DeleteProperty(self, key_obj)->IsTrue(); + return i::JSObject::DeleteProperty(self, key_obj)->IsTrue(); } @@ -3020,7 +3030,7 @@ bool v8::Object::Delete(uint32_t index) { ENTER_V8(isolate); HandleScope scope; i::Handle self = Utils::OpenHandle(this); - return i::DeleteElement(self, index)->IsTrue(); + return i::JSObject::DeleteElement(self, index)->IsTrue(); } @@ -3225,7 +3235,7 @@ int v8::Object::GetIdentityHash() { ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); - return i::GetIdentityHash(self); + return i::JSObject::GetIdentityHash(self); } @@ -3238,7 +3248,8 @@ bool v8::Object::SetHiddenValue(v8::Handle key, i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); - i::Handle result = i::SetHiddenProperty(self, key_obj, value_obj); + i::Handle result = + i::JSObject::SetHiddenProperty(self, key_obj, value_obj); return *result == *self; } @@ -4038,6 +4049,13 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { } +void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::V8::VisitExternalResources"); + isolate->heap()->VisitExternalResources(visitor); +} + + bool v8::V8::IdleNotification(int hint) { // Returning true tells the caller that it need not // continue to call IdleNotification. 
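For context on the VisitExternalResources change above, here is a minimal embedder-side sketch of the new external-string iteration API, assuming only the declarations this patch adds to v8.h (ExternalResourceVisitor::VisitExternalString and V8::VisitExternalResources); the counter class and the reporting function are hypothetical names, not part of the patch.

#include <cstdio>
#include <v8.h>

// Hypothetical visitor that counts the external strings referenced from the heap.
class ExternalStringCounter : public v8::ExternalResourceVisitor {
 public:
  ExternalStringCounter() : count_(0) {}
  virtual void VisitExternalString(v8::Handle<v8::String> string) {
    count_++;  // string->GetExternalStringResource() could be examined here.
  }
  int count() const { return count_; }
 private:
  int count_;
};

void ReportExternalStrings() {
  ExternalStringCounter counter;
  v8::V8::VisitExternalResources(&counter);
  std::printf("external strings referenced from the JS heap: %d\n", counter.count());
}
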
@@ -5542,7 +5560,7 @@ void Debug::DisableAgent() { void Debug::ProcessDebugMessages() { - i::Execution::ProcessDebugMesssages(true); + i::Execution::ProcessDebugMessages(true); } Local Debug::GetDebugContext() { diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 79f9c7bd2b..2ec6c7cfa7 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -32,7 +32,7 @@ // The original source code covered by the above license above has been modified // significantly by Google Inc. -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. #ifndef V8_ARM_ASSEMBLER_ARM_INL_H_ #define V8_ARM_ASSEMBLER_ARM_INL_H_ @@ -46,6 +46,13 @@ namespace v8 { namespace internal { +int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { + ASSERT(!reg.is(kDoubleRegZero)); + ASSERT(!reg.is(kScratchDoubleReg)); + return reg.code(); +} + + void RelocInfo::apply(intptr_t delta) { if (RelocInfo::IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 329493a340..25922361a2 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -317,7 +317,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) own_buffer_ = false; } - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -349,7 +349,7 @@ void Assembler::GetCode(CodeDesc* desc) { CheckConstPool(true, false); ASSERT(num_pending_reloc_info_ == 0); - // Setup code descriptor. + // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -2446,7 +2446,7 @@ void Assembler::GrowBuffer() { } CHECK_GT(desc.buffer_size, 0); // no overflow - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 247479d730..e88739e497 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -32,7 +32,7 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// A light-weight ARM Assembler // Generates user mode instructions for the ARM architecture up to version 5 @@ -176,14 +176,11 @@ struct DwVfpRegister { static const int kNumAllocatableRegisters = kNumRegisters - kNumReservedRegisters; - static int ToAllocationIndex(DwVfpRegister reg) { - ASSERT(reg.code() != 0); - return reg.code() - 1; - } + inline static int ToAllocationIndex(DwVfpRegister reg); static DwVfpRegister FromAllocationIndex(int index) { ASSERT(index >= 0 && index < kNumAllocatableRegisters); - return from_code(index + 1); + return from_code(index); } static const char* AllocationIndexToString(int index) { @@ -307,6 +304,7 @@ const DwVfpRegister d15 = { 15 }; static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8; static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15; static const DwVfpRegister& kDoubleRegZero = d14; +static const DwVfpRegister& kScratchDoubleReg = d15; // Coprocessor register diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 69ef1872c7..2a650a44a5 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -333,7 +333,7 @@ static void ArrayNativeCode(MacroAssembler* masm, r5, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, r3, r4); - // Setup return value, remove receiver from stack and return. + // Set up return value, remove receiver from stack and return. __ mov(r0, r2); __ add(sp, sp, Operand(kPointerSize)); __ Jump(lr); @@ -376,7 +376,7 @@ static void ArrayNativeCode(MacroAssembler* masm, true, call_generic_code); __ IncrementCounter(counters->array_function_native(), 1, r2, r4); - // Setup return value, remove receiver and argument from stack and return. + // Set up return value, remove receiver and argument from stack and return. __ mov(r0, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Jump(lr); @@ -951,10 +951,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // sp[4]: number of arguments (smi-tagged) __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - // Setup pointer to last argument. + // Set up pointer to last argument. __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - // Setup number of arguments for function call below + // Set up number of arguments for function call below __ mov(r0, Operand(r3, LSR, kSmiTagSize)); // Copy arguments and receiver to the expression stack. @@ -1082,10 +1082,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Set up the context from the function argument. __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Set up the roots register. - ExternalReference roots_array_start = - ExternalReference::roots_array_start(masm->isolate()); - __ mov(r10, Operand(roots_array_start)); + __ InitializeRootRegister(); // Push the function and the receiver onto the stack. __ push(r1); diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index e95e2cf421..15ef9bcf9d 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -156,13 +156,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Load the function from the stack. __ ldr(r3, MemOperand(sp, 0)); - // Setup the object header. + // Set up the object header. __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ mov(r2, Operand(Smi::FromInt(length))); __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - // Setup the fixed slots. + // Set up the fixed slots. 
__ mov(r1, Operand(Smi::FromInt(0))); __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); @@ -207,7 +207,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Load the serialized scope info from the stack. __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ mov(r2, Operand(Smi::FromInt(length))); @@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); @@ -717,7 +717,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); - // Get mantisssa[51:20]. + // Get mantissa[51:20]. // Get the position of the first set bit. __ CountLeadingZeros(dst1, int_scratch, scratch2); @@ -951,7 +951,7 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // non zero bits left. So we need the (30 - exponent) last bits of the // 31 higher bits of the mantissa to be null. // Because bits [21:0] are null, we can check instead that the - // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null. + // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, @@ -3842,7 +3842,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); - // Setup argc and the builtin function in callee-saved registers. + // Set up argc and the builtin function in callee-saved registers. __ mov(r4, Operand(r0)); __ mov(r5, Operand(r1)); @@ -3919,7 +3919,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r2: receiver // r3: argc - // Setup argv in r4. + // Set up argv in r4. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; if (CpuFeatures::IsSupported(VFP3)) { offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; @@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ ldr(r5, MemOperand(r5)); __ Push(r8, r7, r6, r5); - // Setup frame pointer for the frame to be pushed. + // Set up frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); // If this is the outermost JS call, set js_entry_sp value. @@ -4081,7 +4081,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { const Register inline_site = r9; const Register scratch = r2; - const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize; + const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; Label slow, loop, is_instance, is_not_instance, not_js_object; @@ -4132,7 +4132,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. 
__ GetRelocatedValueLocation(inline_site, scratch); - __ str(map, MemOperand(scratch)); + __ ldr(scratch, MemOperand(scratch)); + __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); } // Register mapping: r3 is object map and r4 is function prototype. @@ -4401,7 +4402,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ str(r3, FieldMemOperand(r0, i)); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); const int kCalleeOffset = JSObject::kHeaderSize + @@ -4414,7 +4415,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize; __ str(r2, FieldMemOperand(r0, kLengthOffset)); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, r4 will point there, otherwise // it will point to the backing store. __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); @@ -4509,7 +4510,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ Ret(); // Do the runtime call to allocate the arguments object. - // r2 = argument count (taggged) + // r2 = argument count (tagged) __ bind(&runtime); __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); @@ -4582,7 +4583,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); @@ -4594,7 +4595,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Copy the fixed array slots. Label loop; - // Setup r4 to point to the first array slot. + // Set up r4 to point to the first array slot. __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ bind(&loop); // Pre-decrement r2 with kPointerSize on each iteration. @@ -5209,7 +5210,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // of the original receiver from the call site). __ bind(&non_function); __ str(r1, MemOperand(sp, argc_ * kPointerSize)); - __ mov(r0, Operand(argc_)); // Setup the number of arguments. + __ mov(r0, Operand(argc_)); // Set up the number of arguments. __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); __ SetCallKind(r5, CALL_AS_METHOD); @@ -5730,7 +5731,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, Register hash, Register character) { // hash = character + (character << 10); - __ LoadRoot(hash, Heap::kStringHashSeedRootIndex); + __ LoadRoot(hash, Heap::kHashSeedRootIndex); // Untag smi seed and add the character. 
__ add(hash, character, Operand(hash, LSR, kSmiTagSize)); // hash += hash << 10; @@ -5759,13 +5760,12 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, // hash ^= hash >> 11; __ eor(hash, hash, Operand(hash, LSR, 11)); // hash += hash << 15; - __ add(hash, hash, Operand(hash, LSL, 15), SetCC); + __ add(hash, hash, Operand(hash, LSL, 15)); - uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1; - __ and_(hash, hash, Operand(kHashShiftCutOffMask)); + __ and_(hash, hash, Operand(String::kHashBitMask), SetCC); // if (hash == 0) hash = 27; - __ mov(hash, Operand(27), LeaveCC, eq); + __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq); } diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 51cfeb6c87..7b08ed8c2f 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -41,7 +41,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 4b54b6dbc2..3689a9f6b6 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -319,7 +319,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code())); output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code())); @@ -723,10 +723,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(ip); // remove sp __ pop(ip); // remove lr - // Set up the roots register. - ExternalReference roots_array_start = - ExternalReference::roots_array_start(isolate); - __ mov(r10, Operand(roots_array_start)); + __ InitializeRootRegister(); __ pop(ip); // remove pc __ pop(r7); // get continuation, leave pc on stack diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 7e9a889116..38999a8e36 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1009,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(r0); // Map. __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset)); __ mov(r0, Operand(Smi::FromInt(0))); diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index b001ecada0..e063ef1132 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1038,14 +1038,23 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* v = instr->value(); - if (v->EmitAtUses()) { - HBasicBlock* successor = HConstant::cast(v)->ToBoolean() + HValue* value = instr->value(); + if (value->EmitAtUses()) { + HBasicBlock* successor = HConstant::cast(value)->ToBoolean() ? 
instr->FirstSuccessor() : instr->SecondSuccessor(); return new LGoto(successor->block_id()); } - return AssignEnvironment(new LBranch(UseRegister(v))); + + LBranch* result = new LBranch(UseRegister(value)); + // Tagged values that are not known smis or booleans require a + // deoptimization environment. + Representation rep = value->representation(); + HType type = value->type(); + if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) { + return AssignEnvironment(result); + } + return result; } @@ -1344,7 +1353,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { } else { left = UseRegisterAtStart(instr->LeastConstantOperand()); } - return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp))); + LMulI* mul = new LMulI(left, right, temp); + if (instr->CheckFlag(HValue::kCanOverflow) || + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + AssignEnvironment(mul); + } + return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); @@ -1413,6 +1427,15 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { } +LInstruction* LChunkBuilder::DoRandom(HRandom* instr) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->global_object()->representation().IsTagged()); + LOperand* global_object = UseFixed(instr->global_object(), r0); + LRandom* result = new LRandom(global_object); + return MarkAsCall(DefineFixedDouble(result, d7), instr); +} + + LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { ASSERT(instr->left()->representation().IsTagged()); ASSERT(instr->right()->representation().IsTagged()); @@ -1529,7 +1552,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LClassOfTestAndBranch(UseTempRegister(instr->value()), + return new LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister()); } @@ -1556,7 +1579,7 @@ LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) { LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LOperand* object = UseRegister(instr->value()); LValueOf* result = new LValueOf(object, TempRegister()); - return AssignEnvironment(DefineAsRegister(result)); + return DefineAsRegister(result); } @@ -1874,7 +1897,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement( LOperand* obj = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterAtStart(instr->key()); LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key); - return AssignEnvironment(DefineAsRegister(result)); + if (instr->RequiresHoleCheck()) AssignEnvironment(result); + return DefineAsRegister(result); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 703666c4a1..d3aff76e18 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -141,6 +141,7 @@ class LCodeGen; V(Parameter) \ V(Power) \ V(PushArgument) \ + V(Random) \ V(RegExpLiteral) \ V(Return) \ V(ShiftI) \ @@ -1026,6 +1027,17 @@ class LPower: public LTemplateInstruction<1, 2, 0> { }; +class LRandom: public LTemplateInstruction<1, 1, 0> { + public: + explicit LRandom(LOperand* global_object) { + inputs_[0] = global_object; + } + + DECLARE_CONCRETE_INSTRUCTION(Random, "random") + DECLARE_HYDROGEN_ACCESSOR(Random) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index e1e35d251e..b5ed517087 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1994,7 +1994,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch( // Branches to a label or falls through with the answer in flags. Trashes -// the temp registers, but not the input. Only input and temp2 may alias. +// the temp registers, but not the input. void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, Handleclass_name, @@ -2002,7 +2002,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Register temp, Register temp2) { ASSERT(!input.is(temp)); - ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. + ASSERT(!input.is(temp2)); + ASSERT(!temp.is(temp2)); + __ JumpIfSmi(input, is_false); if (class_name->IsEqualTo(CStrVector("Function"))) { @@ -2141,7 +2143,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. - __ mov(ip, Operand(factory()->the_hole_value())); + Handle cell = + factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); + __ mov(ip, Operand(Handle(cell))); + __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); __ cmp(map, Operand(ip)); __ b(ne, &cache_miss); // We use Factory::the_hole_value() on purpose instead of loading from the @@ -2901,7 +2906,7 @@ void LCodeGen::CallKnownFunction(Handle function, __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); __ Call(ip); - // Setup deoptimization. + // Set up deoptimization. RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); // Restore context. @@ -3190,6 +3195,30 @@ void LCodeGen::DoPower(LPower* instr) { } +void LCodeGen::DoRandom(LRandom* instr) { + // Having marked this instruction as a call we can use any + // registers. + ASSERT(ToDoubleRegister(instr->result()).is(d7)); + ASSERT(ToRegister(instr->InputAt(0)).is(r0)); + + __ PrepareCallCFunction(1, scratch0()); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); + __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); + + // 0x41300000 is the top half of 1.0 x 2^20 as a double. + // Create this constant using mov/orr to avoid PC relative load. 
+ __ mov(r1, Operand(0x41000000)); + __ orr(r1, r1, Operand(0x300000)); + // Move 0x41300000xxxxxxxx (x = random bits) to VFP. + __ vmov(d7, r0, r1); + // Move 0x4130000000000000 to VFP. + __ mov(r0, Operand(0, RelocInfo::NONE)); + __ vmov(d8, r0, r1); + // Subtract and store the result in the heap number. + __ vsub(d7, d7, d8); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::LOG, @@ -3874,6 +3903,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { void LCodeGen::EmitNumberUntagD(Register input_reg, DoubleRegister result_reg, bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, LEnvironment* env) { Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); @@ -3909,6 +3939,14 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // Heap number to double register conversion. __ sub(ip, input_reg, Operand(kHeapObjectTag)); __ vldr(result_reg, ip, HeapNumber::kValueOffset); + if (deoptimize_on_minus_zero) { + __ vmov(ip, result_reg.low()); + __ cmp(ip, Operand(0)); + __ b(ne, &done); + __ vmov(ip, result_reg.high()); + __ cmp(ip, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(eq, env); + } __ jmp(&done); // Smi to double register conversion @@ -4042,6 +4080,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { EmitNumberUntagD(input_reg, result_reg, instr->hydrogen()->deoptimize_on_undefined(), + instr->hydrogen()->deoptimize_on_minus_zero(), instr->environment()); } @@ -4155,14 +4194,26 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { } +void LCodeGen::DoCheckMapCommon(Register reg, + Register scratch, + Handle map, + CompareMapMode mode, + LEnvironment* env) { + Label success; + __ CompareMap(reg, scratch, map, &success, mode); + DeoptimizeIf(ne, env); + __ bind(&success); +} + + void LCodeGen::DoCheckMap(LCheckMap* instr) { Register scratch = scratch0(); LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); Register reg = ToRegister(input); - __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(instr->hydrogen()->map())); - DeoptimizeIf(ne, instr->environment()); + Handle map = instr->hydrogen()->map(); + DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(), + instr->environment()); } @@ -4231,9 +4282,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { // Check prototype maps up to the holder. while (!current_prototype.is_identical_to(holder)) { - __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); - __ cmp(temp2, Operand(Handle(current_prototype->map()))); - DeoptimizeIf(ne, instr->environment()); + DoCheckMapCommon(temp1, temp2, + Handle(current_prototype->map()), + ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); current_prototype = Handle(JSObject::cast(current_prototype->GetPrototype())); // Load next prototype object. @@ -4241,8 +4292,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { } // Check the holder map. 
- __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); - __ cmp(temp2, Operand(Handle(current_prototype->map()))); + DoCheckMapCommon(temp1, temp2, + Handle(current_prototype->map()), + ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); DeoptimizeIf(ne, instr->environment()); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index e6626481b3..00823e1638 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -119,6 +119,9 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); + void DoCheckMapCommon(Register reg, Register scratch, Handle map, + CompareMapMode mode, LEnvironment* env); + // Parallel move support. void DoParallelMove(LParallelMove* move); void DoGap(LGap* instr); @@ -153,7 +156,7 @@ class LCodeGen BASE_EMBEDDED { HGraph* graph() const { return chunk_->graph(); } Register scratch0() { return r9; } - DwVfpRegister double_scratch0() { return d15; } + DwVfpRegister double_scratch0() { return kScratchDoubleReg; } int GetNextEmittedBlock(int block); LInstruction* GetNextInstruction(); @@ -270,6 +273,7 @@ class LCodeGen BASE_EMBEDDED { void EmitNumberUntagD(Register input, DoubleRegister result, bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, LEnvironment* env); // Emits optimized code for typeof x == "y". Modifies input register. @@ -408,7 +412,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index ee6cb8ec91..cefca476ad 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -34,7 +34,6 @@ namespace v8 { namespace internal { static const Register kSavedValueRegister = { 9 }; -static const DoubleRegister kSavedDoubleValueRegister = { 0 }; LGapResolver::LGapResolver(LCodeGen* owner) : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), @@ -172,9 +171,9 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source)); + __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source)); + __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); } @@ -193,11 +192,9 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { - __ vmov(cgen_->ToDoubleRegister(saved_destination_), - kSavedDoubleValueRegister); + __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { - __ vstr(kSavedDoubleValueRegister, - cgen_->ToMemOperand(saved_destination_)); + __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); } @@ -235,8 +232,8 @@ void LGapResolver::EmitMove(int index) { // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. It is OK if the read from the source // destroys ip, since that happens before the value is read. - __ vldr(kSavedDoubleValueRegister.low(), source_operand); - __ vstr(kSavedDoubleValueRegister.low(), destination_operand); + __ vldr(kScratchDoubleReg.low(), source_operand); + __ vstr(kScratchDoubleReg.low(), destination_operand); } else { __ ldr(ip, source_operand); __ str(ip, destination_operand); @@ -297,8 +294,8 @@ void LGapResolver::EmitMove(int index) { __ ldr(kSavedValueRegister, source_high_operand); __ str(kSavedValueRegister, destination_high_operand); } else { - __ vldr(kSavedDoubleValueRegister, source_operand); - __ vstr(kSavedDoubleValueRegister, destination_operand); + __ vldr(kScratchDoubleReg, source_operand); + __ vstr(kScratchDoubleReg, destination_operand); } } } else { diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 59a5e5ba79..fa97611cf8 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -396,14 +396,14 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { - ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond); + ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index, Condition cond) { - str(source, MemOperand(roots, index << kPointerSizeLog2), cond); + str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } @@ -496,13 +496,10 @@ void MacroAssembler::RecordWrite(Register object, // registers are cp. ASSERT(!address.is(cp) && !value.is(cp)); - if (FLAG_debug_code) { - Label ok; + if (emit_debug_code()) { ldr(ip, MemOperand(address)); cmp(ip, value); - b(eq, &ok); - stop("Wrong address or value passed to RecordWrite"); - bind(&ok); + Check(eq, "Wrong address or value passed to RecordWrite"); } Label done; @@ -551,7 +548,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. SaveFPRegsMode fp_mode, RememberedSetFinalAction and_then) { Label done; - if (FLAG_debug_code) { + if (emit_debug_code()) { Label ok; JumpIfNotInNewSpace(object, scratch, &ok); stop("Remembered set pointer is in new space"); @@ -820,12 +817,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { - // Setup the frame structure on the stack. + // Set up the frame structure on the stack. ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); Push(lr, fp); - mov(fp, Operand(sp)); // Setup new frame pointer. + mov(fp, Operand(sp)); // Set up new frame pointer. // Reserve room for saved entry sp and code object. sub(sp, sp, Operand(2 * kPointerSize)); if (emit_debug_code()) { @@ -1414,6 +1411,35 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::GetNumberHash(Register t0, Register scratch) { + // First of all we assign the hash seed to scratch. + LoadRoot(scratch, Heap::kHashSeedRootIndex); + SmiUntag(scratch); + + // Xor original key with a seed. + eor(t0, t0, Operand(scratch)); + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. + // + // hash = ~hash + (hash << 15); + mvn(scratch, Operand(t0)); + add(t0, scratch, Operand(t0, LSL, 15)); + // hash = hash ^ (hash >> 12); + eor(t0, t0, Operand(t0, LSR, 12)); + // hash = hash + (hash << 2); + add(t0, t0, Operand(t0, LSL, 2)); + // hash = hash ^ (hash >> 4); + eor(t0, t0, Operand(t0, LSR, 4)); + // hash = hash * 2057; + mov(scratch, Operand(t0, LSL, 11)); + add(t0, t0, Operand(t0, LSL, 3)); + add(t0, t0, scratch); + // hash = hash ^ (hash >> 16); + eor(t0, t0, Operand(t0, LSR, 16)); +} + + void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements, Register key, @@ -1443,26 +1469,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, // t2 - used for the index into the dictionary. Label done; - // Compute the hash code from the untagged key. This must be kept in sync - // with ComputeIntegerHash in utils.h. 
- // - // hash = ~hash + (hash << 15); - mvn(t1, Operand(t0)); - add(t0, t1, Operand(t0, LSL, 15)); - // hash = hash ^ (hash >> 12); - eor(t0, t0, Operand(t0, LSR, 12)); - // hash = hash + (hash << 2); - add(t0, t0, Operand(t0, LSL, 2)); - // hash = hash ^ (hash >> 4); - eor(t0, t0, Operand(t0, LSR, 4)); - // hash = hash * 2057; - mov(t1, Operand(2057)); - mul(t0, t0, t1); - // hash = hash ^ (hash >> 16); - eor(t0, t0, Operand(t0, LSR, 16)); + GetNumberHash(t0, t1); // Compute the capacity mask. - ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset)); + ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int sub(t1, t1, Operand(1)); @@ -1473,17 +1483,17 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(t2, t0); // Compute the masked index: (hash + i + i * i) & mask. if (i > 0) { - add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i))); + add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); } and_(t2, t2, Operand(t1)); // Scale the index by multiplying by the element size. - ASSERT(NumberDictionary::kEntrySize == 3); + ASSERT(SeededNumberDictionary::kEntrySize == 3); add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 // Check if the key is identical to the name. add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); - ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset)); + ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); cmp(key, Operand(ip)); if (i != kProbes - 1) { b(eq, &done); @@ -1496,14 +1506,14 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, // Check that the value is a normal property. // t2: elements + (index * kPointerSize) const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; ldr(t1, FieldMemOperand(t2, kDetailsOffset)); tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); b(ne, miss); // Get the value at the masked, scaled index and return. 
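For reference, the seeded number hash that the new MacroAssembler::GetNumberHash above computes corresponds to the following C++ sketch, matching the sequence the patch says must be kept in sync with ComputeIntegerHash in utils.h; the standalone function name here is illustrative.

#include <stdint.h>

// Sketch of the seeded integer hash used for numeric dictionary keys.
static uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // Xor original key with the untagged hash seed.
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // Same as hash + (hash << 3) + (hash << 11).
  hash = hash ^ (hash >> 16);
  return hash;
}
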
const int kValueOffset = - NumberDictionary::kElementsStartOffset + kPointerSize; + SeededNumberDictionary::kElementsStartOffset + kPointerSize; ldr(result, FieldMemOperand(t2, kValueOffset)); } @@ -1992,18 +2002,49 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, } +void MacroAssembler::CompareMap(Register obj, + Register scratch, + Handle map, + Label* early_success, + CompareMapMode mode) { + ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); + cmp(scratch, Operand(map)); + if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { + Map* transitioned_fast_element_map( + map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); + ASSERT(transitioned_fast_element_map == NULL || + map->elements_kind() != FAST_ELEMENTS); + if (transitioned_fast_element_map != NULL) { + b(eq, early_success); + cmp(scratch, Operand(Handle(transitioned_fast_element_map))); + } + + Map* transitioned_double_map( + map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); + ASSERT(transitioned_double_map == NULL || + map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); + if (transitioned_double_map != NULL) { + b(eq, early_success); + cmp(scratch, Operand(Handle(transitioned_double_map))); + } + } +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle map, Label* fail, - SmiCheckType smi_check_type) { + SmiCheckType smi_check_type, + CompareMapMode mode) { if (smi_check_type == DO_SMI_CHECK) { JumpIfSmi(obj, fail); } - ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - mov(ip, Operand(map)); - cmp(scratch, ip); + + Label success; + CompareMap(obj, scratch, map, &success, mode); b(ne, fail); + bind(&success); } @@ -3460,7 +3501,7 @@ void MacroAssembler::EnsureNotWhite( tst(mask_scratch, load_scratch); b(ne, &done); - if (FLAG_debug_code) { + if (emit_debug_code()) { // Check for impossible bit pattern. Label ok; // LSL may overflow, making the check conservative. diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 392c2f7503..4b55a3b064 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -52,7 +52,7 @@ inline Operand SmiUntagOperand(Register object) { // Give alias names to registers const Register cp = { 8 }; // JavaScript context pointer -const Register roots = { 10 }; // Roots array pointer. +const Register kRootRegister = { 10 }; // Roots array pointer. // Flags used for the AllocateInNewSpace functions. enum AllocationFlags { @@ -499,10 +499,16 @@ class MacroAssembler: public Assembler { Register map, Register scratch); + void InitializeRootRegister() { + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(kRootRegister, Operand(roots_array_start)); + } + // --------------------------------------------------------------------------- // JavaScript invokes - // Setup call kind marking in ecx. The method takes ecx as an + // Set up call kind marking in ecx. The method takes ecx as an // explicit first parameter to make the code more readable at the // call sites. 
void SetCallKind(Register dst, CallKind kind); @@ -584,6 +590,7 @@ class MacroAssembler: public Assembler { Register scratch, Label* miss); + void GetNumberHash(Register t0, Register scratch); void LoadFromNumberDictionary(Label* miss, Register elements, @@ -790,15 +797,26 @@ class MacroAssembler: public Assembler { Register scratch4, Label* fail); - // Check if the map of an object is equal to a specified map (either - // given directly or as an index into the root list) and branch to - // label if not. Skip the smi check if not required (object is known - // to be a heap object) + // Compare an object's map with the specified map and its transitioned + // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are + // set with result of map compare. If multiple map compares are required, the + // compare sequences branches to early_success. + void CompareMap(Register obj, + Register scratch, + Handle map, + Label* early_success, + CompareMapMode mode = REQUIRE_EXACT_MAP); + + // Check if the map of an object is equal to a specified map and branch to + // label if not. Skip the smi check if not required (object is known to be a + // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match + // against maps that are ElementsKind transition maps of the specificed map. void CheckMap(Register obj, Register scratch, Handle map, Label* fail, - SmiCheckType smi_check_type); + SmiCheckType smi_check_type, + CompareMapMode mode = REQUIRE_EXACT_MAP); void CheckMap(Register obj, diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 0525529fde..1ae172c008 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -741,7 +741,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { isolate_->set_simulator_i_cache(i_cache_); } Initialize(isolate); - // Setup simulator support first. Some of this information is needed to + // Set up simulator support first. Some of this information is needed to // setup the architecture state. size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack stack_ = reinterpret_cast(malloc(stack_size)); @@ -750,7 +750,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { break_pc_ = NULL; break_instr_ = 0; - // Setup architecture state. + // Set up architecture state. // All registers are initialized to zero to start with. for (int i = 0; i < num_registers; i++) { registers_[i] = 0; @@ -3324,7 +3324,7 @@ void Simulator::Execute() { int32_t Simulator::Call(byte* entry, int argument_count, ...) { va_list parameters; va_start(parameters, argument_count); - // Setup arguments + // Set up arguments // First four arguments passed in registers. ASSERT(argument_count >= 4); @@ -3367,7 +3367,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { int32_t r10_val = get_register(r10); int32_t r11_val = get_register(r11); - // Setup the callee-saved registers with a known value. To be able to check + // Set up the callee-saved registers with a known value. To be able to check // that they are preserved properly across JS execution. int32_t callee_saved_value = icount_; set_register(r4, callee_saved_value); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index b6b2ee2f04..c3a82ff934 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -376,13 +376,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // r0 : value Label exit; - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver_reg, miss_label); - - // Check that the map of the receiver hasn't changed. - __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(Handle(object->map()))); - __ b(ne, miss_label); + // Check that the map of the object hasn't changed. + __ CheckMap(receiver_reg, scratch, Handle(object->map()), miss_label, + DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -1019,10 +1015,9 @@ Register StubCompiler::CheckPrototypes(Handle object, __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); } else { Handle current_map(current->map()); - __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ cmp(scratch1, Operand(current_map)); - // Branch on the result of the map check. - __ b(ne, miss); + __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. @@ -1053,9 +1048,8 @@ Register StubCompiler::CheckPrototypes(Handle object, LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); // Check the holder map. - __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ cmp(scratch1, Operand(Handle(current->map()))); - __ b(ne, miss); + __ CheckMap(reg, scratch1, Handle(current->map()), miss, + DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform security check for access to the global object. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); @@ -1150,7 +1144,7 @@ void StubCompiler::GenerateLoadCallback(Handle object, __ EnterExitFrame(false, kApiStackSpace); // Create AccessorInfo instance on the stack above the exit frame with - // scratch2 (internal::Object **args_) as the data. + // scratch2 (internal::Object** args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& @@ -2411,7 +2405,7 @@ Handle CallStubCompiler::CompileCallGlobal( __ str(r3, MemOperand(sp, argc * kPointerSize)); } - // Setup the context (function already in r1). + // Set up the context (function already in r1). __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). @@ -2472,13 +2466,9 @@ Handle StoreStubCompiler::CompileStoreCallback( // ----------------------------------- Label miss; - // Check that the object isn't a smi. - __ JumpIfSmi(r1, &miss); - // Check that the map of the object hasn't changed. - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ cmp(r3, Operand(Handle(object->map()))); - __ b(ne, &miss); + __ CheckMap(r1, r3, Handle(object->map()), &miss, + DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -2520,13 +2510,9 @@ Handle StoreStubCompiler::CompileStoreInterceptor( // ----------------------------------- Label miss; - // Check that the object isn't a smi. - __ JumpIfSmi(r1, &miss); - // Check that the map of the object hasn't changed. 
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ cmp(r3, Operand(Handle(receiver->map()))); - __ b(ne, &miss); + __ CheckMap(r1, r3, Handle(receiver->map()), &miss, + DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. if (receiver->IsJSGlobalProxy()) { diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index cec20fca07..8c705a84b4 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -371,7 +371,7 @@ class RelocInfo BASE_EMBEDDED { // routines expect to access these pointers indirectly. The following // location provides a place for these pointers to exist natually // when accessed via the Iterator. - Object *reconstructed_obj_ptr_; + Object* reconstructed_obj_ptr_; // External-reference pointers are also split across instruction-pairs // in mips, but are accessed via indirect pointers. This location // provides a place for that pointer to exist naturally. Its address diff --git a/deps/v8/src/atomicops_internals_x86_macosx.h b/deps/v8/src/atomicops_internals_x86_macosx.h index 2bac006bdc..bfb02b3851 100644 --- a/deps/v8/src/atomicops_internals_x86_macosx.h +++ b/deps/v8/src/atomicops_internals_x86_macosx.h @@ -35,7 +35,7 @@ namespace v8 { namespace internal { -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; @@ -49,7 +49,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, return prev_value; } -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value; do { @@ -59,12 +59,12 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, return old_value; } -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { return OSAtomicAdd32(increment, const_cast(ptr)); } -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { return OSAtomicAdd32Barrier(increment, const_cast(ptr)); } @@ -73,7 +73,7 @@ inline void MemoryBarrier() { OSMemoryBarrier(); } -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; @@ -87,7 +87,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, return prev_value; } -inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return Acquire_CompareAndSwap(ptr, old_value, new_value); @@ -97,12 +97,12 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } -inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } @@ -111,13 +111,13 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } -inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; 
MemoryBarrier(); return value; } -inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } @@ -126,7 +126,7 @@ inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { // 64-bit implementation on 64-bit platform -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; @@ -140,7 +140,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, return prev_value; } -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 old_value; do { @@ -150,17 +150,17 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, return old_value; } -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { return OSAtomicAdd64(increment, const_cast(ptr)); } -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { return OSAtomicAdd64Barrier(increment, const_cast(ptr)); } -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; @@ -174,7 +174,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, return prev_value; } -inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { // The lib kern interface does not distinguish between @@ -186,12 +186,12 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } -inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } @@ -200,13 +200,13 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } -inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } @@ -264,7 +264,7 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, old_value, new_value); } -inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { +inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) { NoBarrier_Store( reinterpret_cast(ptr), value); } @@ -279,7 +279,7 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { reinterpret_cast(ptr), value); } -inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { +inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) { return NoBarrier_Load( reinterpret_cast(ptr)); } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index d1bf975fb5..752b220e5b 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -264,13 +264,13 @@ class 
Genesis BASE_EMBEDDED { Handle CreateStrictModeFunctionMap( PrototypePropertyMode prototype_mode, Handle empty_function, - Handle arguments_callbacks, - Handle caller_callbacks); + Handle arguments_callbacks, + Handle caller_callbacks); Handle ComputeStrictFunctionInstanceDescriptor( PrototypePropertyMode propertyMode, - Handle arguments, - Handle caller); + Handle arguments, + Handle caller); static bool CompileBuiltin(Isolate* isolate, int index); static bool CompileExperimentalBuiltin(Isolate* isolate, int index); @@ -378,7 +378,9 @@ static Handle InstallFunction(Handle target, } else { attributes = DONT_ENUM; } - SetLocalPropertyNoThrow(target, symbol, function, attributes); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + target, symbol, function, attributes)); if (is_ecma_native) { function->shared()->set_instance_class_name(*symbol); } @@ -538,8 +540,8 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { Handle Genesis::ComputeStrictFunctionInstanceDescriptor( PrototypePropertyMode prototypeMode, - Handle arguments, - Handle caller) { + Handle arguments, + Handle caller) { Handle descriptors = factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 @@ -600,7 +602,7 @@ Handle Genesis::GetThrowTypeErrorFunction() { throw_type_error_function->shared()->set_code(*code); throw_type_error_function->shared()->DontAdaptArguments(); - PreventExtensions(throw_type_error_function); + JSObject::PreventExtensions(throw_type_error_function); } return throw_type_error_function; } @@ -609,8 +611,8 @@ Handle Genesis::GetThrowTypeErrorFunction() { Handle Genesis::CreateStrictModeFunctionMap( PrototypePropertyMode prototype_mode, Handle empty_function, - Handle arguments_callbacks, - Handle caller_callbacks) { + Handle arguments_callbacks, + Handle caller_callbacks) { Handle map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); Handle descriptors = ComputeStrictFunctionInstanceDescriptor(prototype_mode, @@ -627,8 +629,8 @@ void Genesis::CreateStrictModeFunctionMaps(Handle empty) { // Create the callbacks arrays for ThrowTypeError functions. // The get/set callacks are filled in after the maps are created below. Factory* factory = empty->GetIsolate()->factory(); - Handle arguments = factory->NewFixedArray(2, TENURED); - Handle caller = factory->NewFixedArray(2, TENURED); + Handle arguments(factory->NewAccessorPair()); + Handle caller(factory->NewAccessorPair()); // Allocate map for the strict mode function instances. Handle strict_mode_function_instance_map = @@ -663,11 +665,11 @@ void Genesis::CreateStrictModeFunctionMaps(Handle empty) { Handle throw_function = GetThrowTypeErrorFunction(); - // Complete the callback fixed arrays. - arguments->set(0, *throw_function); - arguments->set(1, *throw_function); - caller->set(0, *throw_function); - caller->set(1, *throw_function); + // Complete the callbacks. 
+ arguments->set_getter(*throw_function); + arguments->set_setter(*throw_function); + caller->set_getter(*throw_function); + caller->set_setter(*throw_function); } @@ -753,11 +755,10 @@ Handle Genesis::CreateNewGlobals( Handle prototype = Handle( JSObject::cast(js_global_function->instance_prototype())); - SetLocalPropertyNoThrow( - prototype, - factory()->constructor_symbol(), - isolate()->object_function(), - NONE); + CHECK_NOT_EMPTY_HANDLE(isolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + prototype, factory()->constructor_symbol(), + isolate()->object_function(), NONE)); } else { Handle js_global_constructor( FunctionTemplateInfo::cast(js_global_template->constructor())); @@ -834,7 +835,7 @@ void Genesis::HookUpInnerGlobal(Handle inner_global) { factory()->LookupAsciiSymbol("global"), inner_global, attributes); - // Setup the reference from the global object to the builtins object. + // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global); TransferNamedProperties(inner_global_from_snapshot, inner_global); TransferIndexedProperties(inner_global_from_snapshot, inner_global); @@ -863,8 +864,10 @@ void Genesis::InitializeGlobal(Handle inner_global, Heap* heap = isolate->heap(); Handle object_name = Handle(heap->Object_symbol()); - SetLocalPropertyNoThrow(inner_global, object_name, - isolate->object_function(), DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + inner_global, object_name, + isolate->object_function(), DONT_ENUM)); Handle global = Handle(global_context()->global()); @@ -1046,14 +1049,15 @@ void Genesis::InitializeGlobal(Handle inner_global, { // -- J S O N Handle name = factory->NewStringFromAscii(CStrVector("JSON")); - Handle cons = factory->NewFunction( - name, - factory->the_hole_value()); + Handle cons = factory->NewFunction(name, + factory->the_hole_value()); cons->SetInstancePrototype(global_context()->initial_object_prototype()); cons->SetInstanceClassName(*name); Handle json_object = factory->NewJSObject(cons, TENURED); ASSERT(json_object->IsJSObject()); - SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + global, name, json_object, DONT_ENUM)); global_context()->set_json_object(*json_object); } @@ -1083,12 +1087,14 @@ void Genesis::InitializeGlobal(Handle inner_global, global_context()->set_arguments_boilerplate(*result); // Note: length must be added as the first property and // callee must be added as the second property. - SetLocalPropertyNoThrow(result, factory->length_symbol(), - factory->undefined_value(), - DONT_ENUM); - SetLocalPropertyNoThrow(result, factory->callee_symbol(), - factory->undefined_value(), - DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + result, factory->length_symbol(), + factory->undefined_value(), DONT_ENUM)); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + result, factory->callee_symbol(), + factory->undefined_value(), DONT_ENUM)); #ifdef DEBUG LookupResult lookup(isolate); @@ -1136,17 +1142,17 @@ void Genesis::InitializeGlobal(Handle inner_global, static_cast(DONT_ENUM | DONT_DELETE | READ_ONLY); // Create the ThrowTypeError functions. 
- Handle callee = factory->NewFixedArray(2, TENURED); - Handle caller = factory->NewFixedArray(2, TENURED); + Handle callee = factory->NewAccessorPair(); + Handle caller = factory->NewAccessorPair(); Handle throw_function = GetThrowTypeErrorFunction(); // Install the ThrowTypeError functions. - callee->set(0, *throw_function); - callee->set(1, *throw_function); - caller->set(0, *throw_function); - caller->set(1, *throw_function); + callee->set_getter(*throw_function); + callee->set_setter(*throw_function); + caller->set_getter(*throw_function); + caller->set_setter(*throw_function); // Create the descriptor array for the arguments object. Handle descriptors = factory->NewDescriptorArray(3); @@ -1183,9 +1189,10 @@ void Genesis::InitializeGlobal(Handle inner_global, global_context()->set_strict_mode_arguments_boilerplate(*result); // Add length property only for strict mode boilerplate. - SetLocalPropertyNoThrow(result, factory->length_symbol(), - factory->undefined_value(), - DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + result, factory->length_symbol(), + factory->undefined_value(), DONT_ENUM)); #ifdef DEBUG LookupResult lookup(isolate); @@ -1353,7 +1360,7 @@ bool Genesis::CompileScriptCached(Vector name, if (cache != NULL) cache->Add(name, function_info); } - // Setup the function context. Conceptually, we should clone the + // Set up the function context. Conceptually, we should clone the // function before overwriting the context but since we're in a // single-threaded environment it is not strictly necessary. ASSERT(top_context->IsGlobalContext()); @@ -1440,7 +1447,7 @@ bool Genesis::InstallNatives() { builtins->set_global_context(*global_context()); builtins->set_global_receiver(*builtins); - // Setup the 'global' properties of the builtins object. The + // Set up the 'global' properties of the builtins object. The // 'global' property that refers to the global object is the only // way to get from code running in the builtins context to the // global object. @@ -1448,9 +1455,11 @@ bool Genesis::InstallNatives() { static_cast(READ_ONLY | DONT_DELETE); Handle global_symbol = factory()->LookupAsciiSymbol("global"); Handle global_obj(global_context()->global()); - SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes); + CHECK_NOT_EMPTY_HANDLE(isolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + builtins, global_symbol, global_obj, attributes)); - // Setup the reference from the global object to the builtins object. + // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins); // Create a bridge function that has context in the global context. @@ -1674,7 +1683,7 @@ bool Genesis::InstallNatives() { InstallNativeFunctions(); // Store the map for the string prototype after the natives has been compiled - // and the String function has been setup. + // and the String function has been set up. 
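Note on the bootstrapper hunks above: the two-element FixedArray that used to hold the arguments/caller poison callbacks is replaced by the new AccessorPair struct, allocated through Factory::NewAccessorPair. A usage sketch mirroring those hunks, with factory and thrower standing in for the locals used there:

    Handle<AccessorPair> callbacks = factory->NewAccessorPair();
    // Before this change the same data lived in a FixedArray(2):
    //   callbacks->set(0, *thrower);   // getter slot
    //   callbacks->set(1, *thrower);   // setter slot
    callbacks->set_getter(*thrower);
    callbacks->set_setter(*thrower);

Named slots make the getter/setter roles explicit, and the pair gets its own struct type (ACCESSOR_PAIR_TYPE, allocated by the heap.cc hunk near the end of this section).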
Handle string_function(global_context()->string_function()); ASSERT(JSObject::cast( string_function->initial_map()->prototype())->HasFastProperties()); @@ -1911,25 +1920,28 @@ bool Bootstrapper::InstallExtensions(Handle global_context, void Genesis::InstallSpecialObjects(Handle global_context) { - Factory* factory = global_context->GetIsolate()->factory(); + Isolate* isolate = global_context->GetIsolate(); + Factory* factory = isolate->factory(); HandleScope scope; - Handle js_global( - JSGlobalObject::cast(global_context->global())); + Handle global(JSGlobalObject::cast(global_context->global())); // Expose the natives in global if a name for it is specified. if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) { - Handle natives_string = - factory->LookupAsciiSymbol(FLAG_expose_natives_as); - SetLocalPropertyNoThrow(js_global, natives_string, - Handle(js_global->builtins()), DONT_ENUM); + Handle natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + global, natives, + Handle(global->builtins()), + DONT_ENUM)); } - Handle Error = GetProperty(js_global, "Error"); + Handle Error = GetProperty(global, "Error"); if (Error->IsJSObject()) { Handle name = factory->LookupAsciiSymbol("stackTraceLimit"); - SetLocalPropertyNoThrow(Handle::cast(Error), - name, - Handle(Smi::FromInt(FLAG_stack_trace_limit)), - NONE); + Handle stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit)); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + Handle::cast(Error), name, + stack_trace_limit, NONE)); } #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1948,7 +1960,9 @@ void Genesis::InstallSpecialObjects(Handle global_context) { Handle debug_string = factory->LookupAsciiSymbol(FLAG_expose_debug_as); Handle global_proxy(debug->debug_context()->global_proxy()); - SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate, + JSObject::SetLocalPropertyIgnoreAttributes( + global, debug_string, global_proxy, DONT_ENUM)); } #endif } @@ -2164,7 +2178,9 @@ void Genesis::TransferNamedProperties(Handle from, Handle key = Handle(descs->GetKey(i)); int index = descs->GetFieldIndex(i); Handle value = Handle(from->FastPropertyAt(index)); - SetLocalPropertyNoThrow(to, key, value, details.attributes()); + CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + to, key, value, details.attributes())); break; } case CONSTANT_FUNCTION: { @@ -2172,7 +2188,9 @@ void Genesis::TransferNamedProperties(Handle from, Handle key = Handle(descs->GetKey(i)); Handle fun = Handle(descs->GetConstantFunction(i)); - SetLocalPropertyNoThrow(to, key, fun, details.attributes()); + CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + to, key, fun, details.attributes())); break; } case CALLBACKS: { @@ -2187,7 +2205,7 @@ void Genesis::TransferNamedProperties(Handle from, Handle callbacks(descs->GetCallbacksObject(i)); PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, details.index()); - SetNormalizedProperty(to, key, callbacks, d); + JSObject::SetNormalizedProperty(to, key, callbacks, d); break; } case MAP_TRANSITION: @@ -2224,7 +2242,9 @@ void Genesis::TransferNamedProperties(Handle from, value = Handle(JSGlobalPropertyCell::cast(*value)->value()); } PropertyDetails details = properties->DetailsAt(i); - SetLocalPropertyNoThrow(to, key, value, details.attributes()); + 
CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + to, key, value, details.attributes())); } } } diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index abf61b9fe5..101c2e1b1f 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -88,7 +88,7 @@ class SourceCodeCache BASE_EMBEDDED { // context. class Bootstrapper { public: - // Requires: Heap::Setup has been called. + // Requires: Heap::SetUp has been called. void Initialize(bool create_heap_objects); void TearDown(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 916799499f..69e5161ce5 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -1719,7 +1719,7 @@ void Builtins::InitBuiltinFunctionTable() { #undef DEF_FUNCTION_PTR_A } -void Builtins::Setup(bool create_heap_objects) { +void Builtins::SetUp(bool create_heap_objects) { ASSERT(!initialized_); Isolate* isolate = Isolate::Current(); Heap* heap = isolate->heap(); diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 6a84f2ddd1..f20d97df5b 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -265,7 +265,7 @@ class Builtins { // Generate all builtin code objects. Should be called once during // isolate initialization. - void Setup(bool create_heap_objects); + void SetUp(bool create_heap_objects); void TearDown(); // Garbage collection support. diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 85410c3cc8..5fa9a2b5c0 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -40,7 +40,7 @@ namespace internal { bool CodeStub::FindCodeInCache(Code** code_out) { Heap* heap = Isolate::Current()->heap(); int index = heap->code_stubs()->FindEntry(GetKey()); - if (index != NumberDictionary::kNotFound) { + if (index != UnseededNumberDictionary::kNotFound) { *code_out = Code::cast(heap->code_stubs()->ValueAt(index)); return true; } @@ -132,9 +132,9 @@ Handle CodeStub::GetCode() { AddToSpecialCache(new_object); } else { // Update the dictionary and the root in Heap. - Handle dict = + Handle dict = factory->DictionaryAtNumberPut( - Handle(heap->code_stubs()), + Handle(heap->code_stubs()), GetKey(), new_object); heap->public_set_code_stubs(*dict); diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index d2a4a0bfdc..362273e27a 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -628,7 +628,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { // scope info. Please note, that the order of the shared function // info initialization is important since set_scope_info might // trigger a GC, causing the ASSERT below to be invalid if the code - // was flushed. By settting the code object last we avoid this. + // was flushed. By setting the code object last we avoid this. Handle scope_info = ScopeInfo::Create(info->scope()); shared->set_scope_info(*scope_info); shared->set_code(*code); diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index d74c034ac5..2bd62ad390 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -493,7 +493,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { NoBarrier_Store(&is_profiling_, true); processor_->Start(); // Enumerate stuff we already have in the heap. 
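Note on the property-installation pattern used throughout the bootstrapper hunks above: the old SetLocalPropertyNoThrow helper is replaced by JSObject::SetLocalPropertyIgnoreAttributes wrapped in CHECK_NOT_EMPTY_HANDLE. The macro itself is defined in isolate.h (also touched by this patch) and is not quoted here; judging from the helper that handles.cc stops defining further down, it presumably amounts to a sketch like this:

    // Sketch only; the real definition lives in deps/v8/src/isolate.h.
    #define CHECK_NOT_EMPTY_HANDLE(isolate, call)      \
      do {                                             \
        ASSERT(!(isolate)->has_pending_exception());   \
        CHECK(!(call).is_null());                      \
        CHECK(!(isolate)->has_pending_exception());    \
      } while (false)

That keeps the old "must not throw during bootstrapping" guarantee while the property-setting entry point moves onto JSObject instead of a free function in handles.cc.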
- if (isolate->heap()->HasBeenSetup()) { + if (isolate->heap()->HasBeenSetUp()) { if (!FLAG_prof_browser_mode) { bool saved_log_code_flag = FLAG_log_code; FLAG_log_code = true; @@ -562,7 +562,7 @@ void CpuProfiler::StopProcessor() { } -void CpuProfiler::Setup() { +void CpuProfiler::SetUp() { Isolate* isolate = Isolate::Current(); if (isolate->cpu_profiler() == NULL) { isolate->set_cpu_profiler(new CpuProfiler()); diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index a71c0e0ab4..3f4fec5f45 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -204,7 +204,7 @@ namespace internal { // TODO(isolates): isolatify this class. class CpuProfiler { public: - static void Setup(); + static void SetUp(); static void TearDown(); static void StartProfiling(const char* title); @@ -230,11 +230,11 @@ class CpuProfiler { Code* code, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, - SharedFunctionInfo *shared, + SharedFunctionInfo* shared, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, - SharedFunctionInfo *shared, + SharedFunctionInfo* shared, String* source, int line); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); diff --git a/deps/v8/src/cpu.h b/deps/v8/src/cpu.h index 2525484a01..247af71aa3 100644 --- a/deps/v8/src/cpu.h +++ b/deps/v8/src/cpu.h @@ -53,7 +53,7 @@ namespace internal { class CPU : public AllStatic { public: // Initializes the cpu architecture support. Called once at VM startup. - static void Setup(); + static void SetUp(); static bool SupportsCrankshaft(); diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index 8fbc876dab..1cbc0b39a0 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -169,7 +169,7 @@ void RemoteDebugger::Run() { bool ok; // Make sure that socket support is initialized. - ok = i::Socket::Setup(); + ok = i::Socket::SetUp(); if (!ok) { printf("Unable to initialize socket support %d\n", i::Socket::LastError()); return; diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index ad850f5ee7..97828a4ac4 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -281,63 +281,161 @@ Handle Shell::Load(const Arguments& args) { return Undefined(); } +static size_t convertToUint(Local value_in, TryCatch* try_catch) { + if (value_in->IsUint32()) { + return value_in->Uint32Value(); + } + + Local number = value_in->ToNumber(); + if (try_catch->HasCaught()) return 0; + + ASSERT(number->IsNumber()); + Local int32 = number->ToInt32(); + if (try_catch->HasCaught() || int32.IsEmpty()) return 0; + + int32_t raw_value = int32->Int32Value(); + if (try_catch->HasCaught()) return 0; + + if (raw_value < 0) { + ThrowException(String::New("Array length must not be negative.")); + return 0; + } + + static const int kMaxLength = 0x3fffffff; +#ifndef V8_SHARED + ASSERT(kMaxLength == i::ExternalArray::kMaxLength); +#endif // V8_SHARED + if (raw_value > static_cast(kMaxLength)) { + ThrowException( + String::New("Array length exceeds maximum length.")); + } + return static_cast(raw_value); +} + + +const char kArrayBufferReferencePropName[] = "_is_array_buffer_"; +const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_"; + Handle Shell::CreateExternalArray(const Arguments& args, ExternalArrayType type, size_t element_size) { + TryCatch try_catch; + bool is_array_buffer_construct = element_size == 0; + if (is_array_buffer_construct) { + type = v8::kExternalByteArray; + element_size = 1; + } ASSERT(element_size == 1 || element_size == 2 || element_size == 4 || element_size == 8); - if (args.Length() != 1) { + if (args.Length() == 0) { return ThrowException( - String::New("Array constructor needs one parameter.")); + String::New("Array constructor must have at least one " + "parameter.")); } - static const int kMaxLength = 0x3fffffff; -#ifndef V8_SHARED - ASSERT(kMaxLength == i::ExternalArray::kMaxLength); -#endif // V8_SHARED - size_t length = 0; - TryCatch try_catch; - if (args[0]->IsUint32()) { - length = args[0]->Uint32Value(); - } else { - Local number = args[0]->ToNumber(); - if (number.IsEmpty()) { - ASSERT(try_catch.HasCaught()); - return try_catch.Exception(); + bool first_arg_is_array_buffer = + args[0]->IsObject() && + args[0]->ToObject()->Get( + String::New(kArrayBufferMarkerPropName))->IsTrue(); + // Currently, only the following constructors are supported: + // TypedArray(unsigned long length) + // TypedArray(ArrayBuffer buffer, + // optional unsigned long byteOffset, + // optional unsigned long length) + if (args.Length() > 3) { + return ThrowException( + String::New("Array constructor from ArrayBuffer must " + "have 1-3 parameters.")); + } + + Local length_value = (args.Length() < 3) + ? (first_arg_is_array_buffer + ? 
args[0]->ToObject()->Get(String::New("length")) + : args[0]) + : args[2]; + size_t length = convertToUint(length_value, &try_catch); + if (try_catch.HasCaught()) return try_catch.Exception(); + + void* data = NULL; + size_t offset = 0; + + Handle array = Object::New(); + if (first_arg_is_array_buffer) { + Handle derived_from = args[0]->ToObject(); + data = derived_from->GetIndexedPropertiesExternalArrayData(); + + size_t array_buffer_length = convertToUint( + derived_from->Get(String::New("length")), + &try_catch); + if (try_catch.HasCaught()) return try_catch.Exception(); + + if (data == NULL && array_buffer_length != 0) { + return ThrowException( + String::New("ArrayBuffer doesn't have data")); } - ASSERT(number->IsNumber()); - Local int32 = number->ToInt32(); - if (int32.IsEmpty()) { - if (try_catch.HasCaught()) { - return try_catch.Exception(); + + if (args.Length() > 1) { + offset = convertToUint(args[1], &try_catch); + if (try_catch.HasCaught()) return try_catch.Exception(); + + // The given byteOffset must be a multiple of the element size of the + // specific type, otherwise an exception is raised. + if (offset % element_size != 0) { + return ThrowException( + String::New("offset must be multiple of element_size")); } } - int32_t raw_length = int32->Int32Value(); - if (try_catch.HasCaught()) { - return try_catch.Exception(); + + if (offset > array_buffer_length) { + return ThrowException( + String::New("byteOffset must be less than ArrayBuffer length.")); } - if (raw_length < 0) { - return ThrowException(String::New("Array length must not be negative.")); + + if (args.Length() == 2) { + // If length is not explicitly specified, the length of the ArrayBuffer + // minus the byteOffset must be a multiple of the element size of the + // specific type, or an exception is raised. + length = array_buffer_length - offset; + } + + if (args.Length() != 3) { + if (length % element_size != 0) { + return ThrowException( + String::New("ArrayBuffer length minus the byteOffset must be a " + "multiple of the element size")); + } + length /= element_size; } - if (raw_length > static_cast(kMaxLength)) { + + // If a given byteOffset and length references an area beyond the end of + // the ArrayBuffer an exception is raised. + if (offset + (length * element_size) > array_buffer_length) { return ThrowException( - String::New("Array length exceeds maximum length.")); + String::New("length references an area beyond the end of the " + "ArrayBuffer")); } - length = static_cast(raw_length); - } - if (length > static_cast(kMaxLength)) { - return ThrowException(String::New("Array length exceeds maximum length.")); + + // Hold a reference to the ArrayBuffer so its buffer doesn't get collected. 
+ array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly); } - void* data = calloc(length, element_size); - if (data == NULL) { - return ThrowException(String::New("Memory allocation failed.")); + + if (is_array_buffer_construct) { + array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly); } - Handle array = Object::New(); + Persistent persistent_array = Persistent::New(array); persistent_array.MakeWeak(data, ExternalArrayWeakCallback); persistent_array.MarkIndependent(); - array->SetIndexedPropertiesToExternalArrayData(data, type, - static_cast(length)); + if (data == NULL && length != 0) { + data = calloc(length, element_size); + if (data == NULL) { + return ThrowException(String::New("Memory allocation failed.")); + } + } + + array->SetIndexedPropertiesToExternalArrayData( + reinterpret_cast(data) + offset, type, + static_cast(length)); array->Set(String::New("length"), Int32::New(static_cast(length)), ReadOnly); array->Set(String::New("BYTES_PER_ELEMENT"), @@ -347,11 +445,22 @@ Handle Shell::CreateExternalArray(const Arguments& args, void Shell::ExternalArrayWeakCallback(Persistent object, void* data) { - free(data); + HandleScope scope; + Handle prop_name = String::New(kArrayBufferReferencePropName); + Handle converted_object = object->ToObject(); + Local prop_value = converted_object->Get(prop_name); + if (data != NULL && !prop_value->IsObject()) { + free(data); + } object.Dispose(); } +Handle Shell::ArrayBuffer(const Arguments& args) { + return CreateExternalArray(args, v8::kExternalByteArray, 0); +} + + Handle Shell::Int8Array(const Arguments& args) { return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t)); } @@ -693,6 +802,8 @@ Handle Shell::CreateGlobalTemplate() { FunctionTemplate::New(DisableProfiler)); // Bind the handlers for external arrays. + global_template->Set(String::New("ArrayBuffer"), + FunctionTemplate::New(ArrayBuffer)); global_template->Set(String::New("Int8Array"), FunctionTemplate::New(Int8Array)); global_template->Set(String::New("Uint8Array"), diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 15d8d5d50f..6c7733ccfa 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -289,6 +289,7 @@ class Shell : public i::AllStatic { static Handle Read(const Arguments& args); static Handle ReadLine(const Arguments& args); static Handle Load(const Arguments& args); + static Handle ArrayBuffer(const Arguments& args); static Handle Int8Array(const Arguments& args); static Handle Uint8Array(const Arguments& args); static Handle Int16Array(const Arguments& args); diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 8cbe0b362c..120a297007 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -1547,7 +1547,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) { } } - // Setup the VM for stepping. + // Set up the VM for stepping. 
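Note on the d8.cc changes above: they add a minimal ArrayBuffer constructor and let the typed-array constructors create views over it. Instead of always calloc'ing fresh storage, a view aliases the buffer's backing store at a byte offset, and a read-only reference property keeps the buffer alive so the weak callback only frees memory it actually owns. The core of that aliasing, with illustrative names (buffer, view, byte_offset and element_count are not taken from the patch):

    // The view shares the ArrayBuffer's storage rather than copying it.
    uint8_t* backing = static_cast<uint8_t*>(
        buffer->GetIndexedPropertiesExternalArrayData());
    view->SetIndexedPropertiesToExternalArrayData(
        backing + byte_offset,             // shared data, offset in bytes
        v8::kExternalIntArray,             // element type of this view
        static_cast<int>(element_count));  // length in elements, not bytes

The offset/length checks earlier in CreateExternalArray guarantee that this aliased range stays inside the ArrayBuffer and is a whole multiple of the element size.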
this.exec_state_.prepareStep(action, count); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 24e17823d8..ffba7821ca 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -682,7 +682,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent obj, void* data) { } -void Debug::Setup(bool create_heap_objects) { +void Debug::SetUp(bool create_heap_objects) { ThreadInit(); if (create_heap_objects) { // Get code to handle debug break on return. @@ -827,8 +827,8 @@ bool Debug::Load() { Handle global = Handle(context->global()); RETURN_IF_EMPTY_HANDLE_VALUE( isolate_, - SetProperty(global, key, Handle(global->builtins()), - NONE, kNonStrictMode), + JSReceiver::SetProperty(global, key, Handle(global->builtins()), + NONE, kNonStrictMode), false); // Compile the JavaScript for the debugger in the debugger context. @@ -1213,7 +1213,7 @@ void Debug::ClearAllBreakPoints() { void Debug::FloodWithOneShot(Handle shared) { PrepareForBreakPoints(); - // Make sure the function has setup the debug info. + // Make sure the function has set up the debug info. if (!EnsureDebugInfo(shared)) { // Return if we failed to retrieve the debug info. return; @@ -2855,7 +2855,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event, command.Dispose(); // Return from debug event processing if either the VM is put into the - // runnning state (through a continue command) or auto continue is active + // running state (through a continue command) or auto continue is active // and there are no more commands queued. if (running && !HasCommands()) { return; @@ -3065,7 +3065,7 @@ bool Debugger::StartAgent(const char* name, int port, v8::Debug::DebugBreak(); } - if (Socket::Setup()) { + if (Socket::SetUp()) { if (agent_ == NULL) { agent_ = new DebuggerAgent(name, port); agent_->Start(); diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index a39d8013e4..582aadae8d 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -178,7 +178,9 @@ class ScriptCache : private HashMap { private: // Calculate the hash value from the key (script id). - static uint32_t Hash(int key) { return ComputeIntegerHash(key); } + static uint32_t Hash(int key) { + return ComputeIntegerHash(key, v8::internal::kZeroHashSeed); + } // Scripts match if their keys (script id) match. static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; } @@ -222,7 +224,7 @@ class DebugInfoListNode { // DebugInfo. class Debug { public: - void Setup(bool create_heap_objects); + void SetUp(bool create_heap_objects); bool Load(); void Unload(); bool IsLoaded() { return !debug_context_.is_null(); } diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index fd2b6d248a..e54ec62691 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -549,11 +549,11 @@ class PixelElementsAccessor class DictionaryElementsAccessor : public ElementsAccessorBase { + SeededNumberDictionary> { public: // Adjusts the length of the dictionary backing store and returns the new // length according to ES5 section 15.4.5.2 behavior. 
- static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict, + static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict, JSArray* array, Object* length_object, uint32_t length) { @@ -619,9 +619,10 @@ class DictionaryElementsAccessor if (is_arguments) { backing_store = FixedArray::cast(backing_store->get(1)); } - NumberDictionary* dictionary = NumberDictionary::cast(backing_store); + SeededNumberDictionary* dictionary = + SeededNumberDictionary::cast(backing_store); int entry = dictionary->FindEntry(key); - if (entry != NumberDictionary::kNotFound) { + if (entry != SeededNumberDictionary::kNotFound) { Object* result = dictionary->DeleteProperty(entry, mode); if (result == heap->true_value()) { MaybeObject* maybe_elements = dictionary->Shrink(key); @@ -654,7 +655,7 @@ class DictionaryElementsAccessor protected: friend class ElementsAccessorBase; + SeededNumberDictionary>; virtual MaybeObject* Delete(JSObject* obj, uint32_t key, @@ -662,12 +663,12 @@ class DictionaryElementsAccessor return DeleteCommon(obj, key, mode); } - static MaybeObject* Get(NumberDictionary* backing_store, + static MaybeObject* Get(SeededNumberDictionary* backing_store, uint32_t key, JSObject* obj, Object* receiver) { int entry = backing_store->FindEntry(key); - if (entry != NumberDictionary::kNotFound) { + if (entry != SeededNumberDictionary::kNotFound) { Object* element = backing_store->ValueAt(entry); PropertyDetails details = backing_store->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -682,7 +683,7 @@ class DictionaryElementsAccessor return obj->GetHeap()->the_hole_value(); } - static uint32_t GetKeyForIndex(NumberDictionary* dict, + static uint32_t GetKeyForIndex(SeededNumberDictionary* dict, uint32_t index) { Object* key = dict->KeyAt(index); return Smi::cast(key)->value(); @@ -895,7 +896,7 @@ MaybeObject* ElementsAccessorBase:: if (length->IsNumber()) { uint32_t value; if (length->ToArrayIndex(&value)) { - NumberDictionary* dictionary; + SeededNumberDictionary* dictionary; MaybeObject* maybe_object = array->NormalizeElements(); if (!maybe_object->To(&dictionary)) return maybe_object; Object* new_length; diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index b16e7396ad..125241ceec 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -845,13 +845,13 @@ Object* Execution::DebugBreakHelper() { // Clear the debug break request flag. isolate->stack_guard()->Continue(DEBUGBREAK); - ProcessDebugMesssages(debug_command_only); + ProcessDebugMessages(debug_command_only); // Return to continue execution. return isolate->heap()->undefined_value(); } -void Execution::ProcessDebugMesssages(bool debug_command_only) { +void Execution::ProcessDebugMessages(bool debug_command_only) { Isolate* isolate = Isolate::Current(); // Clear the debug command request flag. 
isolate->stack_guard()->Continue(DEBUGCOMMAND); diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index f2d17d0792..014736ee88 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -136,7 +136,7 @@ class Execution : public AllStatic { Handle is_global); #ifdef ENABLE_DEBUGGER_SUPPORT static Object* DebugBreakHelper(); - static void ProcessDebugMesssages(bool debug_command_only); + static void ProcessDebugMessages(bool debug_command_only); #endif // If the stack guard is triggered, but it is not an actual diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index c2976a577a..0b796350d4 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -77,11 +77,21 @@ Handle Factory::NewStringDictionary(int at_least_space_for) { } -Handle Factory::NewNumberDictionary(int at_least_space_for) { +Handle Factory::NewSeededNumberDictionary( + int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - NumberDictionary::Allocate(at_least_space_for), - NumberDictionary); + SeededNumberDictionary::Allocate(at_least_space_for), + SeededNumberDictionary); +} + + +Handle Factory::NewUnseededNumberDictionary( + int at_least_space_for) { + ASSERT(0 <= at_least_space_for); + CALL_HEAP_FUNCTION(isolate(), + UnseededNumberDictionary::Allocate(at_least_space_for), + UnseededNumberDictionary); } @@ -131,6 +141,13 @@ Handle Factory::NewDeoptimizationOutputData( } +Handle Factory::NewAccessorPair() { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->AllocateAccessorPair(), + AccessorPair); +} + + // Symbols are created in the old generation (data space). Handle Factory::LookupSymbol(Vector string) { CALL_HEAP_FUNCTION(isolate(), @@ -698,7 +715,7 @@ Handle Factory::NewFunction(Handle name, // Allocate the function Handle function = NewFunction(name, the_hole_value()); - // Setup the code pointer in both the shared function info and in + // Set up the code pointer in both the shared function info and in // the function itself. function->shared()->set_code(*code); function->set_code(*code); @@ -729,7 +746,7 @@ Handle Factory::NewFunctionWithPrototype(Handle name, // Allocate the function. Handle function = NewFunction(name, prototype); - // Setup the code pointer in both the shared function info and in + // Set up the code pointer in both the shared function info and in // the function itself. function->shared()->set_code(*code); function->set_code(*code); @@ -751,7 +768,10 @@ Handle Factory::NewFunctionWithPrototype(Handle name, // property that refers to the function. SetPrototypeProperty(function, prototype); // Currently safe because it is only invoked from Genesis. 
- SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM); + CHECK_NOT_EMPTY_HANDLE(isolate(), + JSObject::SetLocalPropertyIgnoreAttributes( + prototype, constructor_symbol(), + function, DONT_ENUM)); return function; } @@ -1061,13 +1081,23 @@ Handle Factory::Uint32ToString(uint32_t value) { } -Handle Factory::DictionaryAtNumberPut( - Handle dictionary, +Handle Factory::DictionaryAtNumberPut( + Handle dictionary, + uint32_t key, + Handle value) { + CALL_HEAP_FUNCTION(isolate(), + dictionary->AtNumberPut(key, *value), + SeededNumberDictionary); +} + + +Handle Factory::DictionaryAtNumberPut( + Handle dictionary, uint32_t key, Handle value) { CALL_HEAP_FUNCTION(isolate(), dictionary->AtNumberPut(key, *value), - NumberDictionary); + UnseededNumberDictionary); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index e9a43fd4fc..8725b67ec9 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -54,7 +54,11 @@ class Factory { int size, PretenureFlag pretenure = NOT_TENURED); - Handle NewNumberDictionary(int at_least_space_for); + Handle NewSeededNumberDictionary( + int at_least_space_for); + + Handle NewUnseededNumberDictionary( + int at_least_space_for); Handle NewStringDictionary(int at_least_space_for); @@ -69,6 +73,8 @@ class Factory { Handle NewDeoptimizationOutputData( int deopt_entry_count, PretenureFlag pretenure); + // Allocates a pre-tenured empty AccessorPair. + Handle NewAccessorPair(); Handle LookupSymbol(Vector str); Handle LookupSymbol(Handle str); @@ -430,8 +436,13 @@ class Factory { Handle stack_trace, Handle stack_frames); - Handle DictionaryAtNumberPut( - Handle, + Handle DictionaryAtNumberPut( + Handle, + uint32_t key, + Handle value); + + Handle DictionaryAtNumberPut( + Handle, uint32_t key, Handle value); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 07060dcb03..9284e1369f 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -349,13 +349,13 @@ DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") -DEFINE_bool(randomize_string_hashes, +DEFINE_bool(randomize_hashes, true, - "randomize string hashes to avoid predictable hash collisions " + "randomize hashes to avoid predictable hash collisions " "(with snapshots this option cannot override the baked-in seed)") -DEFINE_int(string_hash_seed, +DEFINE_int(hash_seed, 0, - "Fixed seed to use to string hashing (0 means random)" + "Fixed seed to use to hash property keys (0 means random)" "(with snapshots this option cannot override the baked-in seed)") // v8.cc diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index e3ed2de4e3..3a46e0869f 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -485,7 +485,7 @@ Code* ExitFrame::unchecked_code() const { void ExitFrame::ComputeCallerState(State* state) const { - // Setup the caller state. + // Set up the caller state. 
state->sp = caller_sp(); state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset); state->pc_address @@ -1303,7 +1303,8 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* isolate_->counters()->pc_to_code()->Increment(); ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize)); uint32_t hash = ComputeIntegerHash( - static_cast(reinterpret_cast(inner_pointer))); + static_cast(reinterpret_cast(inner_pointer)), + v8::internal::kZeroHashSeed); uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1); InnerPointerToCodeCacheEntry* entry = cache(index); if (entry->inner_pointer == inner_pointer) { diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 04086d483d..5c7a23d54d 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -1178,7 +1178,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) { } ExitFinallyBlock(); // Return to the calling code. - // Setup try handler. + // Set up try handler. __ bind(&try_entry); __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index()); { TryFinally try_body(this, &finally_entry); @@ -1284,7 +1284,7 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit( bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) { - Expression *sub_expr; + Expression* sub_expr; Handle check; if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { EmitLiteralCompareTypeof(expr, sub_expr, check); diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc index b386bed177..4192222f90 100644 --- a/deps/v8/src/gdb-jit.cc +++ b/deps/v8/src/gdb-jit.cc @@ -1556,23 +1556,23 @@ class DebugLineSection : public DebugSection { class UnwindInfoSection : public DebugSection { public: - explicit UnwindInfoSection(CodeDescription *desc); - virtual bool WriteBody(Writer *w); + explicit UnwindInfoSection(CodeDescription* desc); + virtual bool WriteBody(Writer* w); - int WriteCIE(Writer *w); - void WriteFDE(Writer *w, int); + int WriteCIE(Writer* w); + void WriteFDE(Writer* w, int); - void WriteFDEStateOnEntry(Writer *w); - void WriteFDEStateAfterRBPPush(Writer *w); - void WriteFDEStateAfterRBPSet(Writer *w); - void WriteFDEStateAfterRBPPop(Writer *w); + void WriteFDEStateOnEntry(Writer* w); + void WriteFDEStateAfterRBPPush(Writer* w); + void WriteFDEStateAfterRBPSet(Writer* w); + void WriteFDEStateAfterRBPPop(Writer* w); - void WriteLength(Writer *w, + void WriteLength(Writer* w, Writer::Slot* length_slot, int initial_position); private: - CodeDescription *desc_; + CodeDescription* desc_; // DWARF3 Specification, Table 7.23 enum CFIInstructions { @@ -1623,7 +1623,7 @@ class UnwindInfoSection : public DebugSection { }; -void UnwindInfoSection::WriteLength(Writer *w, +void UnwindInfoSection::WriteLength(Writer* w, Writer::Slot* length_slot, int initial_position) { uint32_t align = (w->position() - initial_position) % kPointerSize; @@ -1639,7 +1639,7 @@ void UnwindInfoSection::WriteLength(Writer *w, } -UnwindInfoSection::UnwindInfoSection(CodeDescription *desc) +UnwindInfoSection::UnwindInfoSection(CodeDescription* desc) #ifdef __ELF : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), #else @@ -1648,7 +1648,7 @@ UnwindInfoSection::UnwindInfoSection(CodeDescription *desc) #endif desc_(desc) { } -int UnwindInfoSection::WriteCIE(Writer *w) { +int UnwindInfoSection::WriteCIE(Writer* w) { Writer::Slot cie_length_slot = w->CreateSlotHere(); uint32_t cie_position = w->position(); @@ -1668,7 +1668,7 @@ int UnwindInfoSection::WriteCIE(Writer *w) { } -void 
UnwindInfoSection::WriteFDE(Writer *w, int cie_position) { +void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) { // The only FDE for this function. The CFA is the current RBP. Writer::Slot fde_length_slot = w->CreateSlotHere(); int fde_position = w->position(); @@ -1686,7 +1686,7 @@ void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) { } -void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) { +void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) { // The first state, just after the control has been transferred to the the // function. @@ -1713,7 +1713,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) { // The second state, just after RBP has been pushed. // RBP / CFA for this function is now the current RSP, so just set the @@ -1734,7 +1734,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) { // The third state, after the RBP has been set. // The CFA can now directly be set to RBP. @@ -1749,7 +1749,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) { } -void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) { +void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) { // The fourth (final) state. The RBP has been popped (just before issuing a // return). @@ -1769,7 +1769,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) { } -bool UnwindInfoSection::WriteBody(Writer *w) { +bool UnwindInfoSection::WriteBody(Writer* w) { uint32_t cie_position = WriteCIE(w); WriteFDE(w, cie_position); return true; @@ -1810,8 +1810,8 @@ extern "C" { struct JITDescriptor { uint32_t version_; uint32_t action_flag_; - JITCodeEntry *relevant_entry_; - JITCodeEntry *first_entry_; + JITCodeEntry* relevant_entry_; + JITCodeEntry* first_entry_; }; // GDB will place breakpoint into this function. 
@@ -1998,7 +1998,7 @@ void GDBJITInterface::AddCode(Handle name, } } -static void AddUnwindInfo(CodeDescription *desc) { +static void AddUnwindInfo(CodeDescription* desc) { #ifdef V8_TARGET_ARCH_X64 if (desc->tag() == GDBJITInterface::FUNCTION) { // To avoid propagating unwinding information through diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 2ff797d077..34eaddbbd7 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -208,42 +208,6 @@ void SetExpectedNofPropertiesFromEstimate(Handle shared, } -void NormalizeProperties(Handle object, - PropertyNormalizationMode mode, - int expected_additional_properties) { - CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), - object->NormalizeProperties( - mode, - expected_additional_properties)); -} - - -Handle NormalizeElements(Handle object) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->NormalizeElements(), - NumberDictionary); -} - - -void TransformToFastProperties(Handle object, - int unused_property_fields) { - CALL_HEAP_FUNCTION_VOID( - object->GetIsolate(), - object->TransformToFastProperties(unused_property_fields)); -} - - -Handle NumberDictionarySet( - Handle dictionary, - uint32_t index, - Handle value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(dictionary->GetIsolate(), - dictionary->Set(index, *value, details), - NumberDictionary); -} - - void FlattenString(Handle string) { CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten()); } @@ -265,17 +229,6 @@ Handle SetPrototype(Handle function, } -Handle SetProperty(Handle object, - Handle key, - Handle value, - PropertyAttributes attributes, - StrictModeFlag strict_mode) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetProperty(*key, *value, attributes, strict_mode), - Object); -} - - Handle SetProperty(Handle object, Handle key, Handle value, @@ -303,16 +256,6 @@ Handle ForceSetProperty(Handle object, } -Handle SetNormalizedProperty(Handle object, - Handle key, - Handle value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetNormalizedProperty(*key, *value, details), - Object); -} - - Handle ForceDeleteProperty(Handle object, Handle key) { Isolate* isolate = object->GetIsolate(); @@ -322,30 +265,6 @@ Handle ForceDeleteProperty(Handle object, } -Handle SetLocalPropertyIgnoreAttributes( - Handle object, - Handle key, - Handle value, - PropertyAttributes attributes) { - CALL_HEAP_FUNCTION( - object->GetIsolate(), - object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes), - Object); -} - - -void SetLocalPropertyNoThrow(Handle object, - Handle key, - Handle value, - PropertyAttributes attributes) { - Isolate* isolate = object->GetIsolate(); - ASSERT(!isolate->has_pending_exception()); - CHECK(!SetLocalPropertyIgnoreAttributes( - object, key, value, attributes).is_null()); - CHECK(!isolate->has_pending_exception()); -} - - Handle SetPropertyWithInterceptor(Handle object, Handle key, Handle value, @@ -389,12 +308,6 @@ Handle GetPropertyWithInterceptor(Handle receiver, } -Handle GetPrototype(Handle obj) { - Handle result(obj->GetPrototype()); - return result; -} - - Handle SetPrototype(Handle obj, Handle value) { const bool skip_hidden_prototypes = false; CALL_HEAP_FUNCTION(obj->GetIsolate(), @@ -402,44 +315,6 @@ Handle SetPrototype(Handle obj, Handle value) { } -Handle PreventExtensions(Handle object) { - CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object); -} - - -Handle SetHiddenProperty(Handle obj, - Handle key, - Handle value) { - 
CALL_HEAP_FUNCTION(obj->GetIsolate(), - obj->SetHiddenProperty(*key, *value), - Object); -} - - -int GetIdentityHash(Handle obj) { - CALL_AND_RETRY(obj->GetIsolate(), - obj->GetIdentityHash(ALLOW_CREATION), - return Smi::cast(__object__)->value(), - return 0); -} - - -Handle DeleteElement(Handle obj, - uint32_t index) { - CALL_HEAP_FUNCTION(obj->GetIsolate(), - obj->DeleteElement(index, JSObject::NORMAL_DELETION), - Object); -} - - -Handle DeleteProperty(Handle obj, - Handle prop) { - CALL_HEAP_FUNCTION(obj->GetIsolate(), - obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION), - Object); -} - - Handle LookupSingleCharacterStringFromCode(uint32_t index) { Isolate* isolate = Isolate::Current(); CALL_HEAP_FUNCTION( @@ -457,43 +332,6 @@ Handle SubString(Handle str, } -Handle SetElement(Handle object, - uint32_t index, - Handle value, - StrictModeFlag strict_mode) { - if (object->HasExternalArrayElements()) { - if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) { - bool has_exception; - Handle number = Execution::ToNumber(value, &has_exception); - if (has_exception) return Handle(); - value = number; - } - } - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetElement(index, *value, strict_mode, true), - Object); -} - - -Handle SetOwnElement(Handle object, - uint32_t index, - Handle value, - StrictModeFlag strict_mode) { - ASSERT(!object->HasExternalArrayElements()); - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetElement(index, *value, strict_mode, false), - Object); -} - - -Handle TransitionElementsKind(Handle object, - ElementsKind to_kind) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->TransitionElementsKind(to_kind), - Object); -} - - Handle Copy(Handle obj) { Isolate* isolate = obj->GetIsolate(); CALL_HEAP_FUNCTION(isolate, diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index cfa65b3786..42089134e4 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -167,18 +167,6 @@ class HandleScope { // an object of expected type, or the handle is an error if running out // of space or encountering an internal error. -void NormalizeProperties(Handle object, - PropertyNormalizationMode mode, - int expected_additional_properties); -Handle NormalizeElements(Handle object); -void TransformToFastProperties(Handle object, - int unused_property_fields); -MUST_USE_RESULT Handle NumberDictionarySet( - Handle dictionary, - uint32_t index, - Handle value, - PropertyDetails details); - // Flattens a string. void FlattenString(Handle str); @@ -186,12 +174,6 @@ void FlattenString(Handle str); // string. Handle FlattenGetString(Handle str); -Handle SetProperty(Handle object, - Handle key, - Handle value, - PropertyAttributes attributes, - StrictModeFlag strict_mode); - Handle SetProperty(Handle object, Handle key, Handle value, @@ -203,40 +185,9 @@ Handle ForceSetProperty(Handle object, Handle value, PropertyAttributes attributes); -Handle SetNormalizedProperty(Handle object, - Handle key, - Handle value, - PropertyDetails details); - Handle ForceDeleteProperty(Handle object, Handle key); -Handle SetLocalPropertyIgnoreAttributes( - Handle object, - Handle key, - Handle value, - PropertyAttributes attributes); - -// Used to set local properties on the object we totally control -// and which therefore has no accessors and alikes. 
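Note on the handles.cc and handles.h deletions in this region: a large set of free-function handle wrappers (SetProperty, SetNormalizedProperty, SetLocalPropertyIgnoreAttributes, PreventExtensions, SetElement and friends) is removed, and the handle-based replacements are static members of JSObject/JSReceiver. That is why call sites elsewhere in this patch change shape; a before/after sketch of the typical migration, using the argument lists visible in the debug.cc and bootstrapper.cc hunks (call-site shape only, not a self-contained program):

    // before: free functions declared in handles.h
    //   SetProperty(global, key, builtins, NONE, kNonStrictMode);
    //   PreventExtensions(throw_type_error_function);
    // after: static members on the receiver classes
    //   JSReceiver::SetProperty(global, key, builtins, NONE, kNonStrictMode);
    //   JSObject::PreventExtensions(throw_type_error_function);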
-void SetLocalPropertyNoThrow(Handle object, - Handle key, - Handle value, - PropertyAttributes attributes = NONE); - -MUST_USE_RESULT Handle SetElement(Handle object, - uint32_t index, - Handle value, - StrictModeFlag strict_mode); - -Handle SetOwnElement(Handle object, - uint32_t index, - Handle value, - StrictModeFlag strict_mode); - -Handle TransitionElementsKind(Handle object, - ElementsKind to_kind); - Handle GetProperty(Handle obj, const char* name); @@ -248,21 +199,8 @@ Handle GetPropertyWithInterceptor(Handle receiver, Handle name, PropertyAttributes* attributes); -Handle GetPrototype(Handle obj); - Handle SetPrototype(Handle obj, Handle value); -// Sets a hidden property on an object. Returns obj on success, undefined -// if trying to set the property on a detached proxy. -Handle SetHiddenProperty(Handle obj, - Handle key, - Handle value); - -int GetIdentityHash(Handle obj); - -Handle DeleteElement(Handle obj, uint32_t index); -Handle DeleteProperty(Handle obj, Handle prop); - Handle LookupSingleCharacterStringFromCode(uint32_t index); Handle Copy(Handle obj); @@ -316,7 +254,6 @@ Handle SubString(Handle str, int end, PretenureFlag pretenure = NOT_TENURED); - // Sets the expected number of properties for the function's instances. void SetExpectedNofProperties(Handle func, int nof); @@ -335,8 +272,6 @@ Handle ReinitializeJSGlobalProxy( Handle SetPrototype(Handle function, Handle prototype); -Handle PreventExtensions(Handle object); - Handle ObjectHashSetAdd(Handle table, Handle key); diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index ef6e58ed0b..4d98fbad10 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -463,7 +463,7 @@ MaybeObject* Heap::PrepareForCompare(String* str) { int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { - ASSERT(HasBeenSetup()); + ASSERT(HasBeenSetUp()); int amount = amount_of_external_allocated_memory_ + change_in_bytes; if (change_in_bytes >= 0) { // Avoid overflow. diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 46c63c27c8..8be6f27685 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -51,7 +51,7 @@ void HeapProfiler::ResetSnapshots() { } -void HeapProfiler::Setup() { +void HeapProfiler::SetUp() { Isolate* isolate = Isolate::Current(); if (isolate->heap_profiler() == NULL) { isolate->set_heap_profiler(new HeapProfiler()); diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index b1bc91c307..ef5c4f4b4a 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -48,7 +48,7 @@ class HeapSnapshotsCollection; // to generate .hp files for use by the GHC/Valgrind tool hp2ps. 
class HeapProfiler { public: - static void Setup(); + static void SetUp(); static void TearDown(); static HeapSnapshot* TakeSnapshot(const char* name, diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 31cd889cbe..3c871e2706 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -176,7 +176,7 @@ Heap::Heap() intptr_t Heap::Capacity() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.Capacity() + old_pointer_space_->Capacity() + @@ -188,7 +188,7 @@ intptr_t Heap::Capacity() { intptr_t Heap::CommittedMemory() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() + @@ -200,14 +200,14 @@ intptr_t Heap::CommittedMemory() { } intptr_t Heap::CommittedMemoryExecutable() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return isolate()->memory_allocator()->SizeExecutable(); } intptr_t Heap::Available() { - if (!HasBeenSetup()) return 0; + if (!HasBeenSetUp()) return 0; return new_space_.Available() + old_pointer_space_->Available() + @@ -218,7 +218,7 @@ intptr_t Heap::Available() { } -bool Heap::HasBeenSetup() { +bool Heap::HasBeenSetUp() { return old_pointer_space_ != NULL && old_data_space_ != NULL && code_space_ != NULL && @@ -1354,6 +1354,28 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { } +void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { + AssertNoAllocation no_allocation; + + class VisitorAdapter : public ObjectVisitor { + public: + explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor) + : visitor_(visitor) {} + virtual void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + if ((*p)->IsExternalString()) { + visitor_->VisitExternalString(Utils::ToLocal( + Handle(String::cast(*p)))); + } + } + } + private: + v8::ExternalResourceVisitor* visitor_; + } visitor_adapter(visitor); + external_string_table_.Iterate(&visitor_adapter); +} + + class NewSpaceScavenger : public StaticNewSpaceVisitor { public: static inline void VisitPointer(Heap* heap, Object** p) { @@ -1869,6 +1891,19 @@ MaybeObject* Heap::AllocatePolymorphicCodeCache() { } +MaybeObject* Heap::AllocateAccessorPair() { + Object* result; + { MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE); + if (!maybe_result->ToObject(&result)) return maybe_result; + } + AccessorPair* accessors = AccessorPair::cast(result); + // Later we will have to distinguish between undefined and the hole... + // accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER); + // accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER); + return accessors; +} + + const Heap::StringTypeTable Heap::string_type_table[] = { #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ {type, size, k##camel_name##MapRootIndex}, @@ -2428,18 +2463,18 @@ bool Heap::CreateInitialObjects() { // Allocate the code_stubs dictionary. The initial size is set to avoid // expanding the dictionary during bootstrapping. - { MaybeObject* maybe_obj = NumberDictionary::Allocate(128); + { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128); if (!maybe_obj->ToObject(&obj)) return false; } - set_code_stubs(NumberDictionary::cast(obj)); + set_code_stubs(UnseededNumberDictionary::cast(obj)); // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size // is set to avoid expanding the dictionary during bootstrapping. 
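The Heap::VisitExternalResources added above adapts an embedder-supplied v8::ExternalResourceVisitor to the internal ObjectVisitor interface and walks the external string table with it. Assuming the matching public entry point this patch adds on the v8.h side (V8::VisitExternalResources calling back into ExternalResourceVisitor::VisitExternalString), embedder usage might look like this sketch:

    // Hedged sketch of embedder-side usage; the entry-point name is assumed
    // from the v8.h changes in this patch, not shown in this hunk.
    #include <v8.h>
    #include <cstdio>

    class ExternalStringCounter : public v8::ExternalResourceVisitor {
     public:
      ExternalStringCounter() : count_(0) {}
      virtual void VisitExternalString(v8::Handle<v8::String> string) {
        count_++;  // tally strings whose payload lives outside the V8 heap
      }
      int count() const { return count_; }
     private:
      int count_;
    };

    void ReportExternalStrings() {
      ExternalStringCounter counter;
      v8::V8::VisitExternalResources(&counter);  // assumed public entry point
      std::printf("external strings: %d\n", counter.count());
    }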
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(64); + { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64); if (!maybe_obj->ToObject(&obj)) return false; } - set_non_monomorphic_cache(NumberDictionary::cast(obj)); + set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj)); { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache(); if (!maybe_obj->ToObject(&obj)) return false; @@ -3794,7 +3829,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) { } Map* new_map = Map::cast(obj); - // Setup the global object as a normalized object. + // Set up the global object as a normalized object. global->set_map(new_map); global->map()->clear_instance_descriptors(); global->set_properties(dictionary); @@ -4727,7 +4762,7 @@ bool Heap::IdleGlobalGC() { #ifdef DEBUG void Heap::Print() { - if (!HasBeenSetup()) return; + if (!HasBeenSetUp()) return; isolate()->PrintStack(); AllSpaces spaces; for (Space* space = spaces.next(); space != NULL; space = spaces.next()) @@ -4792,7 +4827,7 @@ bool Heap::Contains(HeapObject* value) { bool Heap::Contains(Address addr) { if (OS::IsOutsideAllocatedSpace(addr)) return false; - return HasBeenSetup() && + return HasBeenSetUp() && (new_space_.ToSpaceContains(addr) || old_pointer_space_->Contains(addr) || old_data_space_->Contains(addr) || @@ -4810,7 +4845,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) { bool Heap::InSpace(Address addr, AllocationSpace space) { if (OS::IsOutsideAllocatedSpace(addr)) return false; - if (!HasBeenSetup()) return false; + if (!HasBeenSetUp()) return false; switch (space) { case NEW_SPACE: @@ -4835,7 +4870,7 @@ bool Heap::InSpace(Address addr, AllocationSpace space) { #ifdef DEBUG void Heap::Verify() { - ASSERT(HasBeenSetup()); + ASSERT(HasBeenSetUp()); store_buffer()->Verify(); @@ -5262,7 +5297,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { bool Heap::ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size) { - if (HasBeenSetup()) return false; + if (HasBeenSetUp()) return false; if (max_semispace_size > 0) { if (max_semispace_size < Page::kPageSize) { @@ -5551,7 +5586,7 @@ class HeapDebugUtils { #endif -bool Heap::Setup(bool create_heap_objects) { +bool Heap::SetUp(bool create_heap_objects) { #ifdef DEBUG allocation_timeout_ = FLAG_gc_interval; debug_utils_ = new HeapDebugUtils(this); @@ -5581,12 +5616,12 @@ bool Heap::Setup(bool create_heap_objects) { MarkMapPointersAsEncoded(false); - // Setup memory allocator. - if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize())) + // Set up memory allocator. + if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize())) return false; - // Setup new space. - if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) { + // Set up new space. + if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) { return false; } @@ -5597,7 +5632,7 @@ bool Heap::Setup(bool create_heap_objects) { OLD_POINTER_SPACE, NOT_EXECUTABLE); if (old_pointer_space_ == NULL) return false; - if (!old_pointer_space_->Setup()) return false; + if (!old_pointer_space_->SetUp()) return false; // Initialize old data space. 
old_data_space_ = @@ -5606,14 +5641,14 @@ bool Heap::Setup(bool create_heap_objects) { OLD_DATA_SPACE, NOT_EXECUTABLE); if (old_data_space_ == NULL) return false; - if (!old_data_space_->Setup()) return false; + if (!old_data_space_->SetUp()) return false; // Initialize the code space, set its maximum capacity to the old // generation size. It needs executable memory. // On 64-bit platform(s), we put all code objects in a 2 GB range of // virtual address space, so that they can call each other with near calls. if (code_range_size_ > 0) { - if (!isolate_->code_range()->Setup(code_range_size_)) { + if (!isolate_->code_range()->SetUp(code_range_size_)) { return false; } } @@ -5621,7 +5656,7 @@ bool Heap::Setup(bool create_heap_objects) { code_space_ = new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); if (code_space_ == NULL) return false; - if (!code_space_->Setup()) return false; + if (!code_space_->SetUp()) return false; // Initialize map space. map_space_ = new MapSpace(this, @@ -5629,28 +5664,28 @@ bool Heap::Setup(bool create_heap_objects) { FLAG_max_map_space_pages, MAP_SPACE); if (map_space_ == NULL) return false; - if (!map_space_->Setup()) return false; + if (!map_space_->SetUp()) return false; // Initialize global property cell space. cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); if (cell_space_ == NULL) return false; - if (!cell_space_->Setup()) return false; + if (!cell_space_->SetUp()) return false; // The large object code space may contain code or data. We set the memory // to be non-executable here for safety, but this means we need to enable it // explicitly when allocating large code objects. lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); if (lo_space_ == NULL) return false; - if (!lo_space_->Setup()) return false; + if (!lo_space_->SetUp()) return false; - // Setup the seed that is used to randomize the string hash function. - ASSERT(string_hash_seed() == 0); - if (FLAG_randomize_string_hashes) { - if (FLAG_string_hash_seed == 0) { - set_string_hash_seed( + // Set up the seed that is used to randomize the string hash function. 
+ ASSERT(hash_seed() == 0); + if (FLAG_randomize_hashes) { + if (FLAG_hash_seed == 0) { + set_hash_seed( Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff)); } else { - set_string_hash_seed(Smi::FromInt(FLAG_string_hash_seed)); + set_hash_seed(Smi::FromInt(FLAG_hash_seed)); } } @@ -5668,7 +5703,7 @@ bool Heap::Setup(bool create_heap_objects) { LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); LOG(isolate_, IntPtrTEvent("heap-available", Available())); - store_buffer()->Setup(); + store_buffer()->SetUp(); return true; } diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 2d993bbc2b..a1a53db4a3 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -96,7 +96,7 @@ inline Heap* _inline_get_heap_(); V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, string_split_cache, StringSplitCache) \ V(Object, termination_exception, TerminationException) \ - V(Smi, string_hash_seed, StringHashSeed) \ + V(Smi, hash_seed, HashSeed) \ V(Map, string_map, StringMap) \ V(Map, symbol_map, SymbolMap) \ V(Map, cons_string_map, ConsStringMap) \ @@ -146,8 +146,8 @@ inline Heap* _inline_get_heap_(); V(Map, neander_map, NeanderMap) \ V(JSObject, message_listeners, MessageListeners) \ V(Foreign, prototype_accessors, PrototypeAccessors) \ - V(NumberDictionary, code_stubs, CodeStubs) \ - V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \ + V(UnseededNumberDictionary, code_stubs, CodeStubs) \ + V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \ V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \ V(Code, js_entry_code, JsEntryCode) \ V(Code, js_construct_entry_code, JsConstructEntryCode) \ @@ -434,7 +434,7 @@ class ExternalStringTable { class Heap { public: // Configure heap size before setup. Return false if the heap has been - // setup already. + // set up already. bool ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size); @@ -443,7 +443,7 @@ class Heap { // Initializes the global object heap. If create_heap_objects is true, // also creates the basic non-mutable objects. // Returns whether it succeeded. - bool Setup(bool create_heap_objects); + bool SetUp(bool create_heap_objects); // Destroys all memory allocated by the heap. void TearDown(); @@ -453,8 +453,8 @@ class Heap { // jslimit_/real_jslimit_ variable in the StackGuard. void SetStackLimits(); - // Returns whether Setup has been called. - bool HasBeenSetup(); + // Returns whether SetUp has been called. + bool HasBeenSetUp(); // Returns the maximum amount of memory reserved for the heap. For // the young generation, we reserve 4 times the amount needed for a @@ -615,6 +615,9 @@ class Heap { // Allocates an empty PolymorphicCodeCache. MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache(); + // Allocates a pre-tenured empty AccessorPair. + MUST_USE_RESULT MaybeObject* AllocateAccessorPair(); + // Clear the Instanceof cache (used when a prototype changes). inline void ClearInstanceofCache(); @@ -1136,7 +1139,7 @@ class Heap { inline AllocationSpace TargetSpaceId(InstanceType type); // Sets the stub_cache_ (only used when expanding the dictionary). - void public_set_code_stubs(NumberDictionary* value) { + void public_set_code_stubs(UnseededNumberDictionary* value) { roots_[kCodeStubsRootIndex] = value; } @@ -1148,7 +1151,7 @@ class Heap { } // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). 
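The & 0x3fffffff in the seeding code above is what keeps the generated seed storable: hash_seed is a root held as a Smi, and on 32-bit targets a Smi only covers -2^30 .. 2^30 - 1, so the random value is clamped to a non-negative 30-bit integer before Smi::FromInt. A trivial sketch of the same clamping (the raw bits here are a stand-in, not V8::RandomPrivate):

    uint32_t raw_bits = 0xdeadbeefu;                     // stand-in entropy
    int seed = static_cast<int>(raw_bits & 0x3fffffff);  // 0 .. 2^30-1, Smi-safe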
- void public_set_non_monomorphic_cache(NumberDictionary* value) { + void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) { roots_[kNonMonomorphicCacheRootIndex] = value; } @@ -1409,6 +1412,8 @@ class Heap { void ProcessWeakReferences(WeakObjectRetainer* retainer); + void VisitExternalResources(v8::ExternalResourceVisitor* visitor); + // Helper function that governs the promotion policy from new space to // old. If the object's old address lies below the new space's age // mark or if we've already filled the bottom 1/16th of the to space, @@ -1506,9 +1511,9 @@ class Heap { return idle_notification_will_schedule_next_gc_; } - uint32_t StringHashSeed() { - uint32_t seed = static_cast(string_hash_seed()->value()); - ASSERT(FLAG_randomize_string_hashes || seed == 0); + uint32_t HashSeed() { + uint32_t seed = static_cast(hash_seed()->value()); + ASSERT(FLAG_randomize_hashes || seed == 0); return seed; } @@ -1911,7 +1916,7 @@ class Heap { PromotionQueue promotion_queue_; // Flag is set when the heap has been configured. The heap can be repeatedly - // configured through the API until it is setup. + // configured through the API until it is set up. bool configured_; ExternalStringTable external_string_table_; diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index 1ff7f16fdf..887e80afc4 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -788,6 +788,29 @@ HValue* HTypeof::Canonicalize() { } +HValue* HBitwise::Canonicalize() { + if (!representation().IsInteger32()) return this; + // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x. + int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0; + if (left()->IsConstant() && + HConstant::cast(left())->HasInteger32Value() && + HConstant::cast(left())->Integer32Value() == nop_constant) { + return right(); + } + if (right()->IsConstant() && + HConstant::cast(right())->HasInteger32Value() && + HConstant::cast(right())->Integer32Value() == nop_constant) { + return left(); + } + return this; +} + + +HValue* HChange::Canonicalize() { + return (from().Equals(to())) ? value() : this; +} + + void HTypeof::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 1856c80929..5082e4d3ff 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -146,6 +146,7 @@ class LChunkBuilder; V(Parameter) \ V(Power) \ V(PushArgument) \ + V(Random) \ V(RegExpLiteral) \ V(Return) \ V(Sar) \ @@ -1130,12 +1131,16 @@ class HChange: public HUnaryOperation { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); virtual HType CalculateInferredType(); + virtual HValue* Canonicalize(); Representation from() { return value()->representation(); } Representation to() { return representation(); } bool deoptimize_on_undefined() const { return CheckFlag(kDeoptimizeOnUndefined); } + bool deoptimize_on_minus_zero() const { + return CheckFlag(kBailoutOnMinusZero); + } virtual Representation RequiredInputRepresentation(int index) { return from(); } @@ -1921,8 +1926,11 @@ class HLoadExternalArrayPointer: public HUnaryOperation { class HCheckMap: public HTemplateInstruction<2> { public: - HCheckMap(HValue* value, Handle map, HValue* typecheck = NULL) - : map_(map) { + HCheckMap(HValue* value, Handle map, + HValue* typecheck = NULL, + CompareMapMode mode = REQUIRE_EXACT_MAP) + : map_(map), + mode_(mode) { SetOperandAt(0, value); // If callers don't depend on a typecheck, they can pass in NULL. In that // case we use a copy of the |value| argument as a dummy value. @@ -1930,6 +1938,9 @@ class HCheckMap: public HTemplateInstruction<2> { set_representation(Representation::Tagged()); SetFlag(kUseGVN); SetFlag(kDependsOnMaps); + has_element_transitions_ = + map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL || + map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL; } virtual Representation RequiredInputRepresentation(int index) { @@ -1940,17 +1951,24 @@ class HCheckMap: public HTemplateInstruction<2> { HValue* value() { return OperandAt(0); } Handle map() const { return map_; } + CompareMapMode mode() const { return mode_; } DECLARE_CONCRETE_INSTRUCTION(CheckMap) protected: virtual bool DataEquals(HValue* other) { HCheckMap* b = HCheckMap::cast(other); - return map_.is_identical_to(b->map()); + // Two CheckMaps instructions are DataEqual if their maps are identical and + // they have the same mode. The mode comparison can be ignored if the map + // has no elements transitions. + return map_.is_identical_to(b->map()) && + (b->mode() == mode() || !has_element_transitions_); } private: + bool has_element_transitions_; Handle map_; + CompareMapMode mode_; }; @@ -2985,6 +3003,23 @@ class HPower: public HTemplateInstruction<2> { }; +class HRandom: public HTemplateInstruction<1> { + public: + explicit HRandom(HValue* global_object) { + SetOperandAt(0, global_object); + set_representation(Representation::Double()); + } + + HValue* global_object() { return OperandAt(0); } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(Random) +}; + + class HAdd: public HArithmeticBinaryOperation { public: HAdd(HValue* context, HValue* left, HValue* right) @@ -3138,6 +3173,8 @@ class HBitwise: public HBitwiseBinaryOperation { virtual bool IsCommutative() const { return true; } + virtual HValue* Canonicalize(); + static HInstruction* NewHBitwise(Zone* zone, Token::Value op, HValue* context, diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 9230870432..47dcc80536 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. 
All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -628,7 +628,11 @@ HGraph::HGraph(CompilationInfo* info) Handle HGraph::Compile(CompilationInfo* info) { int values = GetMaximumValueID(); if (values > LAllocator::max_initial_value_ids()) { - if (FLAG_trace_bailout) PrintF("Function is too big\n"); + if (FLAG_trace_bailout) { + SmartArrayPointer name( + info->shared_info()->DebugName()->ToCString()); + PrintF("Function @\"%s\" is too big.\n", *name); + } return Handle::null(); } @@ -2301,7 +2305,7 @@ HGraph* HGraphBuilder::CreateGraph() { Bailout("function with illegal redeclaration"); return NULL; } - SetupScope(scope); + SetUpScope(scope); // Add an edge to the body entry. This is warty: the graph's start // environment will be used by the Lithium translation as the initial @@ -2465,7 +2469,7 @@ HInstruction* HGraphBuilder::PreProcessCall(HCall* call) { } -void HGraphBuilder::SetupScope(Scope* scope) { +void HGraphBuilder::SetUpScope(Scope* scope) { HConstant* undefined_constant = new(zone()) HConstant( isolate()->factory()->undefined_value(), Representation::Tagged()); AddInstruction(undefined_constant); @@ -3572,7 +3576,8 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object, bool smi_and_map_check) { if (smi_and_map_check) { AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(new(zone()) HCheckMap(object, type)); + AddInstruction(new(zone()) HCheckMap(object, type, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); } int index = ComputeStoredFieldIndex(type, name, lookup); @@ -4117,7 +4122,8 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object, bool smi_and_map_check) { if (smi_and_map_check) { AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(new(zone()) HCheckMap(object, type)); + AddInstruction(new(zone()) HCheckMap(object, type, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); } int index = lookup->GetLocalFieldIndexFromMap(*type); @@ -4157,7 +4163,8 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj, true); } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) { AddInstruction(new(zone()) HCheckNonSmi(obj)); - AddInstruction(new(zone()) HCheckMap(obj, map)); + AddInstruction(new(zone()) HCheckMap(obj, map, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); Handle function(lookup.GetConstantFunctionFromMap(*map)); return new(zone()) HConstant(function, Representation::Tagged()); } else { @@ -4652,7 +4659,8 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr, // its prototypes. 
if (smi_and_map_check) { AddInstruction(new(zone()) HCheckNonSmi(receiver)); - AddInstruction(new(zone()) HCheckMap(receiver, receiver_map)); + AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); } if (!expr->holder().is_null()) { AddInstruction(new(zone()) HCheckPrototypeMaps( @@ -5124,6 +5132,69 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr, return true; } break; + case kMathRandom: + if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) { + AddCheckConstantFunction(expr, receiver, receiver_map, true); + Drop(1); + HValue* context = environment()->LookupContext(); + HGlobalObject* global_object = new(zone()) HGlobalObject(context); + AddInstruction(global_object); + HRandom* result = new(zone()) HRandom(global_object); + ast_context()->ReturnInstruction(result, expr->id()); + return true; + } + break; + case kMathMax: + case kMathMin: + if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) { + AddCheckConstantFunction(expr, receiver, receiver_map, true); + HValue* right = Pop(); + HValue* left = Pop(); + // Do not inline if the return representation is not certain. + if (!left->representation().Equals(right->representation())) { + Push(left); + Push(right); + return false; + } + + Pop(); // Pop receiver. + Token::Value op = (id == kMathMin) ? Token::LT : Token::GT; + HCompareIDAndBranch* compare = NULL; + + if (left->representation().IsTagged()) { + HChange* left_cvt = + new(zone()) HChange(left, Representation::Double(), false, true); + left_cvt->SetFlag(HValue::kBailoutOnMinusZero); + AddInstruction(left_cvt); + HChange* right_cvt = + new(zone()) HChange(right, Representation::Double(), false, true); + right_cvt->SetFlag(HValue::kBailoutOnMinusZero); + AddInstruction(right_cvt); + compare = new(zone()) HCompareIDAndBranch(left_cvt, right_cvt, op); + compare->SetInputRepresentation(Representation::Double()); + } else { + compare = new(zone()) HCompareIDAndBranch(left, right, op); + compare->SetInputRepresentation(left->representation()); + } + + HBasicBlock* return_left = graph()->CreateBasicBlock(); + HBasicBlock* return_right = graph()->CreateBasicBlock(); + + compare->SetSuccessorAt(0, return_left); + compare->SetSuccessorAt(1, return_right); + current_block()->Finish(compare); + + set_current_block(return_left); + Push(left); + set_current_block(return_right); + Push(right); + + HBasicBlock* join = CreateJoin(return_left, return_right, expr->id()); + set_current_block(join); + ast_context()->ReturnValue(Pop()); + return true; + } + break; default: // Not yet supported for inlining. break; @@ -6195,9 +6266,11 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { Handle map = oracle()->GetCompareMap(expr); if (!map.is_null()) { AddInstruction(new(zone()) HCheckNonSmi(left)); - AddInstruction(new(zone()) HCheckMap(left, map)); + AddInstruction(new(zone()) HCheckMap(left, map, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); AddInstruction(new(zone()) HCheckNonSmi(right)); - AddInstruction(new(zone()) HCheckMap(right, map)); + AddInstruction(new(zone()) HCheckMap(right, map, NULL, + ALLOW_ELEMENT_TRANSITION_MAPS)); HCompareObjectEqAndBranch* result = new(zone()) HCompareObjectEqAndBranch(left, right); result->set_position(expr->position()); @@ -6569,7 +6642,11 @@ void HGraphBuilder::GenerateLog(CallRuntime* call) { // Fast support for Math.random(). 
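In the Math.min/Math.max inlining above, tagged inputs are converted to doubles through HChange instructions flagged with kBailoutOnMinusZero. The flag matters because -0 and +0 compare equal, yet Math.min(0, -0) must return -0, so the optimized code either has to distinguish the two or deoptimize. The sign-bit test that the EmitNumberUntagD change later in this patch performs with movmskpd can be written portably like this sketch:

    // -0.0 == 0.0 is true, so an ordinary compare cannot see the sign; only
    // the sign bit distinguishes them (what movmskpd/test_b check in
    // EmitNumberUntagD).
    #include <cstdint>
    #include <cstring>

    static bool IsMinusZero(double value) {
      if (value != 0.0) return false;          // the ucomisd-against-zero step
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return (bits >> 63) != 0;                // set only for -0.0 among zeros
    }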
void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { - return Bailout("inlined runtime function: RandomHeapNumber"); + HValue* context = environment()->LookupContext(); + HGlobalObject* global_object = new(zone()) HGlobalObject(context); + AddInstruction(global_object); + HRandom* result = new(zone()) HRandom(global_object); + return ast_context()->ReturnInstruction(result, call->id()); } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index ded1356d18..9705859066 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -870,7 +870,7 @@ class HGraphBuilder: public AstVisitor { Representation rep); static Representation ToRepresentation(TypeInfo info); - void SetupScope(Scope* scope); + void SetUpScope(Scope* scope); virtual void VisitStatements(ZoneList* statements); #define DECLARE_VISIT(type) virtual void Visit##type(type* node); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 7a5a191644..bb050b63f9 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -350,7 +350,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) } #endif - // Setup buffer pointers. + // Set up buffer pointers. ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); @@ -377,7 +377,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. - // Setup code descriptor. + // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); @@ -2457,7 +2457,7 @@ void Assembler::GrowBuffer() { V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); } - // Setup new buffer. + // Set up new buffer. desc.buffer = NewArray(desc.buffer_size); desc.instr_size = pc_offset(); desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos()); diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 55f66f1df8..4666311af6 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -333,7 +333,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ push(ebx); __ push(ebx); - // Setup pointer to last argument. + // Set up pointer to last argument. __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); // Copy arguments and receiver to the expression stack. diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 9bc024f40d..b654390c2b 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -128,14 +128,14 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Get the function from the stack. __ mov(ecx, Operand(esp, 1 * kPointerSize)); - // Setup the object header. + // Set up the object header. Factory* factory = masm->isolate()->factory(); __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->function_context_map()); __ mov(FieldOperand(eax, Context::kLengthOffset), Immediate(Smi::FromInt(length))); - // Setup the fixed slots. + // Set up the fixed slots. __ Set(ebx, Immediate(0)); // Set to NULL. 
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi); @@ -179,7 +179,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Get the serialized scope info from the stack. __ mov(ebx, Operand(esp, 2 * kPointerSize)); - // Setup the object header. + // Set up the object header. Factory* factory = masm->isolate()->factory(); __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->block_context_map()); @@ -202,7 +202,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX)); __ bind(&after_sentinel); - // Setup the fixed slots. + // Set up the fixed slots. __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx); __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi); __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx); @@ -3379,7 +3379,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(FieldOperand(eax, i), edx); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ mov(edx, Operand(esp, 4 * kPointerSize)); __ mov(FieldOperand(eax, JSObject::kHeaderSize + @@ -3392,7 +3392,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize), ecx); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, edi will point there, otherwise to the // backing store. __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); @@ -3571,7 +3571,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ mov(edx, Operand(esp, 2 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict)); __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); @@ -4950,7 +4950,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, handler_entry, exit; Label not_outermost_js, not_outermost_js_2; - // Setup frame. + // Set up frame. 
__ push(ebp); __ mov(ebp, esp); @@ -5081,8 +5081,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { static const int kDeltaToCmpImmediate = 2; static const int kDeltaToMov = 8; static const int kDeltaToMovImmediate = 9; - static const int8_t kCmpEdiImmediateByte1 = BitCast(0x81); - static const int8_t kCmpEdiImmediateByte2 = BitCast(0xff); + static const int8_t kCmpEdiOperandByte1 = BitCast(0x3b); + static const int8_t kCmpEdiOperandByte2 = BitCast(0x3d); static const int8_t kMovEaxImmediateByte = BitCast(0xb8); ExternalReference roots_array_start = @@ -5147,12 +5147,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(scratch, Operand(esp, 0 * kPointerSize)); __ sub(scratch, Operand(esp, 1 * kPointerSize)); if (FLAG_debug_code) { - __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1); + __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1); __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)"); - __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2); + __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2); __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)"); } - __ mov(Operand(scratch, kDeltaToCmpImmediate), map); + __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate)); + __ mov(Operand(scratch, 0), map); } // Loop through the prototype chain of the object looking for the function @@ -6037,7 +6038,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, if (Serializer::enabled()) { ExternalReference roots_array_start = ExternalReference::roots_array_start(masm->isolate()); - __ mov(scratch, Immediate(Heap::kStringHashSeedRootIndex)); + __ mov(scratch, Immediate(Heap::kHashSeedRootIndex)); __ mov(scratch, Operand::StaticArray(scratch, times_pointer_size, roots_array_start)); @@ -6046,7 +6047,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, __ shl(scratch, 10); __ add(hash, scratch); } else { - int32_t seed = masm->isolate()->heap()->StringHashSeed(); + int32_t seed = masm->isolate()->heap()->HashSeed(); __ lea(scratch, Operand(character, seed)); __ shl(scratch, 10); __ lea(hash, Operand(scratch, character, times_1, seed)); @@ -6091,14 +6092,12 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, __ shl(scratch, 15); __ add(hash, scratch); - uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1; - __ and_(hash, kHashShiftCutOffMask); + __ and_(hash, String::kHashBitMask); // if (hash == 0) hash = 27; Label hash_not_zero; - __ test(hash, hash); __ j(not_zero, &hash_not_zero, Label::kNear); - __ mov(hash, Immediate(27)); + __ mov(hash, Immediate(StringHasher::kZeroHash)); __ bind(&hash_not_zero); } diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc index 57e66df9e3..9eabb2a969 100644 --- a/deps/v8/src/ia32/cpu-ia32.cc +++ b/deps/v8/src/ia32/cpu-ia32.cc @@ -41,7 +41,7 @@ namespace v8 { namespace internal { -void CPU::Setup() { +void CPU::SetUp() { CpuFeatures::Probe(); } diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 98c240079e..292315d10c 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -406,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0] = input_; output_[0]->SetPc(reinterpret_cast(from_)); } else { - // Setup the frame pointer and the context pointer. + // Set up the frame pointer and the context pointer. // All OSR stack frames are dynamically aligned to an 8-byte boundary. 
int frame_pointer = input_->GetRegister(ebp.code()); if ((frame_pointer & 0x4) == 0) { diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 6e2391110b..4f3274436a 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset)); __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset)); - // Setup the four remaining stack slots. + // Set up the four remaining stack slots. __ push(eax); // Map. __ push(edx); // Enumeration cache. __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset)); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 137d62c554..8d412fdb53 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1831,7 +1831,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch( // Branches to a label or falls through with the answer in the z flag. Trashes -// the temp registers, but not the input. Only input and temp2 may alias. +// the temp registers, but not the input. void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, Handleclass_name, @@ -1839,7 +1839,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Register temp, Register temp2) { ASSERT(!input.is(temp)); - ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. + ASSERT(!input.is(temp2)); + ASSERT(!temp.is(temp2)); __ JumpIfSmi(input, is_false); if (class_name->IsEqualTo(CStrVector("Function"))) { @@ -1899,12 +1900,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Register input = ToRegister(instr->InputAt(0)); Register temp = ToRegister(instr->TempAt(0)); Register temp2 = ToRegister(instr->TempAt(1)); - if (input.is(temp)) { - // Swap. - Register swapper = temp; - temp = temp2; - temp2 = swapper; - } + Handle class_name = instr->hydrogen()->class_name(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -1979,7 +1975,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Register map = ToRegister(instr->TempAt(0)); __ mov(map, FieldOperand(object, HeapObject::kMapOffset)); __ bind(deferred->map_check()); // Label for calculating code patching. - __ cmp(map, factory()->the_hole_value()); // Patched to cached map. + Handle cache_cell = + factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); + __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map. __ j(not_equal, &cache_miss, Label::kNear); __ mov(eax, factory()->the_hole_value()); // Patched to either true or false. __ jmp(&done); @@ -3016,6 +3014,29 @@ void LCodeGen::DoPower(LPower* instr) { } +void LCodeGen::DoRandom(LRandom* instr) { + // Having marked this instruction as a call we can use any + // registers. 
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); + ASSERT(ToRegister(instr->InputAt(0)).is(eax)); + + __ PrepareCallCFunction(1, ebx); + __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset)); + __ mov(Operand(esp, 0), eax); + __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); + + // Convert 32 random bits in eax to 0.(32 random bits) in a double + // by computing: + // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). + __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. + __ movd(xmm2, ebx); + __ movd(xmm1, eax); + __ cvtss2sd(xmm2, xmm2); + __ xorps(xmm1, xmm2); + __ subsd(xmm1, xmm2); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3678,8 +3699,10 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { void LCodeGen::EmitNumberUntagD(Register input_reg, + Register temp_reg, XMMRegister result_reg, bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, LEnvironment* env) { Label load_smi, done; @@ -3708,6 +3731,15 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } // Heap number to XMM conversion. __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + XMMRegister xmm_scratch = xmm0; + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(result_reg, xmm_scratch); + __ j(not_zero, &done, Label::kNear); + __ movmskpd(temp_reg, result_reg); + __ test_b(temp_reg, 1); + DeoptimizeIf(not_zero, env); + } __ jmp(&done, Label::kNear); // Smi to XMM conversion @@ -3830,14 +3862,23 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); + LOperand* temp = instr->TempAt(0); + ASSERT(temp == NULL || temp->IsRegister()); LOperand* result = instr->result(); ASSERT(result->IsDoubleRegister()); Register input_reg = ToRegister(input); XMMRegister result_reg = ToDoubleRegister(result); - EmitNumberUntagD(input_reg, result_reg, + bool deoptimize_on_minus_zero = + instr->hydrogen()->deoptimize_on_minus_zero(); + Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; + + EmitNumberUntagD(input_reg, + temp_reg, + result_reg, instr->hydrogen()->deoptimize_on_undefined(), + deoptimize_on_minus_zero, instr->environment()); } @@ -4033,13 +4074,23 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { } +void LCodeGen::DoCheckMapCommon(Register reg, + Handle map, + CompareMapMode mode, + LEnvironment* env) { + Label success; + __ CompareMap(reg, map, &success, mode); + DeoptimizeIf(not_equal, env); + __ bind(&success); +} + + void LCodeGen::DoCheckMap(LCheckMap* instr) { LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); Register reg = ToRegister(input); - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), - instr->hydrogen()->map()); - DeoptimizeIf(not_equal, instr->environment()); + Handle map = instr->hydrogen()->map(); + DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment()); } @@ -4102,9 +4153,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { // Check prototype maps up to the holder. 
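The DoRandom code above turns 32 random bits into a double in [0, 1) without an integer-to-float conversion: the bits are XORed into the low mantissa bits of the constant 2^20 (0x49800000 as a single, widened to a double by cvtss2sd), and subtracting 2^20 then leaves random_bits * 2^-32. The same trick in portable C++, as a sketch:

    #include <cstdint>
    #include <cstring>

    // Mirrors the movd/cvtss2sd/xorps/subsd sequence in LCodeGen::DoRandom.
    static double RandomBitsToDouble(uint32_t random_bits) {
      const double kBase = 1048576.0;     // 2^20, the "1.0 x 2^20" constant
      uint64_t bits;
      std::memcpy(&bits, &kBase, sizeof bits);
      bits ^= random_bits;                // splice bits into the low mantissa
      double spliced;
      std::memcpy(&spliced, &bits, sizeof spliced);
      return spliced - kBase;             // == random_bits * 2^-32, in [0, 1)
    }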
while (!current_prototype.is_identical_to(holder)) { - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), - Handle(current_prototype->map())); - DeoptimizeIf(not_equal, instr->environment()); + DoCheckMapCommon(reg, Handle(current_prototype->map()), + ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); + current_prototype = Handle(JSObject::cast(current_prototype->GetPrototype())); // Load next prototype object. @@ -4112,9 +4163,8 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { } // Check the holder map. - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), - Handle(current_prototype->map())); - DeoptimizeIf(not_equal, instr->environment()); + DoCheckMapCommon(reg, Handle(current_prototype->map()), + ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); } @@ -4139,7 +4189,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { DeoptimizeIf(not_equal, instr->environment()); } - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(eax, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); @@ -4247,7 +4297,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { Handle constant_properties = instr->hydrogen()->constant_properties(); - // Setup the parameters to the stub/runtime call. + // Set up the parameters to the stub/runtime call. __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(eax, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 375f137d7b..d86d48cd8c 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -110,6 +110,9 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); + void DoCheckMapCommon(Register reg, Handle map, + CompareMapMode mode, LEnvironment* env); + // Parallel move support. void DoParallelMove(LParallelMove* move); void DoGap(LGap* instr); @@ -265,8 +268,10 @@ class LCodeGen BASE_EMBEDDED { void EmitGoto(int block); void EmitBranch(int left_block, int right_block, Condition cc); void EmitNumberUntagD(Register input, + Register temp, XMMRegister result, bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, LEnvironment* env); // Emits optimized code for typeof x == "y". Modifies input register. @@ -380,7 +385,7 @@ class LDeferredCode: public ZoneObject { virtual void Generate() = 0; virtual LInstruction* instr() = 0; - void SetExit(Label *exit) { external_exit_ = exit; } + void SetExit(Label* exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } int instruction_index() const { return instruction_index_; } diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 5cd276f300..8435a3c2a1 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. 
All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1047,22 +1047,31 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* v = instr->value(); - if (v->EmitAtUses()) { - ASSERT(v->IsConstant()); - ASSERT(!v->representation().IsDouble()); - HBasicBlock* successor = HConstant::cast(v)->ToBoolean() + HValue* value = instr->value(); + if (value->EmitAtUses()) { + ASSERT(value->IsConstant()); + ASSERT(!value->representation().IsDouble()); + HBasicBlock* successor = HConstant::cast(value)->ToBoolean() ? instr->FirstSuccessor() : instr->SecondSuccessor(); return new(zone()) LGoto(successor->block_id()); } + + // Untagged integers or doubles, smis and booleans don't require a + // deoptimization environment nor a temp register. + Representation rep = value->representation(); + HType type = value->type(); + if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) { + return new(zone()) LBranch(UseRegister(value), NULL); + } + ToBooleanStub::Types expected = instr->expected_input_types(); // We need a temporary register when we have to access the map *or* we have // no type info yet, in which case we handle all cases (including the ones // involving maps). bool needs_temp = expected.NeedsMap() || expected.IsEmpty(); LOperand* temp = needs_temp ? TempRegister() : NULL; - return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp)); + return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp)); } @@ -1388,7 +1397,11 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { temp = TempRegister(); } LMulI* mul = new(zone()) LMulI(left, right, temp); - return AssignEnvironment(DefineSameAsFirst(mul)); + if (instr->CheckFlag(HValue::kCanOverflow) || + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + AssignEnvironment(mul); + } + return DefineSameAsFirst(mul); } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); } else { @@ -1456,6 +1469,15 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { } +LInstruction* LChunkBuilder::DoRandom(HRandom* instr) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->global_object()->representation().IsTagged()); + LOperand* global_object = UseFixed(instr->global_object(), eax); + LRandom* result = new(zone()) LRandom(global_object); + return MarkAsCall(DefineFixedDouble(result, xmm1), instr); +} + + LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { ASSERT(instr->left()->representation().IsTagged()); ASSERT(instr->right()->representation().IsTagged()); @@ -1588,9 +1610,9 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()), - TempRegister(), - TempRegister()); + return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()), + TempRegister(), + TempRegister()); } @@ -1616,7 +1638,7 @@ LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) { LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LOperand* object = UseRegister(instr->value()); LValueOf* result = new(zone()) LValueOf(object, TempRegister()); - return AssignEnvironment(DefineSameAsFirst(result)); + return DefineSameAsFirst(result); 
} @@ -1660,7 +1682,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { if (from.IsTagged()) { if (to.IsDouble()) { LOperand* value = UseRegister(instr->value()); - LNumberUntagD* res = new(zone()) LNumberUntagD(value); + // Temp register only necessary for minus zero check. + LOperand* temp = instr->deoptimize_on_minus_zero() + ? TempRegister() + : NULL; + LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp); return AssignEnvironment(DefineAsRegister(res)); } else { ASSERT(to.IsInteger32()); @@ -1956,7 +1982,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement( LOperand* obj = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); - return AssignEnvironment(DefineAsRegister(result)); + if (instr->RequiresHoleCheck()) AssignEnvironment(result); + return DefineAsRegister(result); } diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index cc3762d7ab..67bf9376c7 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -135,6 +135,7 @@ class LCodeGen; V(OuterContext) \ V(Parameter) \ V(Power) \ + V(Random) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -1043,6 +1044,17 @@ class LPower: public LTemplateInstruction<1, 2, 0> { }; +class LRandom: public LTemplateInstruction<1, 1, 0> { + public: + explicit LRandom(LOperand* global_object) { + inputs_[0] = global_object; + } + + DECLARE_CONCRETE_INSTRUCTION(Random, "random") + DECLARE_HYDROGEN_ACCESSOR(Random) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) @@ -1612,10 +1624,11 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> { }; -class LNumberUntagD: public LTemplateInstruction<1, 1, 0> { +class LNumberUntagD: public LTemplateInstruction<1, 1, 1> { public: - explicit LNumberUntagD(LOperand* value) { + explicit LNumberUntagD(LOperand* value, LOperand* temp) { inputs_[0] = value; + temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 3356e81892..43f265cf01 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -487,15 +487,48 @@ void MacroAssembler::StoreNumberToDoubleElements( } +void MacroAssembler::CompareMap(Register obj, + Handle map, + Label* early_success, + CompareMapMode mode) { + cmp(FieldOperand(obj, HeapObject::kMapOffset), map); + if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { + Map* transitioned_fast_element_map( + map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); + ASSERT(transitioned_fast_element_map == NULL || + map->elements_kind() != FAST_ELEMENTS); + if (transitioned_fast_element_map != NULL) { + j(equal, early_success, Label::kNear); + cmp(FieldOperand(obj, HeapObject::kMapOffset), + Handle(transitioned_fast_element_map)); + } + + Map* transitioned_double_map( + map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); + ASSERT(transitioned_double_map == NULL || + map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); + if (transitioned_double_map != NULL) { + j(equal, early_success, Label::kNear); + cmp(FieldOperand(obj, HeapObject::kMapOffset), + Handle(transitioned_double_map)); + } + } +} + + void MacroAssembler::CheckMap(Register obj, Handle map, Label* fail, - SmiCheckType smi_check_type) { + SmiCheckType smi_check_type, + CompareMapMode mode) { if (smi_check_type == DO_SMI_CHECK) { JumpIfSmi(obj, fail); } - cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map)); + + Label success; + CompareMap(obj, map, &success, mode); j(not_equal, fail); + bind(&success); } @@ -616,7 +649,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFramePrologue() { - // Setup the frame structure on the stack. + // Set up the frame structure on the stack. ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); @@ -668,7 +701,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { void MacroAssembler::EnterExitFrame(bool save_doubles) { EnterExitFramePrologue(); - // Setup argc and argv in callee-saved registers. + // Set up argc and argv in callee-saved registers. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; mov(edi, eax); lea(esi, Operand(ebp, eax, times_4, offset)); @@ -959,6 +992,50 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +// Compute the hash code from the untagged key. This must be kept in sync +// with ComputeIntegerHash in utils.h. +// +// Note: r0 will contain hash code +void MacroAssembler::GetNumberHash(Register r0, Register scratch) { + // Xor original key with a seed. 
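The new MacroAssembler::CompareMap above is what implements ALLOW_ELEMENT_TRANSITION_MAPS: besides the expected map itself, it also accepts the maps that this map would transition to when its elements kind moves up to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS. The decision the emitted cmp/j sequence encodes, written out as a C++-level sketch using the same internal helpers:

    // Pseudo-version of the check CompareMap emits; sketch only, relying on
    // V8-internal types (Map, CompareMapMode, LookupElementsTransitionMap).
    static bool MapAcceptable(Map* object_map, Map* expected,
                              CompareMapMode mode) {
      if (object_map == expected) return true;
      if (mode != ALLOW_ELEMENT_TRANSITION_MAPS) return false;
      Map* to_fast = expected->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
      Map* to_double =
          expected->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
      return (to_fast != NULL && object_map == to_fast) ||
             (to_double != NULL && object_map == to_double);
    }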
+ if (Serializer::enabled()) { + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(scratch, Immediate(Heap::kHashSeedRootIndex)); + mov(scratch, + Operand::StaticArray(scratch, times_pointer_size, roots_array_start)); + SmiUntag(scratch); + xor_(r0, scratch); + } else { + int32_t seed = isolate()->heap()->HashSeed(); + xor_(r0, Immediate(seed)); + } + + // hash = ~hash + (hash << 15); + mov(scratch, r0); + not_(r0); + shl(scratch, 15); + add(r0, scratch); + // hash = hash ^ (hash >> 12); + mov(scratch, r0); + shr(scratch, 12); + xor_(r0, scratch); + // hash = hash + (hash << 2); + lea(r0, Operand(r0, r0, times_4, 0)); + // hash = hash ^ (hash >> 4); + mov(scratch, r0); + shr(scratch, 4); + xor_(r0, scratch); + // hash = hash * 2057; + imul(r0, r0, 2057); + // hash = hash ^ (hash >> 16); + mov(scratch, r0); + shr(scratch, 16); + xor_(r0, scratch); +} + + + void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements, Register key, @@ -984,33 +1061,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, Label done; - // Compute the hash code from the untagged key. This must be kept in sync - // with ComputeIntegerHash in utils.h. - // - // hash = ~hash + (hash << 15); - mov(r1, r0); - not_(r0); - shl(r1, 15); - add(r0, r1); - // hash = hash ^ (hash >> 12); - mov(r1, r0); - shr(r1, 12); - xor_(r0, r1); - // hash = hash + (hash << 2); - lea(r0, Operand(r0, r0, times_4, 0)); - // hash = hash ^ (hash >> 4); - mov(r1, r0); - shr(r1, 4); - xor_(r0, r1); - // hash = hash * 2057; - imul(r0, r0, 2057); - // hash = hash ^ (hash >> 16); - mov(r1, r0); - shr(r1, 16); - xor_(r0, r1); + GetNumberHash(r0, r1); // Compute capacity mask. - mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); + mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset)); shr(r1, kSmiTagSize); // convert smi to int dec(r1); @@ -1021,19 +1075,19 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r2, r0); // Compute the masked index: (hash + i + i * i) & mask. if (i > 0) { - add(r2, Immediate(NumberDictionary::GetProbeOffset(i))); + add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); } and_(r2, r1); // Scale the index by multiplying by the entry size. - ASSERT(NumberDictionary::kEntrySize == 3); + ASSERT(SeededNumberDictionary::kEntrySize == 3); lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 // Check if the key matches. cmp(key, FieldOperand(elements, r2, times_pointer_size, - NumberDictionary::kElementsStartOffset)); + SeededNumberDictionary::kElementsStartOffset)); if (i != (kProbes - 1)) { j(equal, &done); } else { @@ -1044,7 +1098,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, bind(&done); // Check that the value is a normal propety. const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; ASSERT_EQ(NORMAL, 0); test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); @@ -1052,7 +1106,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, // Get the value at the masked, scaled index. 
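GetNumberHash above is the assembly counterpart of ComputeIntegerHash in utils.h, and the comment requires the two to stay in sync. The same mixing steps written as C++ (the seed parameter is shown explicitly here; the exact utils.h signature is not reproduced):

    static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;    // xor original key with a seed
      hash = ~hash + (hash << 15);   // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;            // == hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }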
   const int kValueOffset =
-      NumberDictionary::kElementsStartOffset + kPointerSize;
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
 }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e6a17417d8..c969a6f71b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -251,7 +251,7 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Setup call kind marking in ecx. The method takes ecx as an
+  // Set up call kind marking in ecx. The method takes ecx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -356,13 +356,24 @@ class MacroAssembler: public Assembler {
                                    Label* fail,
                                    bool specialize_for_processor);
 
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
+  // the result of the map compare. If multiple map compares are required, the
+  // compare sequence branches to early_success.
+  void CompareMap(Register obj,
+                  Handle<Map> map,
+                  Label* early_success,
+                  CompareMapMode mode = REQUIRE_EXACT_MAP);
+
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
-  // heap object)
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                SmiCheckType smi_check_type,
+                CompareMapMode mode = REQUIRE_EXACT_MAP);
 
   // Check if the map of an object is equal to a specified map and branch to a
   // specified target if equal. Skip the smi check if not required (object is
@@ -486,6 +497,7 @@ class MacroAssembler: public Assembler {
                              Register scratch,
                              Label* miss);
 
+  void GetNumberHash(Register r0, Register scratch);
 
   void LoadFromNumberDictionary(Label* miss,
                                 Register elements,
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index c27a60fd06..0da51c857e 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -695,13 +695,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver_reg, miss_label);
-  // Check that the map of the object hasn't changed.
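// (Editor's sketch, not part of the patch.) The change in progress here is the
// pattern repeated throughout stub-cache-ia32.cc below: the open-coded
// sequence
//
//   __ JumpIfSmi(receiver_reg, miss_label);
//   __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
//          Immediate(Handle<Map>(object->map())));
//   __ j(not_equal, miss_label);
//
// collapses into a single CheckMap call,
//
//   __ CheckMap(receiver_reg, Handle<Map>(object->map()),
//               miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
//
// which, thanks to ALLOW_ELEMENT_TRANSITION_MAPS, also accepts a receiver
// whose map has merely moved along the FAST_SMI_ONLY_ELEMENTS ->
// FAST_ELEMENTS / FAST_DOUBLE_ELEMENTS transitions handled by CompareMap.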
-  __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, miss_label);
+  __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -878,13 +874,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
       if (in_new_space) {
         // Save the map in scratch1 for later.
         __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-        __ cmp(scratch1, Immediate(current_map));
-      } else {
-        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-               Immediate(current_map));
       }
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
+      __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+                  ALLOW_ELEMENT_TRANSITION_MAPS);
+
       // Check access rights to the global object. This has to happen after
       // the map check so that we know that the object is actually a global
       // object.
@@ -916,9 +909,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
   // Check the holder map.
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(holder->map())));
-  __ j(not_equal, miss);
+  __ CheckMap(reg, Handle<Map>(holder->map()),
+              miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -2338,7 +2330,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
     __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
   }
 
-  // Setup the context (function already in edi).
+  // Set up the context (function already in edi).
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
@@ -2403,13 +2395,9 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &miss);
-  // Check that the map of the object hasn't changed.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss);
+  __ CheckMap(edx, Handle<Map>(object->map()),
+              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2453,13 +2441,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &miss);
-  // Check that the map of the object hasn't changed.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss);
+  __ CheckMap(edx, Handle<Map>(receiver->map()),
+              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 624ecd7f58..9024605da5 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1267,7 +1267,8 @@ MaybeObject* StoreIC::Store(State state,
   // Check if the given name is an array index.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+    Handle<Object> result =
+        JSObject::SetElement(receiver, index, value, strict_mode);
     RETURN_IF_EMPTY_HANDLE(isolate(), result);
     return *value;
   }
@@ -1644,7 +1645,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
   // Check if the given name is an array index.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+    Handle<Object> result =
+        JSObject::SetElement(receiver, index, value, strict_mode);
     RETURN_IF_EMPTY_HANDLE(isolate(), result);
     return *value;
   }
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 8fca305057..f6d5a5963d 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -418,7 +418,7 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
 
 static void PatchIncrementalMarkingRecordWriteStubs(
     Heap* heap, RecordWriteStub::Mode mode) {
-  NumberDictionary* stubs = heap->code_stubs();
+  UnseededNumberDictionary* stubs = heap->code_stubs();
 
   int capacity = stubs->Capacity();
   for (int i = 0; i < capacity; i++) {
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index 25def87068..4f8fa6b127 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -56,6 +56,7 @@ class IncrementalMarking {
   }
 
   bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
 
   inline bool IsStopped() { return state() == STOPPED; }
 
@@ -219,10 +220,6 @@ class IncrementalMarking {
   void UncommitMarkingDeque();
 
  private:
-  void set_should_hurry(bool val) {
-    should_hurry_ = val;
-  }
-
   int64_t SpaceLeftInOldSpace();
   void ResetStepCounters();
 
diff --git a/deps/v8/src/inspector.cc b/deps/v8/src/inspector.cc
index 8fb80f1a22..833d338439 100644
--- a/deps/v8/src/inspector.cc
+++ b/deps/v8/src/inspector.cc
@@ -38,11 +38,11 @@ namespace internal {
 //============================================================================
 // The Inspector.
 
-void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
+void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
   // Dump the object pointer.
   OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
   if (obj->IsHeapObject()) {
-    HeapObject *hobj = HeapObject::cast(obj);
+    HeapObject* hobj = HeapObject::cast(obj);
     OS::FPrint(out, " size %d :", hobj->Size());
   }
 
diff --git a/deps/v8/src/inspector.h b/deps/v8/src/inspector.h
index e328bcdfa5..6962e21f4f 100644
--- a/deps/v8/src/inspector.h
+++ b/deps/v8/src/inspector.h
@@ -41,14 +41,14 @@ namespace internal {
 
 class Inspector {
  public:
-  static void DumpObjectType(FILE* out, Object *obj, bool print_more);
-  static void DumpObjectType(FILE* out, Object *obj) {
+  static void DumpObjectType(FILE* out, Object* obj, bool print_more);
+  static void DumpObjectType(FILE* out, Object* obj) {
     DumpObjectType(out, obj, false);
   }
-  static void DumpObjectType(Object *obj, bool print_more) {
+  static void DumpObjectType(Object* obj, bool print_more) {
     DumpObjectType(stdout, obj, print_more);
   }
-  static void DumpObjectType(Object *obj) {
+  static void DumpObjectType(Object* obj) {
     DumpObjectType(stdout, obj, false);
   }
 };
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index c235a23439..35e9e284f9 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -570,7 +570,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
     frame->Summarize(&frames);
     for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
       // Create a JSObject to hold the information for the StackFrame.
-      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+      Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
 
       Handle<JSFunction> fun = frames[i].function();
       Handle