diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 6d18b46c70..e68ee15fde 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -399,6 +399,15 @@
        }],
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
           or OS=="android"', {
+          'cflags!': [
+            '-O2',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+            '-O3',
+          ],
          'conditions': [
            [ 'gcc_version==44 and clang==0', {
              'cflags': [
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index d1e5b51a5e..97306a1659 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -3446,7 +3446,10 @@ class BoundsCheckBbData: public ZoneObject {
   // (either upper or lower; note that HasSingleCheck() becomes false).
   // Otherwise one of the current checks is modified so that it also covers
   // new_offset, and new_check is removed.
-  void CoverCheck(HBoundsCheck* new_check,
+  //
+  // If the check cannot be modified because the context is unknown it
+  // returns false, otherwise it returns true.
+  bool CoverCheck(HBoundsCheck* new_check,
                   int32_t new_offset) {
     ASSERT(new_check->index()->representation().IsInteger32());
     bool keep_new_check = false;
@@ -3457,12 +3460,13 @@ class BoundsCheckBbData: public ZoneObject {
         keep_new_check = true;
         upper_check_ = new_check;
       } else {
-        BuildOffsetAdd(upper_check_,
-                       &added_upper_index_,
-                       &added_upper_offset_,
-                       Key()->IndexBase(),
-                       new_check->index()->representation(),
-                       new_offset);
+        bool result = BuildOffsetAdd(upper_check_,
+                                     &added_upper_index_,
+                                     &added_upper_offset_,
+                                     Key()->IndexBase(),
+                                     new_check->index()->representation(),
+                                     new_offset);
+        if (!result) return false;
         upper_check_->SetOperandAt(0, added_upper_index_);
       }
     } else if (new_offset < lower_offset_) {
@@ -3471,12 +3475,13 @@ class BoundsCheckBbData: public ZoneObject {
         keep_new_check = true;
         lower_check_ = new_check;
       } else {
-        BuildOffsetAdd(lower_check_,
-                       &added_lower_index_,
-                       &added_lower_offset_,
-                       Key()->IndexBase(),
-                       new_check->index()->representation(),
-                       new_offset);
+        bool result = BuildOffsetAdd(lower_check_,
+                                     &added_lower_index_,
+                                     &added_lower_offset_,
+                                     Key()->IndexBase(),
+                                     new_check->index()->representation(),
+                                     new_offset);
+        if (!result) return false;
         lower_check_->SetOperandAt(0, added_lower_index_);
       }
     } else {
@@ -3486,6 +3491,8 @@ class BoundsCheckBbData: public ZoneObject {
     if (!keep_new_check) {
       new_check->DeleteAndReplaceWith(NULL);
     }
+
+    return true;
   }

   void RemoveZeroOperations() {
@@ -3528,20 +3535,34 @@ class BoundsCheckBbData: public ZoneObject {
   BoundsCheckBbData* next_in_bb_;
   BoundsCheckBbData* father_in_dt_;

-  void BuildOffsetAdd(HBoundsCheck* check,
+  // Given an existing add instruction and a bounds check it tries to
+  // find the current context (either of the add or of the check index).
+  HValue* IndexContext(HAdd* add, HBoundsCheck* check) {
+    if (add != NULL) {
+      return add->context();
+    }
+    if (check->index()->IsBinaryOperation()) {
+      return HBinaryOperation::cast(check->index())->context();
+    }
+    return NULL;
+  }
+
+  // This function returns false if it cannot build the add because the
+  // current context cannot be determined.
+  bool BuildOffsetAdd(HBoundsCheck* check,
                       HAdd** add,
                       HConstant** constant,
                       HValue* original_value,
                       Representation representation,
                       int32_t new_offset) {
+    HValue* index_context = IndexContext(*add, check);
+    if (index_context == NULL) return false;
+
     HConstant* new_constant = new(BasicBlock()->zone())
         HConstant(new_offset, Representation::Integer32());
     if (*add == NULL) {
       new_constant->InsertBefore(check);
-      // Because of the bounds checks elimination algorithm, the index is always
-      // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation.
-      HValue* context = HBinaryOperation::cast(check->index())->context();
-      *add = new(BasicBlock()->zone()) HAdd(context,
+      *add = new(BasicBlock()->zone()) HAdd(index_context,
                                             original_value,
                                             new_constant);
       (*add)->AssumeRepresentation(representation);
@@ -3551,6 +3572,7 @@ class BoundsCheckBbData: public ZoneObject {
       (*constant)->DeleteAndReplaceWith(new_constant);
     }
     *constant = new_constant;
+    return true;
   }

   void RemoveZeroAdd(HAdd** add, HConstant** constant) {
@@ -3625,9 +3647,11 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
     *data_p = bb_data_list;
   } else if (data->OffsetIsCovered(offset)) {
     check->DeleteAndReplaceWith(NULL);
-  } else if (data->BasicBlock() == bb) {
-    data->CoverCheck(check, offset);
-  } else {
+  } else if (data->BasicBlock() != bb ||
+             !data->CoverCheck(check, offset)) {
+    // If the check is in the current BB we try to modify it by calling
+    // "CoverCheck", but if also that fails we record the current offsets
+    // in a new data instance because from now on they are covered.
     int32_t new_lower_offset = offset < data->LowerOffset()
         ? offset
         : data->LowerOffset();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 8cccaa5a74..dc20fefd65 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2103,6 +2103,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
 }


+void Assembler::movmskps(Register dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x50);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index b1f421ec86..ad805c1dc5 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -1015,6 +1015,7 @@ class Assembler : public AssemblerBase {
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);

   void movmskpd(Register dst, XMMRegister src);
+  void movmskps(Register dst, XMMRegister src);

   void cmpltsd(XMMRegister dst, XMMRegister src);
   void pcmpeqd(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index da8e2ae457..a70ccbdd1d 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -1745,12 +1745,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       }
       // Check result type if it is currently Int32.
      if (result_type_ <= BinaryOpIC::INT32) {
-        __ cvttsd2si(ecx, Operand(xmm0));
-        __ cvtsi2sd(xmm2, ecx);
-        __ pcmpeqd(xmm2, xmm0);
-        __ movmskpd(ecx, xmm2);
-        __ test(ecx, Immediate(1));
-        __ j(zero, &not_int32);
+        FloatingPointHelper::CheckSSE2OperandIsInt32(
+            masm, &not_int32, xmm0, ecx, xmm2);
       }
       BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
       __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2858,9 +2854,11 @@ void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
   __ cvttsd2si(scratch, Operand(operand));
   __ cvtsi2sd(xmm_scratch, scratch);
   __ pcmpeqd(xmm_scratch, operand);
-  __ movmskpd(scratch, xmm_scratch);
-  __ test(scratch, Immediate(1));
-  __ j(zero, non_int32);
+  __ movmskps(scratch, xmm_scratch);
+  // Two least significant bits should be both set.
+  __ not_(scratch);
+  __ test(scratch, Immediate(3));
+  __ j(not_zero, non_int32);
 }
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 1ac3b2eb23..9eb0d292c7 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1040,6 +1040,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
                        NameOfXMMRegister(regop),
                        NameOfXMMRegister(rm));
         data++;
+      } else if (f0byte == 0x50) {
+        data += 2;
+        int mod, regop, rm;
+        get_modrm(*data, &mod, &regop, &rm);
+        AppendToBuffer("movmskps %s,%s",
+                       NameOfCPURegister(regop),
+                       NameOfXMMRegister(rm));
+        data++;
       } else if ((f0byte & 0xF0) == 0x80) {
         data += JumpConditional(data, branch_hint);
       } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 077e782905..67db5b4944 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -4308,11 +4308,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)

 void SharedFunctionInfo::BeforeVisitingPointers() {
   if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
+}
+

-  // Flush optimized code map on major GC.
-  // Note: we may experiment with rebuilding it or retaining entries
-  // which should survive as we iterate through optimized functions
-  // anyway.
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
   set_optimized_code_map(Smi::FromInt(0));
 }
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 4a9dab5caa..925b2562f2 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -299,6 +299,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
+  if (FLAG_cache_optimized_code) {
+    // Flush optimized code map on major GC.
+    // TODO(mstarzinger): We may experiment with rebuilding it or with
+    // retaining entries which should survive as we iterate through
+    // optimized functions anyway.
+    shared->ClearOptimizedCodeMap();
+  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
     if (IsFlushable(heap, shared)) {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index fcc2efad31..885a5dd8b2 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -8073,11 +8073,6 @@ bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
 }


-void SharedFunctionInfo::ClearOptimizedCodeMap() {
-  set_optimized_code_map(Smi::FromInt(0));
-}
-
-
 void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared,
     Handle<Context> native_context,
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 701712be32..6c9b31b937 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -5444,7 +5444,7 @@ class SharedFunctionInfo: public HeapObject {
   void InstallFromOptimizedCodeMap(JSFunction* function, int index);

   // Clear optimized code map.
-  void ClearOptimizedCodeMap();
+  inline void ClearOptimizedCodeMap();

   // Add a new entry to the optimized code map.
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 61cf9ea271..aa55c1b86b 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION 3
 #define MINOR_VERSION 15
 #define BUILD_NUMBER 11
-#define PATCH_LEVEL 0
+#define PATCH_LEVEL 5
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 370cb02a36..f136b65591 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -2951,6 +2951,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
 }


+void Assembler::movmskps(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x50);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 24c8df368f..beb695673d 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1388,6 +1388,7 @@ class Assembler : public AssemblerBase {
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);

   void movmskpd(Register dst, XMMRegister src);
+  void movmskps(Register dst, XMMRegister src);

   // The first argument is the reg field, the second argument is the r/m field.
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index c8606c40b2..fb0914d7d0 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1244,6 +1244,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
     AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
     current += PrintRightXMMOperand(current);

+  } else if (opcode == 0x50) {
+    // movmskps reg, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if ((opcode & 0xF0) == 0x80) {
     // Jcc: Conditional jump (branch).
     current = data + JumpConditional(data);
@@ -1724,6 +1731,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
         data += F6F7Instruction(data);
         break;

+      case 0x3C:
+        AppendToBuffer("cmp al, 0x%x", *reinterpret_cast(data + 1));
+        data +=2;
+        break;
+
       default:
         UnimplementedInstruction();
         data += 1;
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index c69b27c445..bb91813dd4 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -1535,17 +1535,17 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
     if (right->IsConstantOperand()) {
       Immediate right_imm =
           Immediate(ToInteger32(LConstantOperand::cast(right)));
-      __ cmpq(left_reg, right_imm);
+      __ cmpl(left_reg, right_imm);
       __ j(condition, &return_left, Label::kNear);
       __ movq(left_reg, right_imm);
     } else if (right->IsRegister()) {
       Register right_reg = ToRegister(right);
-      __ cmpq(left_reg, right_reg);
+      __ cmpl(left_reg, right_reg);
       __ j(condition, &return_left, Label::kNear);
       __ movq(left_reg, right_reg);
     } else {
       Operand right_op = ToOperand(right);
-      __ cmpq(left_reg, right_op);
+      __ cmpl(left_reg, right_op);
       __ j(condition, &return_left, Label::kNear);
       __ movq(left_reg, right_op);
     }
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index df7988bdaa..7a7cb304de 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -178,5 +178,29 @@
 short_test(a, 0);
 assertTrue(%GetOptimizationStatus(short_test) != 1);

+// A test for when we would modify a phi index.
+var data_phi = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+function test_phi(a, base, check) {
+  var index;
+  if (check) {
+    index = base + 1;
+  } else {
+    index = base + 2;
+  }
+  var result = a[index];
+  result += a[index + 1];
+  result += a[index - 1];
+  return result;
+}
+var result_phi = 0;
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+%OptimizeFunctionOnNextCall(test_phi);
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+
+
 gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-164442.js b/deps/v8/test/mjsunit/regress/regress-164442.js
new file mode 100644
index 0000000000..1160d874f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-164442.js
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Should not take a very long time (n^2 algorithms are bad)
+
+
+function ensureNotNegative(x) {
+  return Math.max(0, x | 0);
+}
+
+
+ensureNotNegative(1);
+ensureNotNegative(2);
+
+%OptimizeFunctionOnNextCall(ensureNotNegative);
+
+var r = ensureNotNegative(-1);
+
+assertEquals(0, r);
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index b4b79aada4..0acb658c53 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -83,6 +83,8 @@ consts_misc = [
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
         'value': 'FIELD' },
+    { 'name': 'prop_type_first_phantom',
+        'value': 'TRANSITION' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
new file mode 100644
index 0000000000..bc9f636cb7
--- /dev/null
+++ b/deps/v8/tools/tick-processor.html
@@ -0,0 +1,168 @@
+V8 Tick Processor
+

+Chrome V8 profiling log processor
+
+Process V8's profiling information log (sampling profiler tick information)
+in your browser. Particularly useful if you don't have the V8 shell (d8)
+at hand on your system. You still have to run Chrome with the appropriate
+command line flags to produce the profiling log.
+
+Usage:
+
+Click on the button and browse to the profiling log file (usually, v8.log).
+Processing will start automatically and the output will be visible in the
+text area below.
+
+Limitations and disclaimer:
+
+This page offers a subset of the functionality of the command-line tick
+processor utility in the V8 repository. In particular, this page cannot
+access the command-line utility that provides library symbol information,
+hence the [C++] section of the output stays empty. Also note that this
+web-based tool is provided only for convenience and quick reference; you
+should refer to the command-line version for full output.
+
+Copyright the V8 Authors - Last change to this page: 12/12/2012
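A note on the CheckSSE2OperandIsInt32 change above: pcmpeqd compares the round-tripped double against the original as packed 32-bit lanes, so an exact int32 value produces all-ones in both low lanes, and movmskps packs each lane's sign bit into the scratch register. The sketch below is illustrative only and not part of the patch; the helper name and sample masks are made up, and assertEquals is assumed to come from the mjsunit harness.

// Illustrative sketch of the mask test performed after movmskps in
// CheckSSE2OperandIsInt32: the two least significant bits of the
// movmskps result must both be set for the operand to be an exact int32,
// which is what the not_/test(Immediate(3)) sequence checks.
function isExactInt32Mask(movmskpsBits) {
  return (~movmskpsBits & 3) === 0;
}

assertEquals(true, isExactInt32Mask(0x3));   // both low lanes all-ones
assertEquals(false, isExactInt32Mask(0x2));  // low lane differs: not an int32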
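For the DoMathMinMax change (cmpq replaced by cmpl), the mjsunit-style sketch below shows the kind of code that can exercise the untagged int32 Math.min/Math.max path on x64. It is illustrative only and not part of the patch: the function name is made up, and it assumes d8 run with --allow-natives-syntax so that %OptimizeFunctionOnNextCall and assertEquals are available.

// Illustrative sketch: int32 Math.min/Math.max compiled by DoMathMinMax.
function int32MinMax(a, b) {
  // "| 0" keeps both operands in untagged int32 representation.
  return Math.min(a | 0, b | 0) + Math.max(a | 0, b | 0);
}

int32MinMax(1, 2);
int32MinMax(3, 4);
%OptimizeFunctionOnNextCall(int32MinMax);
// A 64-bit compare could look at stale upper register bits and select the
// wrong operand; with 32-bit compares the result stays 3 + 4 = 7.
assertEquals(7, int32MinMax(3, 4));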