From a0702b54d1db35a6006644882c0b5420d8670958 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 9 Feb 2011 10:24:26 -0800 Subject: [PATCH] Upgrade V8 to 3.1.2 --- deps/v8/.gitignore | 1 + deps/v8/AUTHORS | 3 +- deps/v8/ChangeLog | 12 + .../strongtalk/LICENSE => LICENSE.strongtalk} | 10 +- deps/v8/LICENSE.v8 | 26 + deps/v8/LICENSE.valgrind | 45 ++ deps/v8/SConstruct | 2 +- deps/v8/src/arm/assembler-arm.cc | 16 +- deps/v8/src/arm/assembler-arm.h | 18 +- deps/v8/src/arm/code-stubs-arm.cc | 446 ++++++++++++---- deps/v8/src/arm/code-stubs-arm.h | 39 ++ deps/v8/src/arm/codegen-arm.cc | 12 +- deps/v8/src/arm/codegen-arm.h | 1 + deps/v8/src/arm/constants-arm.h | 24 +- deps/v8/src/arm/deoptimizer-arm.cc | 2 +- deps/v8/src/arm/full-codegen-arm.cc | 10 +- deps/v8/src/arm/ic-arm.cc | 25 +- deps/v8/src/arm/lithium-arm.cc | 117 ++++- deps/v8/src/arm/lithium-arm.h | 243 ++++----- deps/v8/src/arm/lithium-codegen-arm.cc | 196 +++++-- deps/v8/src/arm/lithium-codegen-arm.h | 5 + deps/v8/src/arm/macro-assembler-arm.cc | 158 +++++- deps/v8/src/arm/macro-assembler-arm.h | 36 +- deps/v8/src/arm/simulator-arm.cc | 213 +++++--- deps/v8/src/arm/simulator-arm.h | 8 +- deps/v8/src/arm/stub-cache-arm.cc | 230 +++++--- deps/v8/src/array.js | 8 +- deps/v8/src/assembler.cc | 12 +- deps/v8/src/assembler.h | 35 +- deps/v8/src/code-stubs.h | 3 +- deps/v8/src/codegen-inl.h | 4 + deps/v8/src/compilation-cache.cc | 21 +- deps/v8/src/compilation-cache.h | 3 +- deps/v8/src/compiler.cc | 14 +- deps/v8/src/compiler.h | 13 +- deps/v8/src/conversions.cc | 78 ++- deps/v8/src/deoptimizer.cc | 4 +- deps/v8/src/disassembler.cc | 4 +- deps/v8/src/extensions/gc-extension.cc | 6 +- deps/v8/src/full-codegen.cc | 2 +- deps/v8/src/full-codegen.h | 3 + deps/v8/src/handles.cc | 2 +- deps/v8/src/heap-profiler.cc | 3 +- deps/v8/src/heap.cc | 11 + deps/v8/src/heap.h | 11 +- deps/v8/src/hydrogen-instructions.cc | 10 +- deps/v8/src/hydrogen-instructions.h | 114 +++- deps/v8/src/hydrogen.cc | 92 ++-- deps/v8/src/hydrogen.h | 2 + deps/v8/src/ia32/code-stubs-ia32.cc | 48 ++ deps/v8/src/ia32/code-stubs-ia32.h | 19 + deps/v8/src/ia32/codegen-ia32.cc | 10 +- deps/v8/src/ia32/codegen-ia32.h | 1 + deps/v8/src/ia32/deoptimizer-ia32.cc | 115 ++-- deps/v8/src/ia32/full-codegen-ia32.cc | 153 +++--- deps/v8/src/ia32/ic-ia32.cc | 22 +- deps/v8/src/ia32/lithium-codegen-ia32.cc | 45 +- deps/v8/src/ia32/lithium-ia32.cc | 98 +++- deps/v8/src/ia32/lithium-ia32.h | 238 ++++----- deps/v8/src/ia32/macro-assembler-ia32.cc | 20 +- deps/v8/src/ia32/macro-assembler-ia32.h | 11 + deps/v8/src/ia32/stub-cache-ia32.cc | 31 ++ deps/v8/src/ic.cc | 48 +- deps/v8/src/lithium-allocator-inl.h | 140 +++++ deps/v8/src/lithium-allocator.cc | 171 ++---- deps/v8/src/lithium-allocator.h | 118 ++--- deps/v8/src/lithium.h | 76 +++ deps/v8/src/messages.js | 9 +- deps/v8/src/objects-inl.h | 28 +- deps/v8/src/objects.cc | 55 +- deps/v8/src/objects.h | 23 +- deps/v8/src/parser.cc | 126 ++++- deps/v8/src/parser.h | 8 +- deps/v8/src/preparser.cc | 31 +- deps/v8/src/preparser.h | 2 + deps/v8/src/prettyprinter.cc | 25 +- deps/v8/src/runtime.cc | 192 ++++++- deps/v8/src/runtime.h | 4 +- deps/v8/src/safepoint-table.cc | 22 +- deps/v8/src/safepoint-table.h | 11 +- deps/v8/src/scanner-base.cc | 61 ++- deps/v8/src/scanner-base.h | 9 +- deps/v8/src/scanner.cc | 19 +- deps/v8/src/scanner.h | 9 + deps/v8/src/scopes.cc | 3 +- deps/v8/src/stub-cache.cc | 33 ++ deps/v8/src/stub-cache.h | 4 + .../third_party/strongtalk/README.chromium | 18 - deps/v8/src/token.h | 37 +- deps/v8/src/top.h | 2 +- 
deps/v8/src/type-info.h | 6 +- deps/v8/src/uri.js | 3 +- deps/v8/src/v8globals.h | 6 + deps/v8/src/v8natives.js | 146 +++-- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/assembler-x64.cc | 27 + deps/v8/src/x64/assembler-x64.h | 32 +- deps/v8/src/x64/code-stubs-x64.cc | 497 +++++++++++++++--- deps/v8/src/x64/code-stubs-x64.h | 24 + deps/v8/src/x64/codegen-x64.cc | 10 +- deps/v8/src/x64/codegen-x64.h | 1 + deps/v8/src/x64/deoptimizer-x64.cc | 147 ++++-- deps/v8/src/x64/disasm-x64.cc | 2 + deps/v8/src/x64/frames-x64.h | 5 +- deps/v8/src/x64/full-codegen-x64.cc | 41 +- deps/v8/src/x64/ic-x64.cc | 26 +- deps/v8/src/x64/lithium-codegen-x64.cc | 409 ++++++++++++-- deps/v8/src/x64/lithium-x64.cc | 237 ++++++--- deps/v8/src/x64/lithium-x64.h | 65 ++- deps/v8/src/x64/macro-assembler-x64.cc | 4 +- deps/v8/src/x64/macro-assembler-x64.h | 29 +- deps/v8/src/x64/stub-cache-x64.cc | 29 + deps/v8/test/cctest/cctest.status | 2 +- deps/v8/test/cctest/test-api.cc | 329 +++++++++++- deps/v8/test/cctest/test-assembler-arm.cc | 216 ++++++-- deps/v8/test/cctest/test-bignum-dtoa.cc | 4 +- deps/v8/test/cctest/test-dtoa.cc | 2 +- deps/v8/test/cctest/test-fast-dtoa.cc | 2 +- deps/v8/test/es5conform/es5conform.status | 220 +++++++- .../mjsunit/get-own-property-descriptor.js | 16 + deps/v8/test/mjsunit/mjsunit.status | 4 + .../v8/test/mjsunit/object-define-property.js | 137 ++++- deps/v8/test/mjsunit/regress/regress-1083.js | 38 ++ deps/v8/test/mjsunit/regress/regress-1092.js | 35 ++ deps/v8/test/mjsunit/regress/regress-1099.js | 46 ++ .../v8/test/mjsunit/regress/regress-900966.js | 2 + deps/v8/test/mjsunit/regress/regress-992.js | 43 ++ .../test/mjsunit/regress/regress-deopt-gc.js | 49 ++ deps/v8/test/mjsunit/strict-mode-eval.js | 82 +++ deps/v8/test/mjsunit/strict-mode.js | 65 +++ deps/v8/test/mozilla/mozilla.status | 15 +- deps/v8/tools/gyp/v8.gyp | 1 + deps/v8/tools/visual_studio/v8_base.vcproj | 20 + .../v8/tools/visual_studio/v8_base_arm.vcproj | 4 + .../v8/tools/visual_studio/v8_base_x64.vcproj | 116 +++- 135 files changed, 5878 insertions(+), 1766 deletions(-) rename deps/v8/{src/third_party/strongtalk/LICENSE => LICENSE.strongtalk} (79%) create mode 100644 deps/v8/LICENSE.v8 create mode 100644 deps/v8/LICENSE.valgrind create mode 100644 deps/v8/src/lithium-allocator-inl.h delete mode 100644 deps/v8/src/third_party/strongtalk/README.chromium create mode 100644 deps/v8/test/mjsunit/regress/regress-1083.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1092.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1099.js create mode 100644 deps/v8/test/mjsunit/regress/regress-992.js create mode 100644 deps/v8/test/mjsunit/regress/regress-deopt-gc.js create mode 100644 deps/v8/test/mjsunit/strict-mode-eval.js diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index d85ef64d41..c68dadbe98 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -20,6 +20,7 @@ d8_g shell shell_g /obj/ +/test/sputnik/sputniktests/ /tools/oom_dump/oom_dump /tools/oom_dump/oom_dump.o /tools/visual_studio/Debug diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index da864885ae..1b756caf27 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -26,6 +26,7 @@ Kun Zhang Matt Hanselman Martyn Capewell Michael Smith +Mike Gilbert Paolo Giarrusso Patrick Gansterer Rafal Krypa @@ -35,4 +36,4 @@ Ryan Dahl Sanjoy Das Subrato K De Vlad Burlik -Mike Gilbert +Zaheer Ahmad diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index f143a40cd2..d48ded840c 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,15 @@ +2011-02-07: 
Version 3.1.2 + + Added better security checks when accessing properties via + Object.getOwnPropertyDescriptor. + + Fixed bug in Object.defineProperty and related access bugs (issues + 992, 1083 and 1092). + + Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease + copyright notice generation for embedders. + + 2011-02-02: Version 3.1.1 Perform security checks before fetching the value in diff --git a/deps/v8/src/third_party/strongtalk/LICENSE b/deps/v8/LICENSE.strongtalk similarity index 79% rename from deps/v8/src/third_party/strongtalk/LICENSE rename to deps/v8/LICENSE.strongtalk index 7473a7b2b5..9bd62e4f23 100644 --- a/deps/v8/src/third_party/strongtalk/LICENSE +++ b/deps/v8/LICENSE.strongtalk @@ -6,15 +6,15 @@ modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +this list of conditions and the following disclaimer. - Redistribution in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. - Neither the name of Sun Microsystems or the names of contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. +be used to endorse or promote products derived from this software without +specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, diff --git a/deps/v8/LICENSE.v8 b/deps/v8/LICENSE.v8 new file mode 100644 index 0000000000..933718a9ef --- /dev/null +++ b/deps/v8/LICENSE.v8 @@ -0,0 +1,26 @@ +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/deps/v8/LICENSE.valgrind b/deps/v8/LICENSE.valgrind new file mode 100644 index 0000000000..fd8ebaf509 --- /dev/null +++ b/deps/v8/LICENSE.valgrind @@ -0,0 +1,45 @@ +---------------------------------------------------------------- + +Notice that the following BSD-style license applies to this one +file (valgrind.h) only. The rest of Valgrind is licensed under the +terms of the GNU General Public License, version 2, unless +otherwise indicated. See the COPYING file in the source +distribution for details. + +---------------------------------------------------------------- + +This file is part of Valgrind, a dynamic binary instrumentation +framework. + +Copyright (C) 2000-2007 Julian Seward. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + +4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index c9993991d5..bae1cd5e1d 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -136,7 +136,7 @@ LIBRARY_FLAGS = { 'gcc': { 'all': { 'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'], - 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'], + 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'], }, 'visibility:hidden': { # Use visibility=default to disable this. 
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index ebf040d904..243ba4978a 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -2124,7 +2124,7 @@ static Instr EncodeVCVT(const VFPType dst_type, const int dst_code, const VFPType src_type, const int src_code, - Assembler::ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(src_type != dst_type); int D, Vd, M, Vm; @@ -2167,7 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type, void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); @@ -2176,7 +2176,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst, void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); @@ -2185,7 +2185,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst, void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); @@ -2194,7 +2194,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst, void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); @@ -2203,7 +2203,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst, void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); @@ -2212,7 +2212,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst, void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); @@ -2221,7 +2221,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst, void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode, + VFPConversionMode mode, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index a6edf66ec8..fc826c727e 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -942,37 +942,33 @@ class Assembler : public Malloced { void vmov(const Register dst, const SwVfpRegister src, const Condition cond = al); - enum ConversionMode { - FPSCRRounding = 0, - RoundToZero = 1 - }; void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_f64_u32(const 
DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, - ConversionMode mode = RoundToZero, + VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); void vabs(const DwVfpRegister dst, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 590d8ce15e..437dfd2733 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -396,6 +396,19 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2, Label* not_number); + + // Loads the number from object into dst as a 32-bit integer if possible. If + // the object is not a 32-bit integer control continues at the label + // not_int32. If VFP is supported double_scratch is used but not scratch2. + static void LoadNumberAsInteger(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + DwVfpRegister double_scratch, + Label* not_int32); + private: static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, @@ -461,15 +474,21 @@ void FloatingPointHelper::LoadOperands( void FloatingPointHelper::LoadNumber(MacroAssembler* masm, - Destination destination, - Register object, - DwVfpRegister dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number) { + Destination destination, + Register object, + DwVfpRegister dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number) { + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + Label is_smi, done; __ JumpIfSmi(object, &is_smi); @@ -514,6 +533,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, } +void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + DwVfpRegister double_scratch, + Label* not_int32) { + if (FLAG_debug_code) { + __ AbortIfNotRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + } + Label is_smi, done; + __ JumpIfSmi(object, &is_smi); + __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); + __ cmp(scratch1, heap_number_map); + __ b(ne, not_int32); + __ ConvertToInt32( + object, dst, scratch1, scratch2, double_scratch, not_int32); + __ jmp(&done); + __ bind(&is_smi); + __ SmiUntag(dst, object); + __ bind(&done); +} + + + // See comment for class. 
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1676,7 +1723,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); __ cmp(r4, heap_number_map); __ b(ne, &slow); - __ ConvertToInt32(lhs, r3, r5, r4, &slow); + __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow); __ jmp(&done_checking_lhs); __ bind(&lhs_is_smi); __ mov(r3, Operand(lhs, ASR, 1)); @@ -1687,7 +1734,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); __ cmp(r4, heap_number_map); __ b(ne, &slow); - __ ConvertToInt32(rhs, r2, r5, r4, &slow); + __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow); __ jmp(&done_checking_rhs); __ bind(&rhs_is_smi); __ mov(r2, Operand(rhs, ASR, 1)); @@ -2529,6 +2576,18 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( __ and_(right, left, Operand(scratch1)); __ Ret(); break; + case Token::BIT_OR: + __ orr(right, left, Operand(right)); + __ Ret(); + break; + case Token::BIT_AND: + __ and_(right, left, Operand(right)); + __ Ret(); + break; + case Token::BIT_XOR: + __ eor(right, left, Operand(right)); + __ Ret(); + break; default: UNREACHABLE(); } @@ -2545,90 +2604,179 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, Register scratch1 = r7; Register scratch2 = r9; - // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending - // on whether VFP3 is available. - FloatingPointHelper::Destination destination = - CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? - FloatingPointHelper::kVFPRegisters : - FloatingPointHelper::kCoreRegisters; + ASSERT(smi_operands || (not_numbers != NULL)); + if (smi_operands && FLAG_debug_code) { + __ AbortIfNotSmi(left); + __ AbortIfNotSmi(right); + } Register heap_number_map = r6; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - // Allocate new heap number for result. - Register result = r5; - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: { + // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 + // depending on whether VFP3 is available or not. + FloatingPointHelper::Destination destination = + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? + FloatingPointHelper::kVFPRegisters : + FloatingPointHelper::kCoreRegisters; + + // Allocate new heap number for result. + Register result = r5; + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + + // Load the operands. + if (smi_operands) { + FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); + } else { + FloatingPointHelper::LoadOperands(masm, + destination, + heap_number_map, + scratch1, + scratch2, + not_numbers); + } - // Load the operands. - if (smi_operands) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); - } - FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); - } else { - FloatingPointHelper::LoadOperands(masm, - destination, - heap_number_map, - scratch1, - scratch2, - not_numbers); - } + // Calculate the result. 
+ if (destination == FloatingPointHelper::kVFPRegisters) { + // Using VFP registers: + // d6: Left value + // d7: Right value + CpuFeatures::Scope scope(VFP3); + switch (op_) { + case Token::ADD: + __ vadd(d5, d6, d7); + break; + case Token::SUB: + __ vsub(d5, d6, d7); + break; + case Token::MUL: + __ vmul(d5, d6, d7); + break; + case Token::DIV: + __ vdiv(d5, d6, d7); + break; + default: + UNREACHABLE(); + } - // Calculate the result. - if (destination == FloatingPointHelper::kVFPRegisters) { - // Using VFP registers: - // d6: Left value - // d7: Right value - CpuFeatures::Scope scope(VFP3); - switch (op_) { - case Token::ADD: - __ vadd(d5, d6, d7); - break; - case Token::SUB: - __ vsub(d5, d6, d7); - break; - case Token::MUL: - __ vmul(d5, d6, d7); - break; - case Token::DIV: - __ vdiv(d5, d6, d7); - break; - default: - UNREACHABLE(); - } + __ sub(r0, result, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ add(r0, r0, Operand(kHeapObjectTag)); + __ Ret(); + } else { + // Using core registers: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). - __ sub(r0, result, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ add(r0, r0, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Using core registers: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - - __ push(lr); // For later. - __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. - // Call C routine that may not cause GC or other trouble. r5 is callee - // save. - __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); - // Store answer in the overwritable heap number. + // Push the current return address before the C call. Return will be + // through pop(pc) below. + __ push(lr); + __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. + // Call C routine that may not cause GC or other trouble. r5 is callee + // save. + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); + // Store answer in the overwritable heap number. #if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as - // register cr8. Offsets must be divisible by 4 for coprocessor so we - // need to substract the tag from r5. - __ sub(scratch1, result, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to subtract the tag from r5. + __ sub(scratch1, result, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); #else - // Double returned in registers 0 and 1. - __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); #endif - __ mov(r0, Operand(result)); - // And we are done. - __ pop(pc); + // Place result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(result)); + __ pop(pc); + } + break; + } + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: { + if (smi_operands) { + __ SmiUntag(r3, left); + __ SmiUntag(r2, right); + } else { + // Convert operands to 32-bit integers. Right in r2 and left in r3. + FloatingPointHelper::LoadNumberAsInteger(masm, + left, + r3, + heap_number_map, + scratch1, + scratch2, + d0, + not_numbers); + FloatingPointHelper::LoadNumberAsInteger(masm, + right, + r2, + heap_number_map, + scratch1, + scratch2, + d0, + not_numbers); + } + switch (op_) { + case Token::BIT_OR: + __ orr(r2, r3, Operand(r2)); + break; + case Token::BIT_XOR: + __ eor(r2, r3, Operand(r2)); + break; + case Token::BIT_AND: + __ and_(r2, r3, Operand(r2)); + break; + default: + UNREACHABLE(); + } + + Label result_not_a_smi; + // Check that the *signed* result fits in a smi. + __ add(r3, r2, Operand(0x40000000), SetCC); + __ b(mi, &result_not_a_smi); + __ SmiTag(r0, r2); + __ Ret(); + + // Allocate new heap number for result. + __ bind(&result_not_a_smi); + __ AllocateHeapNumber( + r5, scratch1, scratch2, heap_number_map, gc_required); + + // r2: Answer as signed int32. + // r5: Heap number to write answer into. + + // Nothing can go wrong now, so move the heap number to r0, which is the + // result. + __ mov(r0, Operand(r5)); + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r2); + __ vcvt_f64_s32(d0, s0); + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + break; + } + default: + UNREACHABLE(); } } @@ -2646,7 +2794,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, op_ == Token::SUB || op_ == Token::MUL || op_ == Token::DIV || - op_ == Token::MOD); + op_ == Token::MOD || + op_ == Token::BIT_OR || + op_ == Token::BIT_AND || + op_ == Token::BIT_XOR); Register left = r1; Register right = r0; @@ -2678,7 +2829,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { op_ == Token::SUB || op_ == Token::MUL || op_ == Token::DIV || - op_ == Token::MOD); + op_ == Token::MOD || + op_ == Token::BIT_OR || + op_ == Token::BIT_AND || + op_ == Token::BIT_XOR); if (result_type_ == TRBinaryOpIC::UNINITIALIZED || result_type_ == TRBinaryOpIC::SMI) { @@ -2714,7 +2868,10 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { op_ == Token::SUB || op_ == Token::MUL || op_ == Token::DIV || - op_ == Token::MOD); + op_ == Token::MOD || + op_ == Token::BIT_OR || + op_ == Token::BIT_AND || + op_ == Token::BIT_XOR); ASSERT(operands_type_ == TRBinaryOpIC::INT32); @@ -2727,7 +2884,10 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { op_ == Token::SUB || op_ == Token::MUL || op_ == Token::DIV || - op_ == Token::MOD); + op_ == Token::MOD || + op_ == Token::BIT_OR || + op_ == Token::BIT_AND || + op_ == Token::BIT_XOR); Label not_numbers, call_runtime; ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); @@ -2747,7 +2907,10 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { op_ == Token::SUB || op_ == Token::MUL || op_ == Token::DIV || - op_ == Token::MOD); + op_ == Token::MOD || + op_ == Token::BIT_OR || + op_ == Token::BIT_AND || + op_ == 
Token::BIT_XOR); Label call_runtime; @@ -2812,6 +2975,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { case Token::MOD: __ InvokeBuiltin(Builtins::MOD, JUMP_JS); break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); + break; default: UNREACHABLE(); } @@ -3037,7 +3209,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ b(ne, &slow); // Convert the heap number is r0 to an untagged integer in r1. - __ ConvertToInt32(r0, r1, r2, r3, &slow); + __ ConvertToInt32(r0, r1, r2, r3, d0, &slow); // Do the bitwise operation (move negated) and check if the result // fits in a smi. @@ -3329,9 +3501,17 @@ void CEntryStub::Generate(MacroAssembler* masm) { // this by performing a garbage collection and retrying the // builtin once. + // Compute the argv pointer in a callee-saved register. + __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); + __ sub(r6, r6, Operand(kPointerSize)); + // Enter the exit frame that transitions from JavaScript to C++. __ EnterExitFrame(save_doubles_); + // Set up argc and the builtin function in callee-saved registers. + __ mov(r4, Operand(r0)); + __ mov(r5, Operand(r1)); + // r4: number of arguments (C callee-saved) // r5: pointer to builtin function (C callee-saved) // r6: pointer to first argument (C callee-saved) @@ -5734,6 +5914,90 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { } +void DirectCEntryStub::Generate(MacroAssembler* masm) { + __ ldr(pc, MemOperand(sp, 0)); +} + + +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + ApiFunction *function) { + __ mov(lr, Operand(reinterpret_cast(GetCode().location()), + RelocInfo::CODE_TARGET)); + // Push return address (accessible to GC through exit frame pc). + __ mov(r2, + Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); + __ str(pc, MemOperand(sp, 0)); + __ Jump(r2); // Call the api function. +} + + +void GenerateFastPixelArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register elements_map, + Register elements, + Register scratch1, + Register scratch2, + Register result, + Label* not_pixel_array, + Label* key_not_smi, + Label* out_of_range) { + // Register use: + // + // receiver - holds the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // elements - set to be the receiver's elements on exit. + // + // elements_map - set to be the map of the receiver's elements + // on exit. + // + // result - holds the result of the pixel array load on exit, + // tagged as a smi if successful. + // + // Scratch registers: + // + // scratch1 - used as a scratch register in the map check; if the map + // check is successful, contains the length of the + // pixel array, the pointer to external elements and + // the untagged result. + // + // scratch2 - holds the untagged key. + + // Some callers already have verified that the key is a smi. key_not_smi is + // set to NULL as a sentinel for that case. Otherwise, an explicit check + // to ensure the key is a smi is added. + if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); + } + } + __ SmiUntag(scratch2, key); + + // Verify that the receiver has pixel array elements.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex, + not_pixel_array, true); + + // Key must be in range of the pixel array. + __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(scratch2, scratch1); + __ b(hs, out_of_range); // unsigned check handles negative keys. + + // Perform the indexed load and tag the result as a smi. + __ ldr(scratch1, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ ldrb(scratch1, MemOperand(scratch1, scratch2)); + __ SmiTag(r0, scratch1); + __ Ret(); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 9e9047e579..bf7d635487 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -571,6 +571,45 @@ class RegExpCEntryStub: public CodeStub { }; +// Trampoline stub to call into native code. To call safely into native code +// in the presence of compacting GC (which can move code objects) we need to +// keep the code that called into native code pinned in memory. Currently the +// simplest approach is to generate such a stub early enough that it can never +// be moved by the GC. +class DirectCEntryStub: public CodeStub { + public: + DirectCEntryStub() {} + void Generate(MacroAssembler* masm); + void GenerateCall(MacroAssembler* masm, ApiFunction *function); + + private: + Major MajorKey() { return DirectCEntry; } + int MinorKey() { return 0; } + const char* GetName() { return "DirectCEntryStub"; } +}; + + +// Generate code to load an element from a pixel array. The receiver is +// assumed to not be a smi and to have elements; the caller must guarantee this +// precondition. If the receiver does not have elements that are pixel arrays, +// the generated code jumps to not_pixel_array. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. +void GenerateFastPixelArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register elements_map, + Register elements, + Register scratch1, + Register scratch2, + Register result, + Label* not_pixel_array, + Label* key_not_smi, + Label* out_of_range); + + } } // namespace v8::internal #endif // V8_ARM_CODE_STUBS_ARM_H_ diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 9a60183979..12842230bf 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -1110,7 +1110,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() { Register int32 = r2; // Not a 32bits signed int, fall back to the GenericBinaryOpStub. - __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label()); + __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label()); // tos_register_ (r0 or r1): Original heap number. // int32: signed 32bits int. @@ -4177,7 +4177,10 @@ void CodeGenerator::VisitCall(Call* node) { __ ldr(r1, frame_->Receiver()); frame_->EmitPush(r1); - frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3); + // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + + frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4); done.Jump(); slow.Bind(); @@ -4197,8 +4200,11 @@ void CodeGenerator::VisitCall(Call* node) { __ ldr(r1, frame_->Receiver()); frame_->EmitPush(r1); + // Push the strict mode flag. + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + // Resolve the call. - frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3); + frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); // If we generated fast-case code bind the jump-target where fast // and slow case merge. diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 589e704b51..8f46256b8a 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -287,6 +287,7 @@ class CodeGenerator: public AstVisitor { // Accessors inline bool is_eval(); inline Scope* scope(); + inline StrictModeFlag strict_mode_flag(); // Generating deferred code. void ProcessDeferred(); diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 7502ef0d65..5671feecba 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -380,10 +380,13 @@ enum VFPRegPrecision { // VFP FPSCR constants. +enum VFPConversionMode { + kFPSCRRounding = 0, + kDefaultRoundToZero = 1 +}; + static const uint32_t kVFPExceptionMask = 0xf; -static const uint32_t kVFPRoundingModeMask = 3 << 22; static const uint32_t kVFPFlushToZeroMask = 1 << 24; -static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22; static const uint32_t kVFPInvalidExceptionBit = 1; static const uint32_t kVFPNConditionFlagBit = 1 << 31; @@ -393,13 +396,20 @@ static const uint32_t kVFPVConditionFlagBit = 1 << 28; // VFP rounding modes. See ARM DDI 0406B Page A2-29. -enum FPSCRRoundingModes { - RN, // Round to Nearest. - RP, // Round towards Plus Infinity. - RM, // Round towards Minus Infinity. - RZ // Round towards zero. +enum VFPRoundingMode { + RN = 0 << 22, // Round to Nearest. + RP = 1 << 22, // Round towards Plus Infinity. + RM = 2 << 22, // Round towards Minus Infinity. + RZ = 3 << 22, // Round towards zero. + + // Aliases. + kRoundToNearest = RN, + kRoundToPlusInf = RP, + kRoundToMinusInf = RM, + kRoundToZero = RZ }; +static const uint32_t kVFPRoundingModeMask = 3 << 22; // ----------------------------------------------------------------------------- // Hints. diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 75399c69db..e05001f3c3 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -97,7 +97,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { #ifdef DEBUG // Destroy the code which is not supposed to be run again. 
int instructions = - (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize; + (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize; CodePatcher destroyer(code->instruction_start() + last_pc_offset, instructions); for (int x = 0; x < instructions; x++) { diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 23e5f69ebe..ff446c5e4b 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1554,7 +1554,10 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op, op == Token::SUB || op == Token::MUL || op == Token::DIV || - op == Token::MOD) { + op == Token::MOD || + op == Token::BIT_OR || + op == Token::BIT_AND || + op == Token::BIT_XOR) { TypeRecordingBinaryOpStub stub(op, mode); __ CallStub(&stub); } else { @@ -1923,7 +1926,10 @@ void FullCodeGenerator::VisitCall(Call* expr) { __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); __ push(r1); - __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3); + // Push the strict mode flag. + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); // The runtime call returns a pair of values in r0 (function) and // r1 (receiver). Touch up the stack with the right values. diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index d74468c945..1aa031d39b 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1189,19 +1189,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // r0: key // r1: receiver __ bind(&check_pixel_array); - __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); - __ cmp(r3, ip); - __ b(ne, &check_number_dictionary); - __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset)); - __ mov(r2, Operand(key, ASR, kSmiTagSize)); - __ cmp(r2, ip); - __ b(hs, &slow); - __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset)); - __ ldrb(r2, MemOperand(ip, r2)); - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi. - __ Ret(); + + GenerateFastPixelArrayLoad(masm, + r1, + r0, + r3, + r4, + r2, + r5, + r0, + &check_number_dictionary, + NULL, + &slow); __ bind(&check_number_dictionary); // Check whether the elements is a number dictionary. diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index b7c1b78d1b..f672d4908e 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -25,6 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#include "lithium-allocator-inl.h" #include "arm/lithium-arm.h" #include "arm/lithium-codegen-arm.h" @@ -56,6 +57,31 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index, } +#ifdef DEBUG +void LInstruction::VerifyCall() { + // Call instructions can use only fixed registers as + // temporaries and outputs because all registers + // are blocked by the calling convention. + // Inputs can use either fixed register or have a short lifetime (be + // used at start of the instruction). 
+ ASSERT(Output() == NULL || + LUnallocated::cast(Output())->HasFixedPolicy() || + !LUnallocated::cast(Output())->HasRegisterPolicy()); + for (UseIterator it(this); it.HasNext(); it.Advance()) { + LOperand* operand = it.Next(); + ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() || + LUnallocated::cast(operand)->IsUsedAtStart() || + !LUnallocated::cast(operand)->HasRegisterPolicy()); + } + for (TempIterator it(this); it.HasNext(); it.Advance()) { + LOperand* operand = it.Next(); + ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() || + !LUnallocated::cast(operand)->HasRegisterPolicy()); + } +} +#endif + + void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index, LOperand* spill_operand) { ASSERT(spill_operand->IsDoubleStackSlot()); @@ -66,9 +92,8 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index, void LInstruction::PrintTo(StringStream* stream) { stream->Add("%s ", this->Mnemonic()); - if (HasResult()) { - PrintOutputOperandTo(stream); - } + + PrintOutputOperandTo(stream); PrintDataTo(stream); @@ -158,6 +183,9 @@ const char* LArithmeticT::Mnemonic() const { case Token::MUL: return "mul-t"; case Token::MOD: return "mod-t"; case Token::DIV: return "div-t"; + case Token::BIT_AND: return "bit-and-t"; + case Token::BIT_OR: return "bit-or-t"; + case Token::BIT_XOR: return "bit-xor-t"; default: UNREACHABLE(); return NULL; @@ -258,7 +286,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { void LLoadContextSlot::PrintDataTo(StringStream* stream) { - stream->Add("(%d, %d)", context_chain_length(), slot_index()); + InputAt(0)->PrintTo(stream); + stream->Add("[%d]", slot_index()); +} + + +void LStoreContextSlot::PrintDataTo(StringStream* stream) { + InputAt(0)->PrintTo(stream); + stream->Add("[%d] <- ", slot_index()); + InputAt(1)->PrintTo(stream); } @@ -390,7 +426,7 @@ void LChunk::MarkEmptyBlocks() { } -int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { +void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { LGap* gap = new LGap(block); int index = -1; if (instr->IsControl()) { @@ -406,7 +442,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { pointer_maps_.Add(instr->pointer_map()); instr->pointer_map()->set_lithium_position(index); } - return index; } @@ -672,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() { LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { - allocator_->MarkAsCall(); +#ifdef DEBUG + instr->VerifyCall(); +#endif + instr->MarkAsCall(); instr = AssignPointerMap(instr); if (hinstr->HasSideEffects()) { @@ -697,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) { - allocator_->MarkAsSaveDoubles(); + instr->MarkAsSaveDoubles(); return instr; } @@ -742,13 +780,23 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoBit(Token::Value op, HBitwiseBinaryOperation* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); + if (instr->representation().IsInteger32()) { + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); - LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - return 
DefineSameAsFirst(new LBitI(op, left, right)); + LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); + LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); + return DefineSameAsFirst(new LBitI(op, left, right)); + } else { + ASSERT(instr->representation().IsTagged()); + ASSERT(instr->left()->representation().IsTagged()); + ASSERT(instr->right()->representation().IsTagged()); + + LOperand* left = UseFixed(instr->left(), r1); + LOperand* right = UseFixed(instr->right(), r0); + LArithmeticT* result = new LArithmeticT(op, left, right); + return MarkAsCall(DefineFixed(result, r0), instr); + } } @@ -887,7 +935,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { void LChunkBuilder::VisitInstruction(HInstruction* current) { HInstruction* old_current = current_instruction_; current_instruction_ = current; - allocator_->BeginInstruction(); if (current->has_position()) position_ = current->position(); LInstruction* instr = current->CompileToLithium(this); @@ -910,11 +957,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { instr->set_hydrogen_value(current); } - int index = chunk_->AddInstruction(instr, current_block_); - allocator_->SummarizeInstruction(index); - } else { - // This instruction should be omitted. - allocator_->OmitInstruction(); + chunk_->AddInstruction(instr, current_block_); } current_instruction_ = old_current; } @@ -1105,13 +1148,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } +LInstruction* LChunkBuilder::DoContext(HContext* instr) { + return DefineAsRegister(new LContext); +} + + +LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LOuterContext(context)); +} + + LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { - return DefineAsRegister(new LGlobalObject); + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LGlobalObject(context)); } LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) { - return DefineAsRegister(new LGlobalReceiver); + LOperand* global_object = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LGlobalReceiver(global_object)); } @@ -1514,7 +1570,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegister(instr->value()); - LDoubleToI* res = new LDoubleToI(value); + LDoubleToI* res = new LDoubleToI(value, TempRegister()); return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { @@ -1621,7 +1677,20 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) { LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - return DefineAsRegister(new LLoadContextSlot); + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LLoadContextSlot(context)); +} + + +LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { + LOperand* context = UseTempRegister(instr->context()); + LOperand* value; + if (instr->NeedsWriteBarrier()) { + value = UseTempRegister(instr->value()); + } else { + value = UseRegister(instr->value()); + } + return new LStoreContextSlot(context, value); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 7f89ee2922..a076c80c75 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -39,118 +39,6 @@ namespace internal { // Forward 
declarations. class LCodeGen; - -// Type hierarchy: -// -// LInstruction -// LTemplateInstruction -// LControlInstruction -// LBranch -// LClassOfTestAndBranch -// LCmpJSObjectEqAndBranch -// LCmpIDAndBranch -// LHasCachedArrayIndexAndBranch -// LHasInstanceTypeAndBranch -// LInstanceOfAndBranch -// LIsNullAndBranch -// LIsObjectAndBranch -// LIsSmiAndBranch -// LTypeofIsAndBranch -// LAccessArgumentsAt -// LArgumentsElements -// LArgumentsLength -// LAddI -// LApplyArguments -// LArithmeticD -// LArithmeticT -// LBitI -// LBoundsCheck -// LCmpID -// LCmpJSObjectEq -// LCmpT -// LDivI -// LInstanceOf -// LInstanceOfKnownGlobal -// LLoadKeyedFastElement -// LLoadKeyedGeneric -// LModI -// LMulI -// LPower -// LShiftI -// LSubI -// LCallConstantFunction -// LCallFunction -// LCallGlobal -// LCallKeyed -// LCallKnownGlobal -// LCallNamed -// LCallRuntime -// LCallStub -// LConstant -// LConstantD -// LConstantI -// LConstantT -// LDeoptimize -// LFunctionLiteral -// LGap -// LLabel -// LGlobalObject -// LGlobalReceiver -// LGoto -// LLazyBailout -// LLoadGlobal -// LCheckPrototypeMaps -// LLoadContextSlot -// LArrayLiteral -// LObjectLiteral -// LRegExpLiteral -// LOsrEntry -// LParameter -// LRegExpConstructResult -// LStackCheck -// LStoreKeyed -// LStoreKeyedFastElement -// LStoreKeyedGeneric -// LStoreNamed -// LStoreNamedField -// LStoreNamedGeneric -// LStringCharCodeAt -// LBitNotI -// LCallNew -// LCheckFunction -// LCheckPrototypeMaps -// LCheckInstanceType -// LCheckMap -// LCheckSmi -// LClassOfTest -// LDeleteProperty -// LDoubleToI -// LFixedArrayLength -// LHasCachedArrayIndex -// LHasInstanceType -// LInteger32ToDouble -// LIsNull -// LIsObject -// LIsSmi -// LJSArrayLength -// LLoadNamedField -// LLoadNamedGeneric -// LLoadFunctionPrototype -// LNumberTagD -// LNumberTagI -// LPushArgument -// LReturn -// LSmiTag -// LStoreGlobal -// LStringLength -// LTaggedToI -// LThrow -// LTypeof -// LTypeofIs -// LUnaryMathOperation -// LValueOf -// LUnknownOSRValue - #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Constant) \ @@ -187,6 +75,8 @@ class LCodeGen; V(CheckMap) \ V(CheckPrototypeMaps) \ V(CheckSmi) \ + V(ClassOfTest) \ + V(ClassOfTestAndBranch) \ V(CmpID) \ V(CmpIDAndBranch) \ V(CmpJSObjectEq) \ @@ -197,6 +87,7 @@ class LCodeGen; V(ConstantD) \ V(ConstantI) \ V(ConstantT) \ + V(Context) \ V(DeleteProperty) \ V(Deoptimize) \ V(DivI) \ @@ -207,6 +98,10 @@ class LCodeGen; V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ + V(HasCachedArrayIndex) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceType) \ + V(HasInstanceTypeAndBranch) \ V(InstanceOf) \ V(InstanceOfAndBranch) \ V(InstanceOfKnownGlobal) \ @@ -218,22 +113,16 @@ class LCodeGen; V(IsSmi) \ V(IsSmiAndBranch) \ V(JSArrayLength) \ - V(HasInstanceType) \ - V(HasInstanceTypeAndBranch) \ - V(HasCachedArrayIndex) \ - V(HasCachedArrayIndexAndBranch) \ - V(ClassOfTest) \ - V(ClassOfTestAndBranch) \ V(Label) \ V(LazyBailout) \ V(LoadContextSlot) \ V(LoadElements) \ + V(LoadFunctionPrototype) \ V(LoadGlobal) \ V(LoadKeyedFastElement) \ V(LoadKeyedGeneric) \ V(LoadNamedField) \ V(LoadNamedGeneric) \ - V(LoadFunctionPrototype) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -241,6 +130,7 @@ class LCodeGen; V(NumberUntagD) \ V(ObjectLiteral) \ V(OsrEntry) \ + V(OuterContext) \ V(Parameter) \ V(PushArgument) \ V(RegExpLiteral) \ @@ -249,14 +139,15 @@ class LCodeGen; V(SmiTag) \ V(SmiUntag) \ V(StackCheck) \ + V(StoreContextSlot) \ V(StoreGlobal) \ V(StoreKeyedFastElement) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ 
V(StoreNamedGeneric) \ - V(SubI) \ V(StringCharCodeAt) \ V(StringLength) \ + V(SubI) \ V(TaggedToI) \ V(Throw) \ V(Typeof) \ @@ -290,7 +181,10 @@ class LCodeGen; class LInstruction: public ZoneObject { public: LInstruction() - : hydrogen_value_(NULL) { } + : environment_(NULL), + hydrogen_value_(NULL), + is_call_(false), + is_save_doubles_(false) { } virtual ~LInstruction() { } virtual void CompileToNative(LCodeGen* generator) = 0; @@ -307,16 +201,14 @@ class LInstruction: public ZoneObject { virtual bool IsControl() const { return false; } virtual void SetBranchTargets(int true_block_id, int false_block_id) { } - void set_environment(LEnvironment* env) { environment_.set(env); } - LEnvironment* environment() const { return environment_.get(); } - bool HasEnvironment() const { return environment_.is_set(); } + void set_environment(LEnvironment* env) { environment_ = env; } + LEnvironment* environment() const { return environment_; } + bool HasEnvironment() const { return environment_ != NULL; } void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - virtual bool HasResult() const = 0; - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -330,11 +222,35 @@ class LInstruction: public ZoneObject { return deoptimization_environment_.is_set(); } + void MarkAsCall() { is_call_ = true; } + void MarkAsSaveDoubles() { is_save_doubles_ = true; } + + // Interface to the register allocator and iterators. + bool IsMarkedAsCall() const { return is_call_; } + bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; } + + virtual bool HasResult() const = 0; + virtual LOperand* result() = 0; + + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + virtual int TempCount() = 0; + virtual LOperand* TempAt(int i) = 0; + + LOperand* FirstInput() { return InputAt(0); } + LOperand* Output() { return HasResult() ? 
result() : NULL; } + +#ifdef DEBUG + void VerifyCall(); +#endif + private: - SetOncePointer environment_; + LEnvironment* environment_; SetOncePointer pointer_map_; HValue* hydrogen_value_; SetOncePointer deoptimization_environment_; + bool is_call_; + bool is_save_doubles_; }; @@ -361,6 +277,11 @@ class OperandContainer { public: int length() { return 0; } void PrintOperandsTo(StringStream* stream) { } + ElementType& operator[](int i) { + UNREACHABLE(); + static ElementType t = 0; + return t; + } }; @@ -1266,18 +1187,41 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 1> { }; -class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> { +class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> { public: + explicit LLoadContextSlot(LOperand* context) { + inputs_[0] = context; + } + DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - int context_chain_length() { return hydrogen()->context_chain_length(); } + LOperand* context() { return InputAt(0); } int slot_index() { return hydrogen()->slot_index(); } virtual void PrintDataTo(StringStream* stream); }; +class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> { + public: + LStoreContextSlot(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") + DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) + + LOperand* context() { return InputAt(0); } + LOperand* value() { return InputAt(1); } + int slot_index() { return hydrogen()->slot_index(); } + int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } + + virtual void PrintDataTo(StringStream* stream); +}; + + class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: explicit LPushArgument(LOperand* value) { @@ -1288,15 +1232,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { }; -class LGlobalObject: public LTemplateInstruction<1, 0, 0> { +class LContext: public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(Context, "context") +}; + + +class LOuterContext: public LTemplateInstruction<1, 1, 0> { + public: + explicit LOuterContext(LOperand* context) { + inputs_[0] = context; + } + + DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context") + + LOperand* context() { return InputAt(0); } +}; + + +class LGlobalObject: public LTemplateInstruction<1, 1, 0> { public: + explicit LGlobalObject(LOperand* context) { + inputs_[0] = context; + } + DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") + + LOperand* context() { return InputAt(0); } }; -class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> { +class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> { public: + explicit LGlobalReceiver(LOperand* global_object) { + inputs_[0] = global_object; + } + DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver") + + LOperand* global() { return InputAt(0); } }; @@ -1431,10 +1405,11 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { // Sometimes truncating conversion from a tagged value to an int32. 
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> { +class LDoubleToI: public LTemplateInstruction<1, 1, 1> { public: - explicit LDoubleToI(LOperand* value) { + explicit LDoubleToI(LOperand* value, LOperand* temp1) { inputs_[0] = value; + temps_[0] = temp1; } DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") @@ -1789,7 +1764,7 @@ class LChunk: public ZoneObject { public: explicit LChunk(HGraph* graph); - int AddInstruction(LInstruction* instruction, HBasicBlock* block); + void AddInstruction(LInstruction* instruction, HBasicBlock* block); LConstantOperand* DefineConstantOperand(HConstant* constant); Handle LookupLiteral(LConstantOperand* operand) const; Representation LookupLiteralRepresentation(LConstantOperand* operand) const; diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index a1adae389d..855ed461b5 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -223,7 +223,7 @@ bool LCodeGen::GenerateCode() { void LCodeGen::FinishCode(Handle code) { ASSERT(is_done()); code->set_stack_slots(StackSlotCount()); - code->set_safepoint_table_start(safepoints_.GetCodeOffset()); + code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); PopulateDeoptimizationData(code); } @@ -1174,7 +1174,7 @@ void LCodeGen::DoMulI(LMulI* instr) { if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { // scratch:left = left * right. - __ smull(scratch, left, left, right); + __ smull(left, scratch, left, right); __ mov(ip, Operand(left, ASR, 31)); __ cmp(ip, Operand(scratch)); DeoptimizeIf(ne, instr->environment()); @@ -1398,7 +1398,18 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ vdiv(left, left, right); break; case Token::MOD: { - Abort("DoArithmeticD unimplemented for MOD."); + // Save r0-r3 on the stack. + __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); + + __ PrepareCallCFunction(4, scratch0()); + __ vmov(r0, r1, left); + __ vmov(r2, r3, right); + __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); + // Move the result in the double result register. + __ vmov(ToDoubleRegister(instr->result()), r0, r1); + + // Restore r0-r3. + __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); break; } default: @@ -1595,17 +1606,58 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { __ cmp(ToRegister(left), ToOperand(right)); - Abort("EmitCmpI untested."); } void LCodeGen::DoCmpID(LCmpID* instr) { - Abort("DoCmpID unimplemented."); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + LOperand* result = instr->result(); + Register scratch = scratch0(); + + Label unordered, done; + if (instr->is_double()) { + // Compare left and right as doubles and load the + // resulting flags into the normal status register. + __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); + __ vmrs(pc); + // If a NaN is involved, i.e. the result is unordered (V set), + // jump to unordered to return false. 
+ __ b(vs, &unordered); + } else { + EmitCmpI(left, right); + } + + Condition cc = TokenToCondition(instr->op(), instr->is_double()); + __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex); + __ b(cc, &done); + + __ bind(&unordered); + __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex); + __ bind(&done); } void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { - Abort("DoCmpIDAndBranch unimplemented."); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + + if (instr->is_double()) { + // Compare left and right as doubles and load the + // resulting flags into the normal status register. + __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); + __ vmrs(pc); + // If a NaN is involved, i.e. the result is unordered (V set), + // jump to false block label. + __ b(vs, chunk_->GetAssemblyLabel(false_block)); + } else { + EmitCmpI(left, right); + } + + Condition cc = TokenToCondition(instr->op(), instr->is_double()); + EmitBranch(true_block, false_block, cc); } @@ -2201,13 +2253,27 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - // TODO(antonm): load a context with a separate instruction. + Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ LoadContext(result, instr->context_chain_length()); + __ ldr(result, + MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); __ ldr(result, ContextOperand(result, instr->slot_index())); } +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + __ ldr(context, + MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ str(value, ContextOperand(context, instr->slot_index())); + if (instr->needs_write_barrier()) { + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWrite(context, Operand(offset), value, scratch0()); + } +} + + void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { Register object = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); @@ -2458,16 +2524,32 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) { } +void LCodeGen::DoContext(LContext* instr) { + Register result = ToRegister(instr->result()); + __ mov(result, cp); +} + + +void LCodeGen::DoOuterContext(LOuterContext* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ ldr(result, + MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset)); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { + Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); } void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { + Register global = ToRegister(instr->global()); Register result = ToRegister(instr->result()); - __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); + __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); } @@ -2625,34 +2707,53 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - 
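
DoCmpID and DoCmpIDAndBranch above both route the "unordered" case (V flag set after vcmp) to the false result. The reason is the usual IEEE rule that every ordered comparison involving a NaN is false; a tiny host-side illustration:

#include <cmath>
#include <cstdio>

int main() {
  double nan = std::nan("");
  // All three print 0: <, > and == are false when either operand is NaN,
  // which is why the generated code branches to the false block on "vs".
  std::printf("%d %d %d\n", nan < 1.0, nan > 1.0, nan == nan);
  return 0;
}
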
DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); - Register prev_fpscr = ToRegister(instr->TempAt(0)); - SwVfpRegister single_scratch = double_scratch0().low(); - Register scratch = scratch0(); +// Truncates a double using a specific rounding mode. +// Clears the z flag (ne condition) if an overflow occurs. +void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode, + SwVfpRegister result, + DwVfpRegister double_input, + Register scratch1, + Register scratch2) { + Register prev_fpscr = scratch1; + Register scratch = scratch2; // Set custom FPCSR: - // - Set rounding mode to "Round towards Minus Infinity". + // - Set rounding mode. // - Clear vfp cumulative exception flags. // - Make sure Flush-to-zero mode control bit is unset. __ vmrs(prev_fpscr); - __ bic(scratch, prev_fpscr, - Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); - __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits)); + __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask | + kVFPRoundingModeMask | + kVFPFlushToZeroMask)); + __ orr(scratch, scratch, Operand(rounding_mode)); __ vmsr(scratch); // Convert the argument to an integer. - __ vcvt_s32_f64(single_scratch, - input, - Assembler::FPSCRRounding, - al); + __ vcvt_s32_f64(result, + double_input, + kFPSCRRounding); - // Retrieve FPSCR and check for vfp exceptions. + // Retrieve FPSCR. __ vmrs(scratch); - // Restore FPSCR + // Restore FPSCR. __ vmsr(prev_fpscr); + // Check for vfp exceptions. __ tst(scratch, Operand(kVFPExceptionMask)); +} + + +void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + SwVfpRegister single_scratch = double_scratch0().low(); + Register scratch1 = scratch0(); + Register scratch2 = ToRegister(instr->TempAt(0)); + + EmitVFPTruncate(kRoundToMinusInf, + single_scratch, + input, + scratch1, + scratch2); DeoptimizeIf(ne, instr->environment()); // Move the result back to general purpose register r0. @@ -2662,8 +2763,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { Label done; __ cmp(result, Operand(0)); __ b(ne, &done); - __ vmov(scratch, input.high()); - __ tst(scratch, Operand(HeapNumber::kSignMask)); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); DeoptimizeIf(ne, instr->environment()); __ bind(&done); } @@ -3297,7 +3398,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Abort("DoDoubleToI unimplemented."); + LOperand* input = instr->InputAt(0); + ASSERT(input->IsDoubleRegister()); + LOperand* result = instr->result(); + ASSERT(result->IsRegister()); + + DoubleRegister double_input = ToDoubleRegister(input); + Register result_reg = ToRegister(result); + SwVfpRegister single_scratch = double_scratch0().low(); + Register scratch1 = scratch0(); + Register scratch2 = ToRegister(instr->TempAt(0)); + + VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf + : kRoundToNearest; + + EmitVFPTruncate(rounding_mode, + single_scratch, + double_input, + scratch1, + scratch2); + // Deoptimize if we had a vfp invalid exception. + DeoptimizeIf(ne, instr->environment()); + // Retrieve the result. + __ vmov(result_reg, single_scratch); + + if (instr->truncating() && + instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand(0)); + __ b(ne, &done); + // Check for -0. 
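
EmitVFPTruncate above saves the FPSCR, installs the requested rounding mode with the cumulative exception flags cleared, converts, and then tests the exception bits so callers can deoptimize on NaN or overflow. A rough host-C++ analogue of that sequence using <cfenv>, illustrative only; the exact exception behaviour of lrint is platform-dependent:

#include <cfenv>
#include <cmath>
#include <cstdint>

// Not V8 code: a host-side restatement of the EmitVFPTruncate sequence.
// (1) save the current rounding state, (2) install the requested mode with
// the exception flags cleared, (3) convert, (4) check the invalid-operation
// flag -- the condition DoMathFloor/DoDoubleToI deoptimize on.
bool TruncateDoubleToInt32(double input, int fe_rounding_mode, int32_t* out) {
  const int saved_mode = std::fegetround();     // like reading FPSCR into prev_fpscr
  std::fesetround(fe_rounding_mode);            // e.g. FE_DOWNWARD ~ kRoundToMinusInf
  std::feclearexcept(FE_ALL_EXCEPT);
  const long rounded = std::lrint(input);       // converts with the current mode
  const bool invalid = std::fetestexcept(FE_INVALID) != 0 ||
                       rounded < INT32_MIN || rounded > INT32_MAX;
  std::fesetround(saved_mode);                  // like writing prev_fpscr back
  if (invalid) return false;                    // caller would DeoptimizeIf(ne, ...)
  *out = static_cast<int32_t>(rounded);
  return true;
}
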
+ __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + + __ bind(&done); + } } @@ -3497,7 +3633,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. Handle shared_info = instr->shared_info(); - bool pretenure = !instr->hydrogen()->pretenure(); + bool pretenure = instr->hydrogen()->pretenure(); if (shared_info->num_literals() == 0 && !pretenure) { FastNewClosureStub stub; __ mov(r1, Operand(shared_info)); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 27a72f29a0..3f7fe4519b 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -219,6 +219,11 @@ class LCodeGen BASE_EMBEDDED { // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); + void EmitVFPTruncate(VFPRoundingMode rounding_mode, + SwVfpRegister result, + DwVfpRegister double_input, + Register scratch1, + Register scratch2); void DoMathFloor(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index ef351022a8..c11d664f07 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -632,11 +632,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { } -void MacroAssembler::EnterExitFrame(bool save_doubles) { - // Compute the argv pointer in a callee-saved register. - add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); - sub(r6, r6, Operand(kPointerSize)); - +void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Setup the frame structure on the stack. ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); @@ -658,10 +654,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { mov(ip, Operand(ExternalReference(Top::k_context_address))); str(cp, MemOperand(ip)); - // Setup argc and the builtin function in callee-saved registers. - mov(r4, Operand(r0)); - mov(r5, Operand(r1)); - // Optionally save all double registers. if (save_doubles) { sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize)); @@ -675,10 +667,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // since the sp slot and code slot were pushed after the fp. } - // Reserve place for the return address and align the frame preparing for - // calling the runtime function. + // Reserve place for the return address and stack space and align the frame + // preparing for calling the runtime function. const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - sub(sp, sp, Operand(kPointerSize)); + sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); if (frame_alignment > 0) { ASSERT(IsPowerOf2(frame_alignment)); and_(sp, sp, Operand(-frame_alignment)); @@ -1475,14 +1467,112 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); +} + + +MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + Object* result; + { MaybeObject* maybe_result = stub->TryGetCode(); + if (!maybe_result->ToObject(&result)) return maybe_result; + } Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); + return result; +} + + +static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { + return ref0.address() - ref1.address(); +} + + +MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( + ApiFunction* function, int stack_space) { + ExternalReference next_address = + ExternalReference::handle_scope_next_address(); + const int kNextOffset = 0; + const int kLimitOffset = AddressOffset( + ExternalReference::handle_scope_limit_address(), + next_address); + const int kLevelOffset = AddressOffset( + ExternalReference::handle_scope_level_address(), + next_address); + + // Allocate HandleScope in callee-save registers. + mov(r7, Operand(next_address)); + ldr(r4, MemOperand(r7, kNextOffset)); + ldr(r5, MemOperand(r7, kLimitOffset)); + ldr(r6, MemOperand(r7, kLevelOffset)); + add(r6, r6, Operand(1)); + str(r6, MemOperand(r7, kLevelOffset)); + + // Native call returns to the DirectCEntry stub which redirects to the + // return address pushed on stack (could have moved after GC). + // DirectCEntry stub itself is generated early and never moves. + DirectCEntryStub stub; + stub.GenerateCall(this, function); + + Label promote_scheduled_exception; + Label delete_allocated_handles; + Label leave_exit_frame; + + // If result is non-zero, dereference to get the result value + // otherwise set it to undefined. + cmp(r0, Operand(0)); + LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + ldr(r0, MemOperand(r0), ne); + + // No more valid handles (the result handle was the last one). Restore + // previous handle scope. + str(r4, MemOperand(r7, kNextOffset)); + if (FLAG_debug_code) { + ldr(r1, MemOperand(r7, kLevelOffset)); + cmp(r1, r6); + Check(eq, "Unexpected level after return from api call"); + } + sub(r6, r6, Operand(1)); + str(r6, MemOperand(r7, kLevelOffset)); + ldr(ip, MemOperand(r7, kLimitOffset)); + cmp(r5, ip); + b(ne, &delete_allocated_handles); + + // Check if the function scheduled an exception. + bind(&leave_exit_frame); + LoadRoot(r4, Heap::kTheHoleValueRootIndex); + mov(ip, Operand(ExternalReference::scheduled_exception_address())); + ldr(r5, MemOperand(ip)); + cmp(r4, r5); + b(ne, &promote_scheduled_exception); + + // LeaveExitFrame expects unwind space to be in r4. + mov(r4, Operand(stack_space)); + LeaveExitFrame(false); + + bind(&promote_scheduled_exception); + MaybeObject* result = TryTailCallExternalReference( + ExternalReference(Runtime::kPromoteScheduledException), 0, 1); + if (result->IsFailure()) { + return result; + } + + // HandleScope limit has changed. Delete allocated extensions. 
+ bind(&delete_allocated_handles); + str(r5, MemOperand(r7, kLimitOffset)); + mov(r4, r0); + PrepareCallCFunction(0, r5); + CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0); + mov(r0, r4); + jmp(&leave_exit_frame); + + return result; } @@ -1577,13 +1667,14 @@ void MacroAssembler::ConvertToInt32(Register source, Register dest, Register scratch, Register scratch2, + DwVfpRegister double_scratch, Label *not_int32) { if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); sub(scratch, source, Operand(kHeapObjectTag)); - vldr(d0, scratch, HeapNumber::kValueOffset); - vcvt_s32_f64(s0, d0); - vmov(dest, s0); + vldr(double_scratch, scratch, HeapNumber::kValueOffset); + vcvt_s32_f64(double_scratch.low(), double_scratch); + vmov(dest, double_scratch.low()); // Signed vcvt instruction will saturate to the minimum (0x80000000) or // maximun (0x7fffffff) signed 32bits integer when the double is out of // range. When substracting one, the minimum signed integer becomes the @@ -1739,6 +1830,17 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, } +MaybeObject* MacroAssembler::TryTailCallExternalReference( + const ExternalReference& ext, int num_arguments, int result_size) { + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. + mov(r0, Operand(num_arguments)); + return TryJumpToExternalReference(ext); +} + + void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size) { @@ -1757,6 +1859,18 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { } +MaybeObject* MacroAssembler::TryJumpToExternalReference( + const ExternalReference& builtin) { +#if defined(__thumb__) + // Thumb mode builtin. + ASSERT((reinterpret_cast(builtin.address()) & 1) == 1); +#endif + mov(r1, Operand(builtin)); + CEntryStub stub(1); + return TryTailCallStub(&stub); +} + + void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags, PostCallGenerator* post_call_generator) { @@ -1999,6 +2113,16 @@ void MacroAssembler::AbortIfNotSmi(Register object) { } +void MacroAssembler::AbortIfNotRootValue(Register src, + Heap::RootListIndex root_value_index, + const char* message) { + ASSERT(!src.is(ip)); + LoadRoot(ip, root_value_index); + cmp(src, ip); + Assert(eq, message); +} + + void MacroAssembler::JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index a1a13e32ef..c9ffde8981 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -287,10 +287,8 @@ class MacroAssembler: public Assembler { void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } // Enter exit frame. - // Expects the number of arguments in register r0 and - // the builtin function to call in register r1. Exits with argc in - // r4, argv in r6, and and the builtin function to call in r5. - void EnterExitFrame(bool save_doubles); + // stack_space - extra stack space, used for alignment before call to C. + void EnterExitFrame(bool save_doubles, int stack_space = 0); // Leave the current exit frame. Expects the return value in r0. 
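
The body of TryCallApiFunctionAndReturn above is mostly HandleScope bookkeeping around the native callback: the next/limit/level fields are cached in callee-saved registers, the level is bumped for the call, and afterwards next and level are restored while a changed limit means the callback allocated handle blocks that must be released. A plain-C++ restatement of that control flow; the struct and function below are illustrative, not V8's declarations:

// Mirrors the register choreography: r4 = next, r5 = limit, r6 = level.
struct HandleScopeData {
  void** next;
  void** limit;
  int level;
};

void* CallApiAndCleanUp(HandleScopeData* scope, void* (*callback)(void*), void* arg) {
  void** const saved_next = scope->next;    // ldr r4, [r7, #kNextOffset]
  void** const saved_limit = scope->limit;  // ldr r5, [r7, #kLimitOffset]
  scope->level++;                           // add r6, r6, #1; str r6, ...

  void* result = callback(arg);             // via the DirectCEntry stub

  scope->next = saved_next;                 // str r4, [r7, #kNextOffset]
  scope->level--;                           // sub r6, r6, #1; str r6, ...
  if (scope->limit != saved_limit) {
    // The callback grew the scope: release the extra handle blocks
    // (delete_handle_scope_extensions in the real code) before leaving
    // the exit frame.
    scope->limit = saved_limit;
  }
  return result;
}
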
void LeaveExitFrame(bool save_doubles); @@ -589,11 +587,13 @@ class MacroAssembler: public Assembler { // Convert the HeapNumber pointed to by source to a 32bits signed integer // dest. If the HeapNumber does not fit into a 32bits signed integer branch - // to not_int32 label. + // to not_int32 label. If VFP3 is available double_scratch is used but not + // scratch2. void ConvertToInt32(Register source, Register dest, Register scratch, Register scratch2, + DwVfpRegister double_scratch, Label *not_int32); // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz @@ -614,6 +614,12 @@ class MacroAssembler: public Assembler { // Call a code stub. void TailCallStub(CodeStub* stub, Condition cond = al); + // Tail call a code stub (jump) and return the code object called. Try to + // generate the code if necessary. Do not perform a GC but instead return + // a retry after GC failure. + MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub, + Condition cond = al); + // Call a runtime routine. void CallRuntime(Runtime::Function* f, int num_arguments); void CallRuntimeSaveDoubles(Runtime::FunctionId id); @@ -632,6 +638,12 @@ class MacroAssembler: public Assembler { int num_arguments, int result_size); + // Tail call of a runtime routine (jump). Try to generate the code if + // necessary. Do not perform a GC but instead return a retry after GC + // failure. + MUST_USE_RESULT MaybeObject* TryTailCallExternalReference( + const ExternalReference& ext, int num_arguments, int result_size); + // Convenience function: tail call a runtime routine (jump). void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, @@ -655,9 +667,18 @@ class MacroAssembler: public Assembler { void CallCFunction(ExternalReference function, int num_arguments); void CallCFunction(Register function, int num_arguments); + // Calls an API function. Allocates HandleScope, extracts returned value + // from handle and propagates exceptions. Restores context. + // stack_space - space to be unwound on exit (includes the call js + // arguments space and the additional space allocated for the fast call). + MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function, + int stack_space); + // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& builtin); + MaybeObject* TryJumpToExternalReference(const ExternalReference& ext); + // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. void InvokeBuiltin(Builtins::JavaScript id, @@ -763,6 +784,11 @@ class MacroAssembler: public Assembler { void AbortIfSmi(Register object); void AbortIfNotSmi(Register object); + // Abort execution if argument is not the root value with the given index. + void AbortIfNotRootValue(Register src, + Heap::RootListIndex root_value_index, + const char* message); + // --------------------------------------------------------------------------- // HeapNumber utilities diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index de440306c9..8104747f14 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -744,10 +744,10 @@ Simulator::Simulator() { // offset from the svc instruction so the simulator knows what to call. 
class Redirection { public: - Redirection(void* external_function, bool fp_return) + Redirection(void* external_function, ExternalReference::Type type) : external_function_(external_function), swi_instruction_(al | (0xf*B24) | kCallRtRedirected), - fp_return_(fp_return), + type_(type), next_(list_) { Simulator::current()-> FlushICache(reinterpret_cast(&swi_instruction_), @@ -760,14 +760,15 @@ class Redirection { } void* external_function() { return external_function_; } - bool fp_return() { return fp_return_; } + ExternalReference::Type type() { return type_; } - static Redirection* Get(void* external_function, bool fp_return) { + static Redirection* Get(void* external_function, + ExternalReference::Type type) { Redirection* current; for (current = list_; current != NULL; current = current->next_) { if (current->external_function_ == external_function) return current; } - return new Redirection(external_function, fp_return); + return new Redirection(external_function, type); } static Redirection* FromSwiInstruction(Instruction* swi_instruction) { @@ -780,7 +781,7 @@ class Redirection { private: void* external_function_; uint32_t swi_instruction_; - bool fp_return_; + ExternalReference::Type type_; Redirection* next_; static Redirection* list_; }; @@ -790,8 +791,8 @@ Redirection* Redirection::list_ = NULL; void* Simulator::RedirectExternalReference(void* external_function, - bool fp_return) { - Redirection* redirection = Redirection::Get(external_function, fp_return); + ExternalReference::Type type) { + Redirection* redirection = Redirection::Get(external_function, type); return redirection->address_of_swi_instruction(); } @@ -1528,6 +1529,9 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, int32_t arg2, int32_t arg3); +// This signature supports direct call in to API function native callback +// (refer to InvocationCallback in v8.h). +typedef v8::Handle (*SimulatorRuntimeApiCall)(int32_t arg0); // Software interrupt instructions are used by the simulator to call into the // C-based V8 runtime. @@ -1550,9 +1554,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { // This is dodgy but it works because the C entry stubs are never moved. // See comment in codegen-arm.cc and bug 1242173. int32_t saved_lr = get_register(lr); - if (redirection->fp_return()) { - intptr_t external = - reinterpret_cast(redirection->external_function()); + intptr_t external = + reinterpret_cast(redirection->external_function()); + if (redirection->type() == ExternalReference::FP_RETURN_CALL) { SimulatorRuntimeFPCall target = reinterpret_cast(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { @@ -1568,9 +1572,28 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { CHECK(stack_aligned); double result = target(arg0, arg1, arg2, arg3); SetFpResult(result); + } else if (redirection->type() == ExternalReference::DIRECT_CALL) { + SimulatorRuntimeApiCall target = + reinterpret_cast(external); + if (::v8::internal::FLAG_trace_sim || !stack_aligned) { + PrintF( + "Call to host function at %p args %08x", + FUNCTION_ADDR(target), + arg0); + if (!stack_aligned) { + PrintF(" with unaligned stack %08x\n", get_register(sp)); + } + PrintF("\n"); + } + CHECK(stack_aligned); + v8::Handle result = target(arg0); + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %p\n", reinterpret_cast(*result)); + } + set_register(r0, (int32_t) *result); } else { - intptr_t external = - reinterpret_cast(redirection->external_function()); + // builtin call. 
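
The switch from a bool fp_return to ExternalReference::Type in the Redirection class above is what lets the simulator call three differently shaped host functions. Roughly, each external entry point is wrapped in a record that remembers how it must be invoked, and SoftwareInterrupt casts the stored address to the matching signature. A standalone sketch with illustrative names; the real typedefs live in simulator-arm.cc:

#include <cstdint>

enum class CallType { kBuiltinCall, kFpReturnCall, kDirectCall };

typedef intptr_t (*BuiltinCall)(intptr_t, intptr_t, intptr_t, intptr_t);
typedef double (*FpReturnCall)(intptr_t, intptr_t, intptr_t, intptr_t);
typedef void* (*DirectApiCall)(intptr_t);

struct RedirectionSketch {
  void* external_function;  // the real host entry point
  CallType type;            // how the generated code expects to call it
};

// The shape of the dispatch in Simulator::SoftwareInterrupt after this patch.
void Dispatch(const RedirectionSketch& r,
              intptr_t a0, intptr_t a1, intptr_t a2, intptr_t a3) {
  switch (r.type) {
    case CallType::kFpReturnCall:  // double f(double, double) passed in GPR pairs
      reinterpret_cast<FpReturnCall>(r.external_function)(a0, a1, a2, a3);
      break;
    case CallType::kDirectCall:    // API callback taking a single pointer argument
      reinterpret_cast<DirectApiCall>(r.external_function)(a0);
      break;
    case CallType::kBuiltinCall:   // ordinary runtime/builtin call
      reinterpret_cast<BuiltinCall>(r.external_function)(a0, a1, a2, a3);
      break;
  }
}
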
+ ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); SimulatorRuntimeCall target = reinterpret_cast(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { @@ -2539,7 +2562,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { (overflow_vfp_flag_ << 2) | (div_zero_vfp_flag_ << 1) | (inv_op_vfp_flag_ << 0) | - (FPSCR_rounding_mode_ << 22); + (FPSCR_rounding_mode_); set_register(rt, fpscr); } } else if ((instr->VLValue() == 0x0) && @@ -2562,7 +2585,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { div_zero_vfp_flag_ = (rt_value >> 1) & 1; inv_op_vfp_flag_ = (rt_value >> 0) & 1; FPSCR_rounding_mode_ = - static_cast((rt_value >> 22) & 3); + static_cast((rt_value) & kVFPRoundingModeMask); } } else { UNIMPLEMENTED(); // Not used by V8. @@ -2651,87 +2674,135 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) { } } +bool get_inv_op_vfp_flag(VFPRoundingMode mode, + double val, + bool unsigned_) { + ASSERT((mode == RN) || (mode == RM) || (mode == RZ)); + double max_uint = static_cast(0xffffffffu); + double max_int = static_cast(kMaxInt); + double min_int = static_cast(kMinInt); + + // Check for NaN. + if (val != val) { + return true; + } + + // Check for overflow. This code works because 32bit integers can be + // exactly represented by ieee-754 64bit floating-point values. + switch (mode) { + case RN: + return unsigned_ ? (val >= (max_uint + 0.5)) || + (val < -0.5) + : (val >= (max_int + 0.5)) || + (val < (min_int - 0.5)); + + case RM: + return unsigned_ ? (val >= (max_uint + 1.0)) || + (val < 0) + : (val >= (max_int + 1.0)) || + (val < min_int); + + case RZ: + return unsigned_ ? (val >= (max_uint + 1.0)) || + (val <= -1) + : (val >= (max_int + 1.0)) || + (val <= (min_int - 1.0)); + default: + UNREACHABLE(); + return true; + } +} + + +// We call this function only if we had a vfp invalid exception. +// It returns the correct saturated value. +int VFPConversionSaturate(double val, bool unsigned_res) { + if (val != val) { + return 0; + } else { + if (unsigned_res) { + return (val < 0) ? 0 : 0xffffffffu; + } else { + return (val < 0) ? kMinInt : kMaxInt; + } + } +} + void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) && + (instr->Bits(27, 23) == 0x1D)); ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) || (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1))); // Conversion between floating-point and integer. bool to_integer = (instr->Bit(18) == 1); - VFPRegPrecision src_precision = kSinglePrecision; - if (instr->SzValue() == 1) { - src_precision = kDoublePrecision; - } + VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision + : kSinglePrecision; if (to_integer) { - bool unsigned_integer = (instr->Bit(16) == 0); - FPSCRRoundingModes mode; - if (instr->Bit(7) != 1) { - // Use FPSCR defined rounding mode. - mode = FPSCR_rounding_mode_; - // Only RZ and RM modes are supported. - ASSERT((mode == RM) || (mode == RZ)); - } else { - // VFP uses round towards zero by default. - mode = RZ; - } + // We are playing with code close to the C++ standard's limits below, + // hence the very simple code and heavy checks. + // + // Note: + // C++ defines default type casting from floating point to integer as + // (close to) rounding toward zero ("fractional part discarded"). 
int dst = instr->VFPDRegValue(kSinglePrecision); int src = instr->VFPMRegValue(src_precision); - int32_t kMaxInt = v8::internal::kMaxInt; - int32_t kMinInt = v8::internal::kMinInt; - switch (mode) { - case RM: - if (src_precision == kDoublePrecision) { - double val = get_double_from_d_register(src); - inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); + // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding + // mode or the default Round to Zero mode. + VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_ + : RZ; + ASSERT((mode == RM) || (mode == RZ) || (mode == RN)); - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); - sint = sint > val ? sint - 1 : sint; + bool unsigned_integer = (instr->Bit(16) == 0); + bool double_precision = (src_precision == kDoublePrecision); - set_s_register_from_sinteger(dst, sint); - } else { - float val = get_float_from_s_register(src); + double val = double_precision ? get_double_from_d_register(src) + : get_float_from_s_register(src); - inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); + int temp = unsigned_integer ? static_cast(val) + : static_cast(val); - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); - sint = sint > val ? sint - 1 : sint; + inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer); - set_s_register_from_sinteger(dst, sint); + if (inv_op_vfp_flag_) { + temp = VFPConversionSaturate(val, unsigned_integer); + } else { + switch (mode) { + case RN: { + double abs_diff = + unsigned_integer ? fabs(val - static_cast(temp)) + : fabs(val - temp); + int val_sign = (val > 0) ? 1 : -1; + if (abs_diff > 0.5) { + temp += val_sign; + } else if (abs_diff == 0.5) { + // Round to even if exactly halfway. + temp = ((temp % 2) == 0) ? temp : temp + val_sign; + } + break; } - break; - case RZ: - if (src_precision == kDoublePrecision) { - double val = get_double_from_d_register(src); - - inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); - - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); - set_s_register_from_sinteger(dst, sint); - } else { - float val = get_float_from_s_register(src); - - inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val); - - int sint = unsigned_integer ? static_cast(val) : - static_cast(val); + case RM: + temp = temp > val ? temp - 1 : temp; + break; - set_s_register_from_sinteger(dst, sint); - } - break; + case RZ: + // Nothing to do. + break; - default: - UNREACHABLE(); + default: + UNREACHABLE(); + } } + // Update the destination register. + set_s_register_from_sinteger(dst, temp); + } else { bool unsigned_integer = (instr->Bit(7) == 0); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index be44766d54..5256ae35b9 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -79,6 +79,7 @@ class SimulatorStack : public v8::internal::AllStatic { #include "constants-arm.h" #include "hashmap.h" +#include "assembler.h" namespace v8 { namespace internal { @@ -285,8 +286,9 @@ class Simulator { static CachePage* GetCachePage(void* page); // Runtime call support. - static void* RedirectExternalReference(void* external_function, - bool fp_return); + static void* RedirectExternalReference( + void* external_function, + v8::internal::ExternalReference::Type type); // For use in calls that take two double values, constructed from r0, r1, r2 // and r3. 
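
The new round-to-nearest (RN) branch added to DecodeVCVTBetweenFloatingPointAndInteger above rounds ties to the even integer, matching the IEEE 754 default the hardware uses. A host-side check of that logic, written the same way the simulator computes it (truncate first, then adjust by the absolute difference); this is an illustration, not the simulator source:

#include <cmath>
#include <cstdio>

int RoundToNearestEven(double val) {
  int truncated = static_cast<int>(val);       // C++ casts truncate toward zero
  double abs_diff = std::fabs(val - truncated);
  int val_sign = (val > 0) ? 1 : -1;
  if (abs_diff > 0.5) return truncated + val_sign;
  if (abs_diff == 0.5) return (truncated % 2 == 0) ? truncated : truncated + val_sign;
  return truncated;
}

int main() {
  std::printf("%d %d %d\n",
              RoundToNearestEven(2.5),    // 2
              RoundToNearestEven(3.5),    // 4
              RoundToNearestEven(-2.5));  // -2
  return 0;
}
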
@@ -312,7 +314,7 @@ class Simulator { bool v_flag_FPSCR_; // VFP rounding mode. See ARM DDI 0406B Page A2-29. - FPSCRRoundingModes FPSCR_rounding_mode_; + VFPRoundingMode FPSCR_rounding_mode_; // VFP FP exception flags architecture state. bool inv_op_vfp_flag_; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 1e99e60694..9ef61158ea 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -575,72 +575,94 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm, __ CallStub(&stub); } +static const int kFastApiCallArguments = 3; // Reserves space for the extra arguments to FastHandleApiCall in the // caller's frame. // -// These arguments are set by CheckPrototypes and GenerateFastApiCall. +// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { __ mov(scratch, Operand(Smi::FromInt(0))); - __ push(scratch); - __ push(scratch); - __ push(scratch); - __ push(scratch); + for (int i = 0; i < kFastApiCallArguments; i++) { + __ push(scratch); + } } // Undoes the effects of ReserveSpaceForFastApiCall. static void FreeSpaceForFastApiCall(MacroAssembler* masm) { - __ Drop(4); + __ Drop(kFastApiCallArguments); } -// Generates call to FastHandleApiCall builtin. -static void GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { + // ----------- S t a t e ------------- + // -- sp[0] : holder (set by CheckPrototypes) + // -- sp[4] : callee js function + // -- sp[8] : call data + // -- sp[12] : last js argument + // -- ... + // -- sp[(argc + 3) * 4] : first js argument + // -- sp[(argc + 4) * 4] : receiver + // ----------------------------------- // Get the function and setup the context. JSFunction* function = optimization.constant_function(); __ mov(r5, Operand(Handle(function))); __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); // Pass the additional arguments FastHandleApiCall expects. - bool info_loaded = false; - Object* callback = optimization.api_call_info()->callback(); - if (Heap::InNewSpace(callback)) { - info_loaded = true; - __ Move(r0, Handle(optimization.api_call_info())); - __ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset)); - } else { - __ Move(r7, Handle(callback)); - } Object* call_data = optimization.api_call_info()->data(); + Handle api_call_info_handle(optimization.api_call_info()); if (Heap::InNewSpace(call_data)) { - if (!info_loaded) { - __ Move(r0, Handle(optimization.api_call_info())); - } + __ Move(r0, api_call_info_handle); __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); } else { __ Move(r6, Handle(call_data)); } + // Store js function and call data. + __ stm(ib, sp, r5.bit() | r6.bit()); - __ add(sp, sp, Operand(1 * kPointerSize)); - __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit()); - __ sub(sp, sp, Operand(1 * kPointerSize)); - - // Set the number of arguments. - __ mov(r0, Operand(argc + 4)); + // r2 points to call data as expected by Arguments + // (refer to layout above). + __ add(r2, sp, Operand(2 * kPointerSize)); - // Jump to the fast api call builtin (tail call). 
- Handle code = Handle( - Builtins::builtin(Builtins::FastHandleApiCall)); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, JUMP_FUNCTION); + Object* callback = optimization.api_call_info()->callback(); + Address api_function_address = v8::ToCData
(callback); + ApiFunction fun(api_function_address); + + const int kApiStackSpace = 4; + __ EnterExitFrame(false, kApiStackSpace); + + // r0 = v8::Arguments& + // Arguments is after the return address. + __ add(r0, sp, Operand(1 * kPointerSize)); + // v8::Arguments::implicit_args = data + __ str(r2, MemOperand(r0, 0 * kPointerSize)); + // v8::Arguments::values = last argument + __ add(ip, r2, Operand(argc * kPointerSize)); + __ str(ip, MemOperand(r0, 1 * kPointerSize)); + // v8::Arguments::length_ = argc + __ mov(ip, Operand(argc)); + __ str(ip, MemOperand(r0, 2 * kPointerSize)); + // v8::Arguments::is_construct_call = 0 + __ mov(ip, Operand(0)); + __ str(ip, MemOperand(r0, 3 * kPointerSize)); + + // Emitting a stub call may try to allocate (if the code is not + // already generated). Do not allow the assembler to perform a + // garbage collection but instead return the allocation failure + // object. + MaybeObject* result = masm->TryCallApiFunctionAndReturn( + &fun, argc + kFastApiCallArguments + 1); + if (result->IsFailure()) { + return result; + } + return Heap::undefined_value(); } - class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, @@ -650,16 +672,16 @@ class CallInterceptorCompiler BASE_EMBEDDED { arguments_(arguments), name_(name) {} - void Compile(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - String* name, - LookupResult* lookup, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss) { + MaybeObject* Compile(MacroAssembler* masm, + JSObject* object, + JSObject* holder, + String* name, + LookupResult* lookup, + Register receiver, + Register scratch1, + Register scratch2, + Register scratch3, + Label* miss) { ASSERT(holder->HasNamedInterceptor()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); @@ -669,17 +691,17 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallOptimization optimization(lookup); if (optimization.is_constant_call()) { - CompileCacheable(masm, - object, - receiver, - scratch1, - scratch2, - scratch3, - holder, - lookup, - name, - optimization, - miss); + return CompileCacheable(masm, + object, + receiver, + scratch1, + scratch2, + scratch3, + holder, + lookup, + name, + optimization, + miss); } else { CompileRegular(masm, object, @@ -690,21 +712,22 @@ class CallInterceptorCompiler BASE_EMBEDDED { name, holder, miss); + return Heap::undefined_value(); } } private: - void CompileCacheable(MacroAssembler* masm, - JSObject* object, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - JSObject* interceptor_holder, - LookupResult* lookup, - String* name, - const CallOptimization& optimization, - Label* miss_label) { + MaybeObject* CompileCacheable(MacroAssembler* masm, + JSObject* object, + Register receiver, + Register scratch1, + Register scratch2, + Register scratch3, + JSObject* interceptor_holder, + LookupResult* lookup, + String* name, + const CallOptimization& optimization, + Label* miss_label) { ASSERT(optimization.is_constant_call()); ASSERT(!lookup->holder()->IsGlobalObject()); @@ -768,7 +791,10 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Invoke function. 
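
GenerateFastApiDirectCall above lays out a four-word block for the callback: a pointer to the implicit arguments (the call data word already on the stack), a pointer argc words above it which the stub's comment labels the last JS argument, the argument count, and an is_construct_call slot of zero. A self-contained mock of that arithmetic; the field and variable names follow the stub's comments, not v8.h:

#include <cassert>

struct FastApiArgsSketch {
  void** implicit_args;   // -> the call data word on the stack
  void** values;          // -> what the stub's comment calls the last JS argument
  int length;             // argc
  int is_construct_call;  // always 0 for these calls
};

int main() {
  void* stack[16] = {};                   // stand-in for the JS stack area
  const int argc = 3;
  void** call_data_slot = &stack[2];      // r2 = sp + 2 * kPointerSize

  FastApiArgsSketch args;
  args.implicit_args = call_data_slot;    // str r2, [r0, #0]
  args.values = call_data_slot + argc;    // add ip, r2, #argc*4; str ip, [r0, #4]
  args.length = argc;                     // mov ip, #argc; str ip, [r0, #8]
  args.is_construct_call = 0;             // mov ip, #0;    str ip, [r0, #12]

  assert(args.values - args.implicit_args == argc);
  return 0;
}
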
if (can_do_fast_api_call) { - GenerateFastApiCall(masm, optimization, arguments_.immediate()); + MaybeObject* result = GenerateFastApiDirectCall(masm, + optimization, + arguments_.immediate()); + if (result->IsFailure()) return result; } else { __ InvokeFunction(optimization.constant_function(), arguments_, JUMP_FUNCTION); @@ -786,6 +812,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { if (can_do_fast_api_call) { FreeSpaceForFastApiCall(masm); } + + return Heap::undefined_value(); } void CompileRegular(MacroAssembler* masm, @@ -2055,11 +2083,11 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object, // - Make sure Flush-to-zero mode control bit is unset (bit 22). __ bic(r9, r3, Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); - __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits)); + __ orr(r9, r9, Operand(kRoundToMinusInf)); __ vmsr(r9); // Convert the argument to an integer. - __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al); + __ vcvt_s32_f64(s0, d1, kFPSCRRounding); // Use vcvt latency to start checking for special cases. // Get the argument exponent and clear the sign bit. @@ -2368,7 +2396,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } if (depth != kInvalidProtoDepth) { - GenerateFastApiCall(masm(), optimization, argc); + MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc); + if (result->IsFailure()) return result; } else { __ InvokeFunction(function, arguments(), JUMP_FUNCTION); } @@ -2412,16 +2441,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object, __ ldr(r1, MemOperand(sp, argc * kPointerSize)); CallInterceptorCompiler compiler(this, arguments(), r2); - compiler.Compile(masm(), - object, - holder, - name, - &lookup, - r1, - r3, - r4, - r0, - &miss); + MaybeObject* result = compiler.Compile(masm(), + object, + holder, + name, + &lookup, + r1, + r3, + r4, + r0, + &miss); + if (result->IsFailure()) { + return result; + } // Move returned value, the function to call, to r1. __ mov(r1, r0); @@ -3087,6 +3119,38 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) { } +MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) { + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + Label miss; + + // Check that the map matches. + __ CheckMap(r1, r2, Handle(receiver->map()), &miss, false); + + GenerateFastPixelArrayLoad(masm(), + r1, + r0, + r2, + r3, + r4, + r5, + r0, + &miss, + &miss, + &miss); + + __ bind(&miss); + Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, NULL); +} + + MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, int index, Map* transition, @@ -3764,9 +3828,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( // Not infinity or NaN simply convert to int. 
if (IsElementTypeSigned(array_type)) { - __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); + __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne); } else { - __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); + __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne); } __ vmov(r5, s0, ne); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index a0e3e0bb4e..1298434d59 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -1018,9 +1018,11 @@ function ArrayIndexOf(element, index) { } else { index = TO_INTEGER(index); // If index is negative, index from the end of the array. - if (index < 0) index = length + index; - // If index is still negative, search the entire array. - if (index < 0) index = 0; + if (index < 0) { + index = length + index; + // If index is still negative, search the entire array. + if (index < 0) index = 0; + } } var min = index; var max = length; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ca72d63992..ef2094f63a 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -553,8 +553,9 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id) : address_(Redirect(Builtins::c_function_address(id))) {} -ExternalReference::ExternalReference(ApiFunction* fun) - : address_(Redirect(fun->address())) {} +ExternalReference::ExternalReference( + ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL) + : address_(Redirect(fun->address(), type)) {} ExternalReference::ExternalReference(Builtins::Name name) @@ -888,17 +889,18 @@ ExternalReference ExternalReference::double_fp_operation( UNREACHABLE(); } // Passing true as 2nd parameter indicates that they return an fp value. - return ExternalReference(Redirect(FUNCTION_ADDR(function), true)); + return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL)); } ExternalReference ExternalReference::compare_doubles() { return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles), - false)); + BUILTIN_CALL)); } -ExternalReferenceRedirector* ExternalReference::redirector_ = NULL; +ExternalReference::ExternalReferenceRedirector* + ExternalReference::redirector_ = NULL; #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index a29aa064b8..e8bc5d6caa 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -459,9 +459,6 @@ class Debug_Address; #endif -typedef void* ExternalReferenceRedirector(void* original, bool fp_return); - - // An ExternalReference represents a C++ address used in the generated // code. All references to C++ functions and variables must be encapsulated in // an ExternalReference instance. This is done in order to track the origin of @@ -469,9 +466,29 @@ typedef void* ExternalReferenceRedirector(void* original, bool fp_return); // addresses when deserializing a heap. class ExternalReference BASE_EMBEDDED { public: + // Used in the simulator to support different native api calls. + // + // BUILTIN_CALL - builtin call. + // MaybeObject* f(v8::internal::Arguments). + // + // FP_RETURN_CALL - builtin call that returns floating point. + // double f(double, double). + // + // DIRECT_CALL - direct call to API function native callback + // from generated code. 
+ // Handle f(v8::Arguments&) + // + enum Type { + BUILTIN_CALL, // default + FP_RETURN_CALL, + DIRECT_CALL + }; + + typedef void* ExternalReferenceRedirector(void* original, Type type); + explicit ExternalReference(Builtins::CFunctionId id); - explicit ExternalReference(ApiFunction* ptr); + explicit ExternalReference(ApiFunction* ptr, Type type); explicit ExternalReference(Builtins::Name name); @@ -599,17 +616,19 @@ class ExternalReference BASE_EMBEDDED { static ExternalReferenceRedirector* redirector_; - static void* Redirect(void* address, bool fp_return = false) { + static void* Redirect(void* address, + Type type = ExternalReference::BUILTIN_CALL) { if (redirector_ == NULL) return address; - void* answer = (*redirector_)(address, fp_return); + void* answer = (*redirector_)(address, type); return answer; } - static void* Redirect(Address address_arg, bool fp_return = false) { + static void* Redirect(Address address_arg, + Type type = ExternalReference::BUILTIN_CALL) { void* address = reinterpret_cast(address_arg); void* answer = (redirector_ == NULL) ? address : - (*redirector_)(address, fp_return); + (*redirector_)(address, type); return answer; } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index f80c89b8f9..0d0e37ffac 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -75,7 +75,8 @@ namespace internal { V(GetProperty) \ V(SetProperty) \ V(InvokeBuiltin) \ - V(RegExpCEntry) + V(RegExpCEntry) \ + V(DirectCEntry) #else #define CODE_STUB_LIST_ARM(V) #endif diff --git a/deps/v8/src/codegen-inl.h b/deps/v8/src/codegen-inl.h index 6534e7fd63..54677894cd 100644 --- a/deps/v8/src/codegen-inl.h +++ b/deps/v8/src/codegen-inl.h @@ -55,6 +55,10 @@ bool CodeGenerator::is_eval() { return info_->is_eval(); } Scope* CodeGenerator::scope() { return info_->function()->scope(); } +StrictModeFlag CodeGenerator::strict_mode_flag() { + return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode; +} + } } // namespace v8::internal #endif // V8_CODEGEN_INL_H_ diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 38438cb913..cccb7a4f21 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -136,7 +136,8 @@ class CompilationCacheEval: public CompilationSubCache { : CompilationSubCache(generations) { } Handle Lookup(Handle source, - Handle context); + Handle context, + StrictModeFlag strict_mode); void Put(Handle source, Handle context, @@ -371,7 +372,9 @@ void CompilationCacheScript::Put(Handle source, Handle CompilationCacheEval::Lookup( - Handle source, Handle context) { + Handle source, + Handle context, + StrictModeFlag strict_mode) { // Make sure not to leak the table into the surrounding handle // scope. Otherwise, we risk keeping old tables around even after // having cleared the cache. 
@@ -380,7 +383,7 @@ Handle CompilationCacheEval::Lookup( { HandleScope scope; for (generation = 0; generation < generations(); generation++) { Handle table = GetTable(generation); - result = table->LookupEval(*source, *context); + result = table->LookupEval(*source, *context, strict_mode); if (result->IsSharedFunctionInfo()) { break; } @@ -503,18 +506,20 @@ Handle CompilationCache::LookupScript(Handle source, } -Handle CompilationCache::LookupEval(Handle source, - Handle context, - bool is_global) { +Handle CompilationCache::LookupEval( + Handle source, + Handle context, + bool is_global, + StrictModeFlag strict_mode) { if (!IsEnabled()) { return Handle::null(); } Handle result; if (is_global) { - result = eval_global.Lookup(source, context); + result = eval_global.Lookup(source, context, strict_mode); } else { - result = eval_contextual.Lookup(source, context); + result = eval_contextual.Lookup(source, context, strict_mode); } return result; } diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h index 37e21be99d..f779a23aac 100644 --- a/deps/v8/src/compilation-cache.h +++ b/deps/v8/src/compilation-cache.h @@ -51,7 +51,8 @@ class CompilationCache { // contain a script for the given source string. static Handle LookupEval(Handle source, Handle context, - bool is_global); + bool is_global, + StrictModeFlag strict_mode); // Returns the regexp data associated with the given regexp if it // is in cache, otherwise an empty handle. diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 5c18c3e53e..77111a842e 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -548,7 +548,8 @@ Handle Compiler::Compile(Handle source, Handle Compiler::CompileEval(Handle source, Handle context, - bool is_global) { + bool is_global, + StrictModeFlag strict_mode) { int source_length = source->length(); Counters::total_eval_size.Increment(source_length); Counters::total_compile_size.Increment(source_length); @@ -559,7 +560,10 @@ Handle Compiler::CompileEval(Handle source, // Do a lookup in the compilation cache; if the entry is not there, invoke // the compiler and add the result to the cache. Handle result; - result = CompilationCache::LookupEval(source, context, is_global); + result = CompilationCache::LookupEval(source, + context, + is_global, + strict_mode); if (result.is_null()) { // Create a script object describing the script to be compiled. @@ -567,9 +571,14 @@ Handle Compiler::CompileEval(Handle source, CompilationInfo info(script); info.MarkAsEval(); if (is_global) info.MarkAsGlobal(); + if (strict_mode == kStrictMode) info.MarkAsStrict(); info.SetCallingContext(context); result = MakeFunctionInfo(&info); if (!result.is_null()) { + // If caller is strict mode, the result must be strict as well, + // but not the other way around. 
Consider: + // eval("'use strict'; ..."); + ASSERT(strict_mode == kNonStrictMode || result->strict_mode()); CompilationCache::PutEval(source, context, is_global, result); } } @@ -762,6 +771,7 @@ void Compiler::SetFunctionInfo(Handle function_info, *lit->this_property_assignments()); function_info->set_try_full_codegen(lit->try_full_codegen()); function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation()); + function_info->set_strict_mode(lit->strict_mode()); } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 44ac9c85ce..9843dd6452 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -49,6 +49,7 @@ class CompilationInfo BASE_EMBEDDED { bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; } bool is_eval() const { return (flags_ & IsEval::mask()) != 0; } bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; } + bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; } bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; } FunctionLiteral* function() const { return function_; } Scope* scope() const { return scope_; } @@ -69,6 +70,13 @@ class CompilationInfo BASE_EMBEDDED { ASSERT(!is_lazy()); flags_ |= IsGlobal::encode(true); } + void MarkAsStrict() { + ASSERT(!is_lazy()); + flags_ |= IsStrict::encode(true); + } + StrictModeFlag StrictMode() { + return is_strict() ? kStrictMode : kNonStrictMode; + } void MarkAsInLoop() { ASSERT(is_lazy()); flags_ |= IsInLoop::encode(true); @@ -162,6 +170,8 @@ class CompilationInfo BASE_EMBEDDED { class IsGlobal: public BitField {}; // Flags that can be set for lazy compilation. class IsInLoop: public BitField {}; + // Strict mode - used in eager compilation. + class IsStrict: public BitField {}; unsigned flags_; @@ -230,7 +240,8 @@ class Compiler : public AllStatic { // Compile a String source within a context for Eval. static Handle CompileEval(Handle source, Handle context, - bool is_global); + bool is_global, + StrictModeFlag strict_mode); // Compile from function info (used for lazy compilation). Returns true on // success and false if the compilation resulted in a stack overflow. diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index a954d6cc69..a348235d66 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -125,8 +125,8 @@ static bool isDigit(int x, int radix) { } -static double SignedZero(bool sign) { - return sign ? -0.0 : 0.0; +static double SignedZero(bool negative) { + return negative ? -0.0 : 0.0; } @@ -134,14 +134,14 @@ static double SignedZero(bool sign) { template static double InternalStringToIntDouble(Iterator current, EndMark end, - bool sign, + bool negative, bool allow_trailing_junk) { ASSERT(current != end); // Skip leading 0s. while (*current == '0') { ++current; - if (current == end) return SignedZero(sign); + if (current == end) return SignedZero(negative); } int64_t number = 0; @@ -217,7 +217,7 @@ static double InternalStringToIntDouble(Iterator current, ASSERT(static_cast(static_cast(number)) == number); if (exponent == 0) { - if (sign) { + if (negative) { if (number == 0) return -0.0; number = -number; } @@ -227,7 +227,7 @@ static double InternalStringToIntDouble(Iterator current, ASSERT(number != 0); // The double could be constructed faster from number (mantissa), exponent // and sign. Assuming it's a rare case more simple code is used. - return static_cast(sign ? -number : number) * pow(2.0, exponent); + return static_cast(negative ? 
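
Threading StrictModeFlag through LookupEval above means the eval cache can no longer hand a sloppy-mode compilation to a strict-mode caller (or vice versa): the flag effectively becomes part of the cache key, alongside the source string and the calling context. A toy illustration of that keying; std::map and the names below are illustrative, not V8's hash-table code:

#include <map>
#include <string>
#include <tuple>

enum StrictModeFlag { kNonStrictMode, kStrictMode };

// Key = (source, calling context, strict mode of the caller).
using EvalCacheKey = std::tuple<std::string, const void*, StrictModeFlag>;

std::map<EvalCacheKey, int> eval_cache;  // int stands in for SharedFunctionInfo

const int* LookupEval(const std::string& source,
                      const void* context,
                      StrictModeFlag strict_mode) {
  auto it = eval_cache.find(EvalCacheKey(source, context, strict_mode));
  return it == eval_cache.end() ? nullptr : &it->second;
}
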
-number : number) * pow(2.0, exponent); } @@ -238,7 +238,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { if (!AdvanceToNonspace(¤t, end)) return empty_string_val; - bool sign = false; + bool negative = false; bool leading_zero = false; if (*current == '+') { @@ -248,14 +248,14 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { } else if (*current == '-') { ++current; if (!AdvanceToNonspace(¤t, end)) return JUNK_STRING_VALUE; - sign = true; + negative = true; } if (radix == 0) { // Radix detection. if (*current == '0') { ++current; - if (current == end) return SignedZero(sign); + if (current == end) return SignedZero(negative); if (*current == 'x' || *current == 'X') { radix = 16; ++current; @@ -271,7 +271,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { if (*current == '0') { // Allow "0x" prefix. ++current; - if (current == end) return SignedZero(sign); + if (current == end) return SignedZero(negative); if (*current == 'x' || *current == 'X') { ++current; if (current == end) return JUNK_STRING_VALUE; @@ -287,7 +287,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { while (*current == '0') { leading_zero = true; ++current; - if (current == end) return SignedZero(sign); + if (current == end) return SignedZero(negative); } if (!leading_zero && !isDigit(*current, radix)) { @@ -298,21 +298,21 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { switch (radix) { case 2: return InternalStringToIntDouble<1>( - current, end, sign, allow_trailing_junk); + current, end, negative, allow_trailing_junk); case 4: return InternalStringToIntDouble<2>( - current, end, sign, allow_trailing_junk); + current, end, negative, allow_trailing_junk); case 8: return InternalStringToIntDouble<3>( - current, end, sign, allow_trailing_junk); + current, end, negative, allow_trailing_junk); case 16: return InternalStringToIntDouble<4>( - current, end, sign, allow_trailing_junk); + current, end, negative, allow_trailing_junk); case 32: return InternalStringToIntDouble<5>( - current, end, sign, allow_trailing_junk); + current, end, negative, allow_trailing_junk); default: UNREACHABLE(); } @@ -344,7 +344,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { ASSERT(buffer_pos < kBufferSize); buffer[buffer_pos] = '\0'; Vector buffer_vector(buffer, buffer_pos); - return sign ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0); + return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0); } // The following code causes accumulating rounding error for numbers greater @@ -406,7 +406,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) { return JUNK_STRING_VALUE; } - return sign ? -v : v; + return negative ? -v : v; } @@ -445,7 +445,7 @@ static double InternalStringToDouble(Iterator current, bool nonzero_digit_dropped = false; bool fractional_part = false; - bool sign = false; + bool negative = false; if (*current == '+') { // Ignore leading sign. @@ -454,7 +454,7 @@ static double InternalStringToDouble(Iterator current, } else if (*current == '-') { ++current; if (current == end) return JUNK_STRING_VALUE; - sign = true; + negative = true; } static const char kInfinitySymbol[] = "Infinity"; @@ -468,13 +468,13 @@ static double InternalStringToDouble(Iterator current, } ASSERT(buffer_pos == 0); - return sign ? -V8_INFINITY : V8_INFINITY; + return negative ? 
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index f081576aa8..00e7d0ee2c 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -817,7 +817,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
   // call to an unconditional call to the replacement code.
   ASSERT(unoptimized_code->kind() == Code::FUNCTION);
   Address stack_check_cursor = unoptimized_code->instruction_start() +
-      unoptimized_code->stack_check_table_start();
+      unoptimized_code->stack_check_table_offset();
   uint32_t table_length = Memory::uint32_at(stack_check_cursor);
   stack_check_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
@@ -836,7 +836,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
   // stack check calls.
   ASSERT(unoptimized_code->kind() == Code::FUNCTION);
   Address stack_check_cursor = unoptimized_code->instruction_start() +
-      unoptimized_code->stack_check_table_start();
+      unoptimized_code->stack_check_table_offset();
   uint32_t table_length = Memory::uint32_at(stack_check_cursor);
   stack_check_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
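Both deoptimizer hunks rename stack_check_table_start() to stack_check_table_offset(): the value is a byte offset from instruction_start(), and the code reads a 32-bit entry count there before walking the table. The following standalone sketch reads such a length-prefixed table out of a byte buffer; the 32-bit entry layout, buffer sizes, and names are assumptions for illustration only, not V8's actual stack-check record format.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Reads a 32-bit entry count at 'table_offset' bytes past the start of the
// instructions, then the entries themselves. The single-word entry layout is
// an assumption for illustration; V8's stack check records are more involved.
static std::vector<uint32_t> ReadTable(const uint8_t* instruction_start,
                                       uint32_t table_offset) {
  const uint8_t* cursor = instruction_start + table_offset;
  uint32_t length = 0;
  std::memcpy(&length, cursor, sizeof(length));
  cursor += sizeof(length);
  std::vector<uint32_t> entries(length);
  std::memcpy(entries.data(), cursor, length * sizeof(uint32_t));
  return entries;
}

int main() {
  // Fake "code object": 8 bytes of instructions, then the table at offset 8.
  uint8_t code[8 + 4 + 2 * 4] = {0};
  uint32_t length = 2, first = 0x10, second = 0x24;
  std::memcpy(code + 8, &length, sizeof(length));
  std::memcpy(code + 12, &first, sizeof(first));
  std::memcpy(code + 16, &second, sizeof(second));
  for (uint32_t entry : ReadTable(code, 8)) {
    std::printf("entry: 0x%x\n", static_cast<unsigned>(entry));  // 0x10 then 0x24
  }
  return 0;
}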
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 194a299f02..243abf079c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -313,12 +313,12 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
   int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
-      ? static_cast(code->safepoint_table_start())
+      ? static_cast(code->safepoint_table_offset())
       : code->instruction_size();
   // If there might be a stack check table, stop before reaching it.
   if (code->kind() == Code::FUNCTION) {
     decode_size =
-        Min(decode_size, static_cast(code->stack_check_table_start()));
+        Min(decode_size, static_cast(code->stack_check_table_offset()));
   }
   byte* begin = code->instruction_start();
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index b8f081c54d..63daa05b5b 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -40,8 +40,12 @@ v8::Handle GCExtension::GetNativeFunction(
 v8::Handle GCExtension::GC(const v8::Arguments& args) {
+  bool compact = false;
   // All allocation spaces other than NEW_SPACE have the same effect.
-  Heap::CollectAllGarbage(false);
+  if (args.Length() >= 1 && args[0]->IsBoolean()) {
+    compact = args[0]->BooleanValue();
+  }
+  Heap::CollectAllGarbage(compact);
   return v8::Undefined();
 }
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index af3ac00bae..4ed3fecfe8 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -304,7 +304,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
     cgen.PopulateDeoptimizationData(code);
     code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
     code->set_allow_osr_at_loop_nesting_level(0);
-    code->set_stack_check_table_start(table_offset);
+    code->set_stack_check_table_offset(table_offset);
     CodeGenerator::PrintCode(code, info);
     info->SetCode(code);  // may be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 0482ee8d94..2d0998d8cf 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -531,6 +531,9 @@ class FullCodeGenerator: public AstVisitor {
 Handle