
V8: Upgrade to 3.11.10.25

v0.8.13-release
isaacs 12 years ago
commit 29d12c7342
  1. deps/v8/build/common.gypi (27)
  2. deps/v8/src/arm/assembler-arm.cc (108)
  3. deps/v8/src/arm/assembler-arm.h (3)
  4. deps/v8/src/arm/builtins-arm.cc (5)
  5. deps/v8/src/arm/code-stubs-arm.cc (117)
  6. deps/v8/src/arm/code-stubs-arm.h (16)
  7. deps/v8/src/arm/codegen-arm.cc (10)
  8. deps/v8/src/arm/full-codegen-arm.cc (8)
  9. deps/v8/src/arm/lithium-codegen-arm.cc (8)
 10. deps/v8/src/arm/macro-assembler-arm.cc (24)
 11. deps/v8/src/arm/stub-cache-arm.cc (44)
 12. deps/v8/src/flag-definitions.h (4)
 13. deps/v8/src/mips/code-stubs-mips.cc (12)
 14. deps/v8/src/mips/macro-assembler-mips.cc (3)
 15. deps/v8/src/platform-linux.cc (3)
 16. deps/v8/src/platform-macos.cc (22)
 17. deps/v8/src/platform-posix.cc (17)
 18. deps/v8/src/platform-solaris.cc (8)
 19. deps/v8/src/v8globals.h (1)
 20. deps/v8/src/version.cc (2)
 21. deps/v8/test/cctest/test-api.cc (70)
 22. deps/v8/test/cctest/test-assembler-arm.cc (14)
 23. deps/v8/test/mjsunit/regress/regress-2234.js (41)
 24. deps/v8/tools/gyp/v8.gyp (3)

deps/v8/build/common.gypi (27)

@@ -43,13 +43,12 @@
     # access is allowed for all CPUs.
     'v8_can_use_unaligned_accesses%': 'default',
-    # Setting 'v8_can_use_vfp2_instructions' to 'true' will enable use of ARM VFP
+    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
     # instructions in the V8 generated code. VFP instructions will be enabled
     # both for the snapshot and for the ARM target. Leaving the default value
     # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
     # probing when running on the target.
-    'v8_can_use_vfp2_instructions%': 'false',
-    'v8_can_use_vfp3_instructions%': 'false',
+    'v8_can_use_vfp_instructions%': 'false',
     # Similar to vfp but on MIPS.
     'v8_can_use_fpu_instructions%': 'true',
@@ -126,20 +125,15 @@
           'CAN_USE_UNALIGNED_ACCESSES=0',
         ],
       }],
-      [ 'v8_can_use_vfp2_instructions=="true"', {
+      [ 'v8_can_use_vfp_instructions=="true"', {
         'defines': [
-          'CAN_USE_VFP2_INSTRUCTIONS',
-        ],
-      }],
-      [ 'v8_can_use_vfp3_instructions=="true"', {
-        'defines': [
-          'CAN_USE_VFP3_INSTRUCTIONS',
+          'CAN_USE_VFP_INSTRUCTIONS',
         ],
       }],
      [ 'v8_use_arm_eabi_hardfloat=="true"', {
        'defines': [
          'USE_EABI_HARDFLOAT=1',
-          'CAN_USE_VFP2_INSTRUCTIONS',
+          'CAN_USE_VFP_INSTRUCTIONS',
        ],
        'target_conditions': [
          ['_toolset=="target"', {
@@ -245,6 +239,7 @@
        'WIN32',
      ],
      'msvs_configuration_attributes': {
+       'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
        'CharacterSet': '1',
      },
@@ -276,7 +271,7 @@
    'target_conditions': [
      ['_toolset=="host"', {
        'variables': {
-         'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+         'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
        },
        'cflags': [ '<(m32flag)' ],
        'ldflags': [ '<(m32flag)' ],
@@ -286,7 +281,7 @@
      }],
      ['_toolset=="target"', {
        'variables': {
-         'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+         'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
        },
        'cflags': [ '<(m32flag)' ],
        'ldflags': [ '<(m32flag)' ],
@@ -329,7 +324,7 @@
    },
    'conditions': [
      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-       'cflags': [ '-Wno-unused-parameter',
+       'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                    '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
      }],
    ],
@@ -338,6 +333,10 @@
    'conditions': [
      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
        or OS=="android"', {
+       'cflags!': [
+         '-O2',
+         '-Os',
+       ],
        'cflags': [
          '-fdata-sections',
          '-ffunction-sections',
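A note on how the defines above land in the source: gyp turns each entry under 'defines' into a -D preprocessor symbol, and V8 folds those symbols into its baseline feature mask when compiling the snapshot. A minimal sketch of that bridge, with illustrative enum values (the real consumer is CpuFeaturesImpliedByCompiler() in assembler-arm.cc, changed in the next file):

    // Sketch only: how a gyp-level define surfaces in C++. The enum values
    // and function name here are illustrative, not the exact V8 declarations.
    #include <cstdint>

    enum CpuFeature { VFP3 = 0, ARMv7 = 1 };

    uint64_t FeaturesImpliedByBuild() {
      uint64_t answer = 0;
    #ifdef CAN_USE_VFP_INSTRUCTIONS
      // VFPv3 implies ARMv7 (ARM DDI 0406B, page A1-6), so both bits are set.
      answer |= 1u << VFP3 | 1u << ARMv7;
    #endif
    #ifdef CAN_USE_ARMV7_INSTRUCTIONS
      answer |= 1u << ARMv7;
    #endif
      return answer;
    }

Building with the define baked in means the snapshot may contain VFP instructions; leaving it out defers the decision to runtime CPU probing, exactly as the comment block in the first hunk describes.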

deps/v8/src/arm/assembler-arm.cc (108)

@@ -32,7 +32,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 #include "v8.h"
@@ -52,20 +52,17 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 // Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
 // can be defined to enable ARMv7 and VFPv3 instructions when building the
 // snapshot.
-static unsigned CpuFeaturesImpliedByCompiler() {
-  unsigned answer = 0;
+static uint64_t CpuFeaturesImpliedByCompiler() {
+  uint64_t answer = 0;
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
   answer |= 1u << ARMv7;
-#endif  // CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP3_INSTRUCTIONS
-  answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
-#endif  // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
-  answer |= 1u << VFP2;
-#endif  // CAN_USE_VFP2_INSTRUCTIONS
+#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3 | 1u << ARMv7;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
 #ifdef __arm__
 // If the compiler is allowed to use VFP then we can use VFP too in our code
@@ -73,10 +70,10 @@ static unsigned CpuFeaturesImpliedByCompiler() {
 // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
 #if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
     && !defined(__SOFTFP__)
-  answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+  answer |= 1u << VFP3 | 1u << ARMv7;
 #endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
         // && !defined(__SOFTFP__)
-#endif  // _arm__
+#endif  // def __arm__
   return answer;
 }
@@ -104,32 +101,27 @@ void CpuFeatures::Probe() {
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
   // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
   if (FLAG_enable_vfp3) {
-    supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+    supported_ |= 1u << VFP3 | 1u << ARMv7;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }
-#else  // __arm__
+#else  // def __arm__
   // Probe for additional features not already known to be available.
   if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
     // This implementation also sets the VFP flags if runtime
-    // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
+    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
     // 0406B, page A1-6.
-    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-  } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
-    found_by_runtime_probing_ |= 1u << VFP2;
+    supported_ |= 1u << VFP3 | 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
   }
   if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
+    supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
-  supported_ |= found_by_runtime_probing_;
 #endif
-  // Assert that VFP3 implies VFP2 and ARMv7.
-  ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
 }
@@ -1664,7 +1656,7 @@ void Assembler::vldr(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1011(11-8) | offset
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1706,7 +1698,7 @@ void Assembler::vldr(const SwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | offset
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1750,7 +1742,7 @@ void Assembler::vstr(const DwVfpRegister src,
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1791,7 +1783,7 @@ void Assembler::vstr(const SwVfpRegister src,
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | (offset/4)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1834,7 +1826,7 @@ void Assembler::vldm(BlockAddrMode am,
   // Instruction details available in ARM DDI 0406A, A8-626.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1010(11-8) | (count * 2)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT_LE(first.code(), last.code());
   ASSERT(am == ia || am == ia_w || am == db_w);
   ASSERT(!base.is(pc));
@@ -1855,7 +1847,7 @@ void Assembler::vstm(BlockAddrMode am,
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count * 2)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT_LE(first.code(), last.code());
   ASSERT(am == ia || am == ia_w || am == db_w);
   ASSERT(!base.is(pc));
@@ -1875,7 +1867,7 @@ void Assembler::vldm(BlockAddrMode am,
   // Instruction details available in ARM DDI 0406A, A8-626.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1010(11-8) | (count/2)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT_LE(first.code(), last.code());
   ASSERT(am == ia || am == ia_w || am == db_w);
   ASSERT(!base.is(pc));
@@ -1896,7 +1888,7 @@ void Assembler::vstm(BlockAddrMode am,
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count/2)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT_LE(first.code(), last.code());
   ASSERT(am == ia || am == ia_w || am == db_w);
   ASSERT(!base.is(pc));
@@ -1919,7 +1911,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
-  ASSERT(CpuFeatures::IsSupported(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   // VMOV can accept an immediate of the form:
   //
@@ -1972,10 +1964,10 @@ void Assembler::vmov(const DwVfpRegister dst,
                      const Condition cond) {
   // Dd = immediate
   // Instruction details available in ARM DDI 0406B, A8-640.
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   uint32_t enc;
-  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
+  if (FitsVMOVDoubleImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.
     emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
   } else {
@@ -2009,7 +2001,7 @@ void Assembler::vmov(const SwVfpRegister dst,
                      const Condition cond) {
   // Sd = Sm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int sd, d, sm, m;
   dst.split_code(&sd, &d);
   src.split_code(&sm, &m);
@@ -2022,7 +2014,7 @@ void Assembler::vmov(const DwVfpRegister dst,
                      const Condition cond) {
   // Dd = Dm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xB*B20 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
 }
@@ -2036,7 +2028,7 @@ void Assembler::vmov(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src1.is(pc) && !src2.is(pc));
   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
        src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2051,7 +2043,7 @@ void Assembler::vmov(const Register dst1,
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst1.is(pc) && !dst2.is(pc));
   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
        dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2065,7 +2057,7 @@ void Assembler::vmov(const SwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src.is(pc));
   int sn, n;
   dst.split_code(&sn, &n);
@@ -2080,7 +2072,7 @@ void Assembler::vmov(const Register dst,
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst.is(pc));
   int sn, n;
   src.split_code(&sn, &n);
@@ -2205,7 +2197,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }
@@ -2214,7 +2206,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
 }
@@ -2223,7 +2215,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }
@@ -2232,7 +2224,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -2241,7 +2233,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -2250,7 +2242,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }
@@ -2259,7 +2251,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -2267,7 +2259,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
 void Assembler::vneg(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
   emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
        0x5*B9 | B8 | B6 | src.code());
 }
@@ -2276,7 +2267,6 @@ void Assembler::vneg(const DwVfpRegister dst,
 void Assembler::vabs(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
   emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
        0x5*B9 | B8 | 0x3*B6 | src.code());
 }
@@ -2291,7 +2281,7 @@ void Assembler::vadd(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-536.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2306,7 +2296,7 @@ void Assembler::vsub(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2321,7 +2311,7 @@ void Assembler::vmul(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2336,7 +2326,7 @@ void Assembler::vdiv(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-584.
   // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2349,7 +2339,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
        src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2362,7 +2352,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(src2 == 0.0);
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
        src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2373,7 +2363,7 @@ void Assembler::vmsr(Register dst, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xE*B20 | B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2383,7 +2373,7 @@ void Assembler::vmrs(Register dst, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xF*B20 | B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2394,7 +2384,7 @@ void Assembler::vsqrt(const DwVfpRegister dst,
                       const Condition cond) {
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
        dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
 }

deps/v8/src/arm/assembler-arm.h (3)

@@ -510,7 +510,6 @@ class CpuFeatures : public AllStatic {
   static bool IsSupported(CpuFeature f) {
     ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
-    if (f == VFP2 && !FLAG_enable_vfp2) return false;
     return (supported_ & (1u << f)) != 0;
   }
@@ -536,8 +535,6 @@ class CpuFeatures : public AllStatic {
    public:
     explicit Scope(CpuFeature f) {
       unsigned mask = 1u << f;
-      // VFP2 and ARMv7 are implied by VFP3.
-      if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
       ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
              (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
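With the mask widening removed above, a Scope now covers exactly the one feature it names. A hedged sketch of the RAII idea behind CpuFeatures::Scope (simplified; the real class also carries the serializer check shown in the hunk, and in release builds it compiles down to nothing):

    // Illustrative RAII feature scope; enabled_ stands in for the assembler's
    // "currently enabled" mask that IsEnabled() asserts against.
    #include <cassert>

    enum CpuFeature { VFP3 = 0, ARMv7 = 1 };

    static unsigned supported_ = (1u << VFP3) | (1u << ARMv7);
    static unsigned enabled_ = 0;

    class FeatureScope {
     public:
      explicit FeatureScope(CpuFeature f) : old_enabled_(enabled_) {
        assert((supported_ & (1u << f)) != 0);  // IsSupported(f) must hold.
        enabled_ |= 1u << f;  // IsEnabled(f) is true while the scope lives.
      }
      ~FeatureScope() { enabled_ = old_enabled_; }

     private:
      unsigned old_enabled_;
    };

The generated-code paths below rely on this pairing: a stub opens CpuFeatures::Scope scope(VFP3) and only then emits vldr/vmov and friends, whose ASSERT(CpuFeatures::IsEnabled(VFP3)) would otherwise fire.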

deps/v8/src/arm/builtins-arm.cc (5)

@@ -1246,7 +1246,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(VFP3);
-  ASSERT(CPU::SupportsCrankshaft());
+  if (!CpuFeatures::IsSupported(VFP3)) {
+    __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+    return;
+  }
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.

deps/v8/src/arm/code-stubs-arm.cc (117)

@@ -519,8 +519,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
     FloatingPointHelper::Destination destination,
     Register scratch1,
     Register scratch2) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
     __ vmov(d7.high(), scratch1);
     __ vcvt_f64_s32(d7, d7.high());
@@ -589,9 +589,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
   // Handle loading a double from a heap number.
-  if (CpuFeatures::IsSupported(VFP2) &&
+  if (CpuFeatures::IsSupported(VFP3) &&
       destination == kVFPRegisters) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
     __ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -604,8 +604,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   // Handle loading a double from a smi.
   __ bind(&is_smi);
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
     __ vmov(dst.high(), scratch1);
     __ vcvt_f64_s32(dst, dst.high());
@@ -682,8 +682,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
   Label done;
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     __ vmov(single_scratch, int_scratch);
     __ vcvt_f64_s32(double_dst, single_scratch);
     if (destination == kCoreRegisters) {
@@ -776,8 +776,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
   // Load the number.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
     __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
@@ -847,8 +847,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     SwVfpRegister single_scratch = double_scratch.low();
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -978,7 +978,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   __ push(lr);
   __ PrepareCallCFunction(0, 2, scratch);
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
@@ -990,7 +990,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ vstr(d0,
             FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
   } else {
@@ -1209,9 +1209,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }
   // Lhs is a smi, rhs is a number.
-  if (CpuFeatures::IsSupported(VFP2)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     // Convert lhs to a double in d7.
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
     // Load the double from rhs, tagged HeapNumber r0, to d6.
     __ sub(r7, rhs, Operand(kHeapObjectTag));
@@ -1249,8 +1249,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }
   // Rhs is a smi, lhs is a heap number.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Load the double from lhs, tagged HeapNumber r1, to d7.
     __ sub(r7, lhs, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
@@ -1362,7 +1362,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
   __ push(lr);
   __ PrepareCallCFunction(0, 2, r5);
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
@@ -1437,8 +1437,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
   // Both are heap numbers. Load them up then jump to the code we have
   // for that.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     __ sub(r7, rhs, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
     __ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1527,8 +1527,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
       __ CheckMap(object,
                   scratch1,
                   Heap::kHeapNumberMapRootIndex,
@@ -1659,9 +1659,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
   Isolate* isolate = masm->isolate();
-  if (CpuFeatures::IsSupported(VFP2)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     __ bind(&lhs_not_nan);
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     Label no_nan;
     // ARMv7 VFP3 instructions to implement double precision comparison.
     __ VFPCompareAndSetFlags(d7, d6);
@@ -1780,7 +1780,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false. That means
   // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
-  CpuFeatures::Scope scope(VFP2);
+  CpuFeatures::Scope scope(VFP3);
   Label patch;
   const Register map = r9.is(tos_) ? r7 : r9;
@@ -1892,7 +1892,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // restore them.
   __ stm(db_w, sp, kCallerSaved | lr.bit());
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -1910,7 +1910,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
       __ vldr(reg, MemOperand(sp, i * kDoubleSize));
@@ -2140,9 +2140,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
     __ mov(r0, r2);  // Move newly allocated heap number to r0.
   }
-  if (CpuFeatures::IsSupported(VFP2)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, r1);
     __ vcvt_f64_s32(d0, s0);
     __ sub(r2, r0, Operand(kHeapObjectTag));
@@ -2442,7 +2442,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
   // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
   // depending on whether VFP3 is available or not.
   FloatingPointHelper::Destination destination =
-      CpuFeatures::IsSupported(VFP2) &&
+      CpuFeatures::IsSupported(VFP3) &&
      op_ != Token::MOD ?
      FloatingPointHelper::kVFPRegisters :
      FloatingPointHelper::kCoreRegisters;
@@ -2469,7 +2469,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      // Using VFP registers:
      // d6: Left value
      // d7: Right value
-      CpuFeatures::Scope scope(VFP2);
+      CpuFeatures::Scope scope(VFP3);
      switch (op_) {
        case Token::ADD:
          __ vadd(d5, d6, d7);
@@ -2558,7 +2558,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      // The code below for writing into heap numbers isn't capable of
      // writing the register as an unsigned int so we go to slow case if we
      // hit this case.
-      if (CpuFeatures::IsSupported(VFP2)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
        __ b(mi, &result_not_a_smi);
      } else {
        __ b(mi, not_numbers);
@@ -2597,10 +2597,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
  // result.
  __ mov(r0, Operand(r5));
-  if (CpuFeatures::IsSupported(VFP2)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
    // mentioned above SHR needs to always produce a positive result.
-    CpuFeatures::Scope scope(VFP2);
+    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, r2);
    if (op_ == Token::SHR) {
      __ vcvt_f64_u32(d0, s0);
@@ -2759,7 +2759,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  // Jump to type transition if they are not. The registers r0 and r1 (right
  // and left) are preserved for the runtime call.
  FloatingPointHelper::Destination destination =
-      (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
+      (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
          ? FloatingPointHelper::kVFPRegisters
          : FloatingPointHelper::kCoreRegisters;
@@ -2787,7 +2787,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                            &transition);
      if (destination == FloatingPointHelper::kVFPRegisters) {
-        CpuFeatures::Scope scope(VFP2);
+        CpuFeatures::Scope scope(VFP3);
        Label return_heap_number;
        switch (op_) {
          case Token::ADD:
@@ -2954,9 +2954,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
          // We only get a negative result if the shift value (r2) is 0.
          // This result cannot be respresented as a signed 32-bit integer, try
          // to return a heap number if we can.
-          // The non vfp2 code does not support this special case, so jump to
+          // The non vfp3 code does not support this special case, so jump to
          // runtime if we don't support it.
-          if (CpuFeatures::IsSupported(VFP2)) {
+          if (CpuFeatures::IsSupported(VFP3)) {
            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
                      ? &transition
                      : &return_heap_number);
@@ -2991,8 +2991,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                 scratch2,
                                 &call_runtime);
-      if (CpuFeatures::IsSupported(VFP2)) {
-        CpuFeatures::Scope scope(VFP2);
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
        if (op_ != Token::SHR) {
          // Convert the result to a floating point value.
          __ vmov(double_scratch.low(), r2);
@@ -3221,8 +3221,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  const Register cache_entry = r0;
  const bool tagged = (argument_type_ == TAGGED);
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
    if (tagged) {
      // Argument is a number and is on stack and in r0.
      // Load argument and check if it is a smi.
@@ -3323,23 +3323,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
        ExternalReference(RuntimeFunction(), masm->isolate());
    __ TailCallExternalReference(runtime_function, 1, 1);
  } else {
-    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
+    ASSERT(CpuFeatures::IsSupported(VFP3));
    CpuFeatures::Scope scope(VFP3);
    Label no_update;
    Label skip_cache;
    // Call C function to calculate the result and update the cache.
-    // Register r0 holds precalculated cache entry address; preserve
-    // it on the stack and pop it into register cache_entry after the
-    // call.
-    __ push(cache_entry);
+    // r0: precalculated cache entry address.
+    // r2 and r3: parts of the double value.
+    // Store r0, r2 and r3 on stack for later before calling C function.
+    __ Push(r3, r2, cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(d2);
    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(r3, r2, cache_entry);
    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
    __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@@ -3385,7 +3385,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                     Register scratch) {
-  ASSERT(CpuFeatures::IsEnabled(VFP2));
   Isolate* isolate = masm->isolate();
   __ push(lr);
@@ -3446,7 +3445,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
 void MathPowStub::Generate(MacroAssembler* masm) {
-  CpuFeatures::Scope vfp2_scope(VFP2);
+  CpuFeatures::Scope vfp3_scope(VFP3);
   const Register base = r1;
   const Register exponent = r2;
   const Register heapnumbermap = r5;
@@ -3545,7 +3544,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     // Add +0 to convert -0 to +0.
     __ vadd(double_scratch, double_base, kDoubleRegZero);
-    __ vmov(double_result, 1.0);
+    __ vmov(double_result, 1);
     __ vsqrt(double_scratch, double_scratch);
     __ vdiv(double_result, double_result, double_scratch);
     __ jmp(&done);
@@ -3902,8 +3901,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Save callee-saved registers (incl. cp and fp), sp, and lr
   __ stm(db_w, sp, kCalleeSaved | lr.bit());
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Save callee-saved vfp registers.
     __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
     // Set up the reserved register for 0.0.
@@ -3918,7 +3917,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Set up argv in r4.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
-  if (CpuFeatures::IsSupported(VFP2)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
   }
   __ ldr(r4, MemOperand(sp, offset_to_argv));
@@ -4056,8 +4055,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   }
 #endif
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Restore callee-saved vfp registers.
     __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   }
@@ -6584,8 +6583,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
     // Load left and right operand
     __ sub(r2, r1, Operand(kHeapObjectTag));
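Many hunks above branch on masm->use_eabi_hardfloat(): under the EABI hard-float ABI a double argument already sits in d0, while the soft-float convention hands the same 64 bits over split across a core register pair such as r0/r1, which is what __ vmov(d0, r0, r1) stitches back together. A sketch of the computation, assuming ARM's little-endian word order (r0 carries the low word):

    // Sketch: what "vmov(d0, r0, r1)" accomplishes when a double arrives
    // split across two 32-bit core registers.
    #include <cstdint>
    #include <cstring>

    double DoubleFromHalves(uint32_t lo, uint32_t hi) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // reinterpret the raw IEEE-754 bits
      return d;
    }

    int main() {
      // 0x3FF0000000000000 is the IEEE-754 encoding of 1.0.
      return DoubleFromHalves(0u, 0x3FF00000u) == 1.0 ? 0 : 1;
    }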

16
deps/v8/src/arm/code-stubs-arm.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -149,7 +149,7 @@ class BinaryOpStub: public CodeStub {
mode_(mode), mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED), operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) { result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp2_ = CpuFeatures::IsSupported(VFP2); use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
} }
@ -159,7 +159,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)), : op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)), mode_(ModeBits::decode(key)),
use_vfp2_(VFP2Bits::decode(key)), use_vfp3_(VFP3Bits::decode(key)),
operands_type_(operands_type), operands_type_(operands_type),
result_type_(result_type) { } result_type_(result_type) { }
@ -171,7 +171,7 @@ class BinaryOpStub: public CodeStub {
Token::Value op_; Token::Value op_;
OverwriteMode mode_; OverwriteMode mode_;
bool use_vfp2_; bool use_vfp3_;
// Operand type information determined at runtime. // Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_; BinaryOpIC::TypeInfo operands_type_;
@ -182,7 +182,7 @@ class BinaryOpStub: public CodeStub {
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {}; class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {}; class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP2Bits: public BitField<bool, 9, 1> {}; class VFP3Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
@ -190,7 +190,7 @@ class BinaryOpStub: public CodeStub {
int MinorKey() { int MinorKey() {
return OpBits::encode(op_) return OpBits::encode(op_)
| ModeBits::encode(mode_) | ModeBits::encode(mode_)
| VFP2Bits::encode(use_vfp2_) | VFP3Bits::encode(use_vfp3_)
| OperandTypeInfoBits::encode(operands_type_) | OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_); | ResultTypeInfoBits::encode(result_type_);
} }
@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
masm->sub(sp, masm->sub(sp,
sp, sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
@ -586,7 +586,7 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler* masm, inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) { SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
// Restore all VFP registers except d0. // Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i); DwVfpRegister reg = DwVfpRegister::from_code(i);

10
deps/v8/src/arm/codegen-arm.cc

@ -107,7 +107,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements) // -- r4 : scratch (elements)
// ----------------------------------- // -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done; Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp2_supported = CpuFeatures::IsSupported(VFP2); bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
// Check for empty arrays, which only require a map transition and no changes // Check for empty arrays, which only require a map transition and no changes
// to the backing store. // to the backing store.
@ -163,7 +163,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32 // r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged // r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged // r7: begin of FixedDoubleArray element fields, not tagged
if (!vfp2_supported) __ Push(r1, r0); if (!vfp3_supported) __ Push(r1, r0);
__ b(&entry); __ b(&entry);
@ -191,8 +191,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole); __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store. // Normal smi, convert to double and store.
if (vfp2_supported) { if (vfp3_supported) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r9); __ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0); __ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0); __ vstr(d0, r7, 0);
@ -225,7 +225,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6); __ cmp(r7, r6);
__ b(lt, &loop); __ b(lt, &loop);
if (!vfp2_supported) __ Pop(r1, r0); if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr); __ pop(lr);
__ bind(&done); __ bind(&done);
} }
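For reference, the fast path above does the following per element: untag the smi, move it into a VFP register (vmov), convert it with vcvt_f64_s32, and store the resulting double; holes are written as a NaN whose upper word is kHoleNanUpper32. A rough C++ sketch of that per-element logic (the hole-NaN constants here are illustrative, not V8's exact values; ARM smis are assumed to be 31-bit payloads above a zero tag bit):

#include <cstdint>
#include <cstring>

// Illustrative hole marker for a FixedDoubleArray slot: a NaN with a
// recognizable upper word (not V8's exact constant).
static const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;
static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;

// Convert one FixedArray slot into a FixedDoubleArray slot.
void ConvertElement(uint32_t tagged, uint64_t* dst, bool is_hole) {
  if (is_hole) {
    // Store the hole NaN, lower word in the low half.
    *dst = (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
  } else {
    // Untag: an ARM smi is the payload shifted left by one.
    int32_t value = static_cast<int32_t>(tagged) >> 1;
    double d = static_cast<double>(value);  // What vcvt_f64_s32 computes.
    std::memcpy(dst, &d, sizeof d);
  }
}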

8
deps/v8/src/arm/full-codegen-arm.cc

@ -675,7 +675,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true, Label* if_true,
Label* if_false, Label* if_false,
Label* fall_through) { Label* fall_through) {
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
ToBooleanStub stub(result_register()); ToBooleanStub stub(result_register());
__ CallStub(&stub); __ CallStub(&stub);
__ tst(result_register(), result_register()); __ tst(result_register(), result_register());
@ -3052,13 +3052,13 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double // Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing: // by computing:
// (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20). // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0); __ PrepareCallCFunction(1, r0);
__ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX)); __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
// 0x41300000 is the top half of 1.0 x 2^20 as a double. // 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load. // Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000)); __ mov(r1, Operand(0x41000000));
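The comment above is the classic bit trick for turning 32 random bits into a double in [0, 1) without a division: 0x41300000 is the upper half of the double 2^20, the random bits fill the low 32 mantissa bits, and subtracting 2^20 leaves just the fraction. A self-contained sketch:

#include <cassert>
#include <cstdint>
#include <cstring>

// Build (1.<20 zeros><32 random bits>) * 2^20, then subtract 1.0 * 2^20.
double RandomBitsToDouble(uint32_t random_bits) {
  // High word 0x41300000 is the upper half of the double 2^20; the
  // 32 random bits occupy the low half of the 52-bit mantissa.
  uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // 1048576.0 == 2^20; result is in [0, 1).
}

int main() {
  assert(RandomBitsToDouble(0) == 0.0);
  assert(RandomBitsToDouble(0xFFFFFFFF) < 1.0);
  return 0;
}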
@ -3181,7 +3181,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2); ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1)); VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
MathPowStub stub(MathPowStub::ON_STACK); MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub); __ CallStub(&stub);
} else { } else {

8
deps/v8/src/arm/lithium-codegen-arm.cc

@ -2390,12 +2390,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Register temp = ToRegister(instr->TempAt(0)); Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4)); ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function()); __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 4; static const int kAdditionalDelta = 5;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta; Label before_push_delta;
__ bind(&before_push_delta); __ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta); __ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize)); __ mov(temp, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
__ StoreToSafepointRegisterSlot(temp, temp); __ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(), CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET, RelocInfo::CODE_TARGET,
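Context for the padding above: on ARM, moving an immediate into a register takes one instruction when the value fits the rotated 8-bit Operand2 encoding and two otherwise (e.g. a mov/orr pair), so mov(temp, Operand(delta * kPointerSize)) has a data-dependent length and the nop keeps the instruction count fixed at what kAdditionalDelta assumed. A quick standalone check of the encoding rule (general ARM knowledge, not code from this patch):

#include <cassert>
#include <cstdint>

// True if v fits ARM's Operand2 immediate encoding: an 8-bit value
// rotated right by an even amount (0, 2, ..., 30).
bool FitsArmImmediate(uint32_t v) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotate left by rot to undo a right-rotation of the 8-bit base.
    uint32_t base = (v << rot) | (v >> ((32 - rot) & 31));
    if (base <= 0xFF) return true;
  }
  return false;
}

int main() {
  assert(FitsArmImmediate(0xFF));        // single mov
  assert(FitsArmImmediate(0x3FC00));     // 0xFF rotated: still one mov
  assert(!FitsArmImmediate(0x12345678)); // needs two instructions
  return 0;
}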

24
deps/v8/src/arm/macro-assembler-arm.cc

@ -265,8 +265,8 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2)); ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
if (!dst.is(src)) { if (!dst.is(src)) {
vmov(dst, src); vmov(dst, src);
} }
@ -778,7 +778,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst, void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm, const double imm,
const Condition cond) { const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP2)); ASSERT(CpuFeatures::IsEnabled(VFP3));
static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0); static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm); DoubleRepresentation value(imm);
@ -930,7 +930,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
} }
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) { if (use_eabi_hardfloat()) {
Move(dst, d0); Move(dst, d0);
} else { } else {
@ -1968,7 +1967,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element // scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination; FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters; destination = FloatingPointHelper::kVFPRegisters;
} else { } else {
destination = FloatingPointHelper::kCoreRegisters; destination = FloatingPointHelper::kCoreRegisters;
@ -1985,7 +1984,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4, scratch4,
s2); s2);
if (destination == FloatingPointHelper::kVFPRegisters) { if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
vstr(d0, scratch1, 0); vstr(d0, scratch1, 0);
} else { } else {
str(mantissa_reg, MemOperand(scratch1, 0)); str(mantissa_reg, MemOperand(scratch1, 0));
@ -2332,8 +2331,8 @@ void MacroAssembler::ConvertToInt32(Register source,
Register scratch2, Register scratch2,
DwVfpRegister double_scratch, DwVfpRegister double_scratch,
Label *not_int32) { Label *not_int32) {
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag)); sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset); vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch); vcvt_s32_f64(double_scratch.low(), double_scratch);
@ -2428,8 +2427,8 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
CheckForInexactConversion check_inexact) { CheckForInexactConversion check_inexact) {
ASSERT(CpuFeatures::IsSupported(VFP2)); ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1; Register prev_fpscr = scratch1;
Register scratch = scratch2; Register scratch = scratch2;
@ -2547,7 +2546,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch, Register scratch,
Register input_high, Register input_high,
Register input_low) { Register input_low) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
ASSERT(!input_high.is(result)); ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result)); ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high)); ASSERT(!input_low.is(input_high));
@ -3333,7 +3332,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) { if (use_eabi_hardfloat()) {
Move(d0, dreg); Move(d0, dreg);
} else { } else {
@ -3344,7 +3342,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) { DoubleRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) { if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) { if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1)); ASSERT(!dreg1.is(d1));
@ -3363,7 +3360,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) { Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) { if (use_eabi_hardfloat()) {
Move(d0, dreg); Move(d0, dreg);
Move(r0, reg); Move(r0, reg);

44
deps/v8/src/arm/stub-cache-arm.cc

@ -986,8 +986,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval, Register fval,
Register scratch1, Register scratch1,
Register scratch2) { Register scratch2) {
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival); __ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2)); __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0); __ vcvt_f32_s32(s0, s0);
@ -2089,11 +2089,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver // -- sp[argc * 4] : receiver
// ----------------------------------- // -----------------------------------
if (!CpuFeatures::IsSupported(VFP2)) { if (!CpuFeatures::IsSupported(VFP3)) {
return Handle<Code>::null(); return Handle<Code>::null();
} }
CpuFeatures::Scope scope_vfp2(VFP2); CpuFeatures::Scope scope_vfp3(VFP3);
const int argc = arguments().immediate(); const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of // If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call. // arguments, bail out to the regular call.
@ -3549,8 +3549,8 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch1, Register scratch1,
DwVfpRegister double_scratch0, DwVfpRegister double_scratch0,
Label* fail) { Label* fail) {
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
Label key_ok; Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap // Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi // number and check if the conversion is exact and fits into the smi
@ -3636,8 +3636,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ ldr(value, MemOperand(r3, key, LSL, 1)); __ ldr(value, MemOperand(r3, key, LSL, 1));
break; break;
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1)); __ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0); __ vldr(s0, r2, 0);
} else { } else {
@ -3645,8 +3645,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} }
break; break;
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 2)); __ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0); __ vldr(d0, r2, 0);
} else { } else {
@ -3697,8 +3697,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// Now we can use r0 for the result as key is not needed any more. // Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5); __ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value); __ vmov(s0, value);
__ vcvt_f64_s32(d0, s0); __ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag)); __ sub(r3, r0, Operand(kHeapObjectTag));
@ -3725,8 +3725,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// The test is different for unsigned int values. Since we need // The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't // the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value. // handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
Label box_int, done; Label box_int, done;
__ tst(value, Operand(0xC0000000)); __ tst(value, Operand(0xC0000000));
__ b(ne, &box_int); __ b(ne, &box_int);
@ -3789,8 +3789,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a // For the floating-point array type, we need to always allocate a
// HeapNumber. // HeapNumber.
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as // Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to // AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space. // exhausted young space.
@ -3857,8 +3857,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret(); __ Ret();
} }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as // Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to // AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space. // exhausted young space.
@ -3983,7 +3983,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ add(r3, r3, Operand(key, LSL, 2)); __ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element // r3: effective address of the double element
FloatingPointHelper::Destination destination; FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters; destination = FloatingPointHelper::kVFPRegisters;
} else { } else {
destination = FloatingPointHelper::kCoreRegisters; destination = FloatingPointHelper::kCoreRegisters;
@ -3993,7 +3993,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
d0, r6, r7, // These are: double_dst, dst1, dst2. d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch. r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) { if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ vstr(d0, r3, 0); __ vstr(d0, r3, 0);
} else { } else {
__ str(r6, MemOperand(r3, 0)); __ str(r6, MemOperand(r3, 0));
@ -4028,8 +4028,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and // The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more // +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero. // reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not // vldr requires offset to be a multiple of 4 so we can not

4
deps/v8/src/flag-definitions.h

@ -263,9 +263,7 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)") "enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true, DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies " "enable use of VFP3 instructions if available - this implies "
"enabling ARMv7 and VFP2 instructions (ARM only)") "enabling ARMv7 instructions (ARM only)")
DEFINE_bool(enable_vfp2, true,
"enable use of VFP2 instructions if available")
DEFINE_bool(enable_armv7, true, DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)") "enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true, DEFINE_bool(enable_fpu, true,

12
deps/v8/src/mips/code-stubs-mips.cc

@ -3453,23 +3453,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1, 1,
1); 1);
} else { } else {
if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE(); ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU); CpuFeatures::Scope scope(FPU);
Label no_update; Label no_update;
Label skip_cache; Label skip_cache;
// Call C function to calculate the result and update the cache. // Call C function to calculate the result and update the cache.
// Register a0 holds precalculated cache entry address; preserve // a0: precalculated cache entry address.
// it on the stack and pop it into register cache_entry after the // a2 and a3: parts of the double value.
// call. // Store a0, a2 and a3 on stack for later before calling C function.
__ Push(cache_entry, a2, a3); __ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0); GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4); __ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a // Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating. // heap number, we return the result without updating.
__ Pop(cache_entry, a2, a3); __ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));

3
deps/v8/src/mips/macro-assembler-mips.cc

@ -4459,7 +4459,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize + size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize; FixedArrayBase::kHeaderSize;
Branch(no_map_match, ne, map_in_out, Operand(scratch)); lw(at, FieldMemOperand(scratch, offset));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map. // Use the transitioned cached map.
offset = transitioned_kind * kPointerSize + offset = transitioned_kind * kPointerSize +
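The MIPS fix above is subtle: the old code branched on a comparison of map_in_out against scratch, i.e. against the JS_ARRAY_MAPS fixed array itself, rather than against the transitioned map stored in that array at expected_kind's slot; the new lw loads the slot into at first and compares against that. Schematically (hypothetical names, with a plain pointer array standing in for the fixed array):

// Corrected check from LoadTransitionedArrayMapConditional, in C++ terms.
// 'array_maps' stands for the JS_ARRAY_MAPS array already in 'scratch'.
bool ExpectedMapMatches(void* map_in_out, void** array_maps, int expected_kind) {
  // Fix: load the element (lw at, FieldMemOperand(scratch, offset))
  // instead of comparing against 'array_maps' itself.
  void* expected_map = array_maps[expected_kind];
  return map_in_out == expected_map;  // On mismatch, branch to no_map_match.
}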

3
deps/v8/src/platform-linux.cc

@ -132,9 +132,6 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// facility is universally available on the ARM architectures, // facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such. // so it's up to individual OSes to provide such.
switch (feature) { switch (feature) {
case VFP2:
search_string = "vfp";
break;
case VFP3: case VFP3:
search_string = "vfpv3"; search_string = "vfpv3";
break; break;

22
deps/v8/src/platform-macos.cc

@ -682,27 +682,17 @@ Mutex* OS::CreateMutex() {
class MacOSSemaphore : public Semaphore { class MacOSSemaphore : public Semaphore {
public: public:
explicit MacOSSemaphore(int count) { explicit MacOSSemaphore(int count) {
int r; semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
r = semaphore_create(mach_task_self(),
&semaphore_,
SYNC_POLICY_FIFO,
count);
ASSERT(r == KERN_SUCCESS);
} }
~MacOSSemaphore() { ~MacOSSemaphore() {
int r; semaphore_destroy(mach_task_self(), semaphore_);
r = semaphore_destroy(mach_task_self(), semaphore_);
ASSERT(r == KERN_SUCCESS);
} }
void Wait() { // The MacOS mach semaphore documentation claims it does not have spurious
int r; // wakeups, the way pthreads semaphores do. So the code from the linux
do { // platform is not needed here.
r = semaphore_wait(semaphore_); void Wait() { semaphore_wait(semaphore_); }
ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
} while (r == KERN_ABORTED);
}
bool Wait(int timeout); bool Wait(int timeout);
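The rewritten Wait() loops because semaphore_wait can return KERN_ABORTED when the calling thread takes a signal, contrary to the removed comment's claim of no spurious wakeups; this is presumably what the new SemaphoreInterruption test in test-api.cc exercises. A standalone sketch of the retry pattern (macOS-only, built against the Mach headers):

#include <mach/mach.h>

// semaphore_wait returns KERN_ABORTED if the thread is interrupted
// (e.g. by a signal), so retry until the wait genuinely succeeds.
void WaitUninterruptibly(semaphore_t sem) {
  kern_return_t r;
  do {
    r = semaphore_wait(sem);
  } while (r == KERN_ABORTED);
}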

17
deps/v8/src/platform-posix.cc

@ -109,20 +109,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000); raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else #else
uint32_t raw_addr = V8::RandomPrivate(isolate); uint32_t raw_addr = V8::RandomPrivate(isolate);
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// For our 32-bit mmap() hint, we pick a random address in the bottom // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// half of the top half of the address space (that is, the third quarter). // 10.6 and 10.7.
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); systems will
// either try the hint and if that fails move higher (MacOS and other BSD
// derivatives) or try the hint and if that fails allocate as if there were
// no hint at all (Linux, Solaris, illumos and derivatives). The high hint
// prevents the break from getting hemmed in at low values, ceding half of
// the address space to the system heap.
raw_addr &= 0x3ffff000; raw_addr &= 0x3ffff000;
raw_addr += 0x80000000; raw_addr += 0x20000000;
#endif #endif
return reinterpret_cast<void*>(raw_addr); return reinterpret_cast<void*>(raw_addr);
} }

8
deps/v8/src/platform-solaris.cc

@ -125,8 +125,12 @@ const char* OS::LocalTimezone(double time) {
double OS::LocalTimeOffset() { double OS::LocalTimeOffset() {
tzset(); // On Solaris, struct tm does not contain a tm_gmtoff field.
return -static_cast<double>(timezone * msPerSecond); time_t utc = time(NULL);
ASSERT(utc != -1);
struct tm* loc = localtime(&utc);
ASSERT(loc != NULL);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
} }

1
deps/v8/src/v8globals.h

@ -442,7 +442,6 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86 CPUID = 10, // x86
VFP3 = 1, // ARM VFP3 = 1, // ARM
ARMv7 = 2, // ARM ARMv7 = 2, // ARM
VFP2 = 3, // ARM
SAHF = 0, // x86 SAHF = 0, // x86
FPU = 1}; // MIPS FPU = 1}; // MIPS

2
deps/v8/src/version.cc

@ -35,7 +35,7 @@
#define MAJOR_VERSION 3 #define MAJOR_VERSION 3
#define MINOR_VERSION 11 #define MINOR_VERSION 11
#define BUILD_NUMBER 10 #define BUILD_NUMBER 10
#define PATCH_LEVEL 22 #define PATCH_LEVEL 25
// Use 1 for candidates and 0 otherwise. // Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.) // (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0 #define IS_CANDIDATE_VERSION 0

70
deps/v8/test/cctest/test-api.cc

@ -27,11 +27,6 @@
#include <limits.h> #include <limits.h>
#ifndef WIN32
#include <signal.h> // kill
#include <unistd.h> // getpid
#endif // WIN32
#include "v8.h" #include "v8.h"
#include "api.h" #include "api.h"
@ -17022,68 +17017,3 @@ THREADED_TEST(Regress142088) {
CHECK(context->Global()->Get(v8_str("y_from_obj"))->IsUndefined()); CHECK(context->Global()->Get(v8_str("y_from_obj"))->IsUndefined());
CHECK(context->Global()->Get(v8_str("y_from_subobj"))->IsUndefined()); CHECK(context->Global()->Get(v8_str("y_from_subobj"))->IsUndefined());
} }
#ifndef WIN32
class ThreadInterruptTest {
public:
ThreadInterruptTest() : sem_(NULL), sem_value_(0) { }
~ThreadInterruptTest() { delete sem_; }
void RunTest() {
sem_ = i::OS::CreateSemaphore(0);
InterruptThread i_thread(this);
i_thread.Start();
sem_->Wait();
CHECK_EQ(kExpectedValue, sem_value_);
}
private:
static const int kExpectedValue = 1;
class InterruptThread : public i::Thread {
public:
explicit InterruptThread(ThreadInterruptTest* test)
: Thread("InterruptThread"), test_(test) {}
virtual void Run() {
struct sigaction action;
// Make sure the main thread has entered the wait
i::OS::Sleep(100);
// Set up the signal handler
memset(&action, 0, sizeof(action));
action.sa_handler = SignalHandler;
sigaction(SIGCHLD, &action, NULL);
// Send signal
kill(getpid(), SIGCHLD);
// Give the wait a chance to return early (incorrectly) due to the signal
i::OS::Sleep(100);
// Set value and signal semaphore
test_->sem_value_ = 1;
test_->sem_->Signal();
}
static void SignalHandler(int signal) {
}
private:
ThreadInterruptTest* test_;
struct sigaction sa_;
};
i::Semaphore* sem_;
volatile int sem_value_;
};
THREADED_TEST(SemaphoreInterruption) {
ThreadInterruptTest().RunTest();
}
#endif // WIN32

14
deps/v8/test/cctest/test-assembler-arm.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -642,8 +642,8 @@ TEST(8) {
// single precision values around in memory. // single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0); Assembler assm(Isolate::Current(), NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp)); __ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@ -753,8 +753,8 @@ TEST(9) {
// single precision values around in memory. // single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0); Assembler assm(Isolate::Current(), NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp)); __ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@ -868,8 +868,8 @@ TEST(10) {
// single precision values around in memory. // single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0); Assembler assm(Isolate::Current(), NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) { if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP2); CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp)); __ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());

41
deps/v8/test/mjsunit/regress/regress-2234.js

@ -0,0 +1,41 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
function test(i) {
// Overwrite random parts of the transcendental cache.
Math.sin(i / 1779 * Math.PI);
// Check whether the first cache line has been accidentally overwritten
// with incorrect key.
assertEquals(0, Math.sin(0));
}
for (i = 0; i < 10000; ++i) {
test(i);
if (i == 0) %OptimizeFunctionOnNextCall(test);
}

3
deps/v8/tools/gyp/v8.gyp

@ -721,9 +721,6 @@
'../../src/win32-math.h', '../../src/win32-math.h',
], ],
'msvs_disabled_warnings': [4351, 4355, 4800], 'msvs_disabled_warnings': [4351, 4355, 4800],
'direct_dependent_settings': {
'msvs_disabled_warnings': [4351, 4355, 4800],
},
'link_settings': { 'link_settings': {
'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ], 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
}, },
