
Upgrade V8 to 3.1.2

v0.7.4-release
Ryan Dahl, 14 years ago
parent commit a0702b54d1
Changed files (lines changed per file):

  1. deps/v8/.gitignore (1)
  2. deps/v8/AUTHORS (3)
  3. deps/v8/ChangeLog (12)
  4. deps/v8/LICENSE.strongtalk (10)
  5. deps/v8/LICENSE.v8 (26)
  6. deps/v8/LICENSE.valgrind (45)
  7. deps/v8/SConstruct (2)
  8. deps/v8/src/arm/assembler-arm.cc (16)
  9. deps/v8/src/arm/assembler-arm.h (18)
  10. deps/v8/src/arm/code-stubs-arm.cc (446)
  11. deps/v8/src/arm/code-stubs-arm.h (39)
  12. deps/v8/src/arm/codegen-arm.cc (12)
  13. deps/v8/src/arm/codegen-arm.h (1)
  14. deps/v8/src/arm/constants-arm.h (24)
  15. deps/v8/src/arm/deoptimizer-arm.cc (2)
  16. deps/v8/src/arm/full-codegen-arm.cc (10)
  17. deps/v8/src/arm/ic-arm.cc (25)
  18. deps/v8/src/arm/lithium-arm.cc (117)
  19. deps/v8/src/arm/lithium-arm.h (243)
  20. deps/v8/src/arm/lithium-codegen-arm.cc (196)
  21. deps/v8/src/arm/lithium-codegen-arm.h (5)
  22. deps/v8/src/arm/macro-assembler-arm.cc (158)
  23. deps/v8/src/arm/macro-assembler-arm.h (36)
  24. deps/v8/src/arm/simulator-arm.cc (213)
  25. deps/v8/src/arm/simulator-arm.h (8)
  26. deps/v8/src/arm/stub-cache-arm.cc (230)
  27. deps/v8/src/array.js (8)
  28. deps/v8/src/assembler.cc (12)
  29. deps/v8/src/assembler.h (35)
  30. deps/v8/src/code-stubs.h (3)
  31. deps/v8/src/codegen-inl.h (4)
  32. deps/v8/src/compilation-cache.cc (21)
  33. deps/v8/src/compilation-cache.h (3)
  34. deps/v8/src/compiler.cc (14)
  35. deps/v8/src/compiler.h (13)
  36. deps/v8/src/conversions.cc (78)
  37. deps/v8/src/deoptimizer.cc (4)
  38. deps/v8/src/disassembler.cc (4)
  39. deps/v8/src/extensions/gc-extension.cc (6)
  40. deps/v8/src/full-codegen.cc (2)
  41. deps/v8/src/full-codegen.h (3)
  42. deps/v8/src/handles.cc (2)
  43. deps/v8/src/heap-profiler.cc (3)
  44. deps/v8/src/heap.cc (11)
  45. deps/v8/src/heap.h (11)
  46. deps/v8/src/hydrogen-instructions.cc (10)
  47. deps/v8/src/hydrogen-instructions.h (114)
  48. deps/v8/src/hydrogen.cc (92)
  49. deps/v8/src/hydrogen.h (2)
  50. deps/v8/src/ia32/code-stubs-ia32.cc (48)
  51. deps/v8/src/ia32/code-stubs-ia32.h (19)
  52. deps/v8/src/ia32/codegen-ia32.cc (10)
  53. deps/v8/src/ia32/codegen-ia32.h (1)
  54. deps/v8/src/ia32/deoptimizer-ia32.cc (115)
  55. deps/v8/src/ia32/full-codegen-ia32.cc (153)
  56. deps/v8/src/ia32/ic-ia32.cc (22)
  57. deps/v8/src/ia32/lithium-codegen-ia32.cc (45)
  58. deps/v8/src/ia32/lithium-ia32.cc (98)
  59. deps/v8/src/ia32/lithium-ia32.h (238)
  60. deps/v8/src/ia32/macro-assembler-ia32.cc (20)
  61. deps/v8/src/ia32/macro-assembler-ia32.h (11)
  62. deps/v8/src/ia32/stub-cache-ia32.cc (31)
  63. deps/v8/src/ic.cc (48)
  64. deps/v8/src/lithium-allocator-inl.h (140)
  65. deps/v8/src/lithium-allocator.cc (171)
  66. deps/v8/src/lithium-allocator.h (118)
  67. deps/v8/src/lithium.h (76)
  68. deps/v8/src/messages.js (9)
  69. deps/v8/src/objects-inl.h (28)
  70. deps/v8/src/objects.cc (55)
  71. deps/v8/src/objects.h (23)
  72. deps/v8/src/parser.cc (126)
  73. deps/v8/src/parser.h (8)
  74. deps/v8/src/preparser.cc (31)
  75. deps/v8/src/preparser.h (2)
  76. deps/v8/src/prettyprinter.cc (25)
  77. deps/v8/src/runtime.cc (192)
  78. deps/v8/src/runtime.h (4)
  79. deps/v8/src/safepoint-table.cc (22)
  80. deps/v8/src/safepoint-table.h (11)
  81. deps/v8/src/scanner-base.cc (61)
  82. deps/v8/src/scanner-base.h (9)
  83. deps/v8/src/scanner.cc (19)
  84. deps/v8/src/scanner.h (9)
  85. deps/v8/src/scopes.cc (3)
  86. deps/v8/src/stub-cache.cc (33)
  87. deps/v8/src/stub-cache.h (4)
  88. deps/v8/src/third_party/strongtalk/README.chromium (18)
  89. deps/v8/src/token.h (37)
  90. deps/v8/src/top.h (2)
  91. deps/v8/src/type-info.h (6)
  92. deps/v8/src/uri.js (3)
  93. deps/v8/src/v8globals.h (6)
  94. deps/v8/src/v8natives.js (146)
  95. deps/v8/src/version.cc (2)
  96. deps/v8/src/x64/assembler-x64.cc (27)
  97. deps/v8/src/x64/assembler-x64.h (32)
  98. deps/v8/src/x64/code-stubs-x64.cc (497)
  99. deps/v8/src/x64/code-stubs-x64.h (24)
  100. deps/v8/src/x64/codegen-x64.cc (10)

deps/v8/.gitignore (1)

@@ -20,6 +20,7 @@ d8_g
 shell
 shell_g
 /obj/
+/test/sputnik/sputniktests/
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug

deps/v8/AUTHORS (3)

@@ -26,6 +26,7 @@ Kun Zhang <zhangk@codeaurora.org>
 Matt Hanselman <mjhanselman@gmail.com>
 Martyn Capewell <martyn.capewell@arm.com>
 Michael Smith <mike@w3.org>
+Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
 Rafal Krypa <rafal@krypa.net>
@@ -35,4 +36,4 @@ Ryan Dahl <coldredlemur@gmail.com>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Vlad Burlik <vladbph@gmail.com>
-Mike Gilbert <floppymaster@gmail.com>
+Zaheer Ahmad <zahmad@codeaurora.org>

deps/v8/ChangeLog (12)

@@ -1,3 +1,15 @@
+2011-02-07: Version 3.1.2
+
+        Added better security checks when accessing properties via
+        Object.getOwnPropertyDescriptor.
+
+        Fixed bug in Object.defineProperty and related access bugs (issues
+        992, 1083 and 1092).
+
+        Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
+        copyright notice generation for embedders.
+
+
 2011-02-02: Version 3.1.1
 
         Perform security checks before fetching the value in
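The defineProperty/getOwnPropertyDescriptor fixes in the 3.1.2 entry are observable from plain JavaScript. A minimal sketch of the ES5 semantics involved (the object and property names here are illustrative, not taken from the commit):

    // Define a read-only, non-configurable property, then inspect it.
    var o = {};
    Object.defineProperty(o, 'answer', {
      value: 42,
      writable: false,
      enumerable: true,
      configurable: false
    });

    var desc = Object.getOwnPropertyDescriptor(o, 'answer');
    // desc.value === 42, desc.writable === false, desc.configurable === false

    o.answer = 7;  // Silently ignored (a TypeError in strict mode).
    // o.answer is still 42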

deps/v8/src/third_party/strongtalk/LICENSE → deps/v8/LICENSE.strongtalk (10)

@@ -6,15 +6,15 @@ modification, are permitted provided that the following conditions are
 met:
 
 - Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 
 - Redistribution in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
 
 - Neither the name of Sun Microsystems or the names of contributors may
   be used to endorse or promote products derived from this software without
   specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,

deps/v8/LICENSE.v8 (26)

@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

deps/v8/LICENSE.valgrind (45)

@@ -0,0 +1,45 @@
+----------------------------------------------------------------
+Notice that the following BSD-style license applies to this one
+file (valgrind.h) only. The rest of Valgrind is licensed under the
+terms of the GNU General Public License, version 2, unless
+otherwise indicated. See the COPYING file in the source
+distribution for details.
+----------------------------------------------------------------
+
+This file is part of Valgrind, a dynamic binary instrumentation
+framework.
+
+Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+   not claim that you wrote the original software. If you use this
+   software in a product, an acknowledgment in the product
+   documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+   not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+   products derived from this software without specific prior written
+   permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

deps/v8/SConstruct (2)

@@ -136,7 +136,7 @@ LIBRARY_FLAGS = {
   'gcc': {
     'all': {
       'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
+      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
     },
     'visibility:hidden': {
       # Use visibility=default to disable this.

deps/v8/src/arm/assembler-arm.cc (16)

@@ -2124,7 +2124,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
                         const int dst_code,
                         const VFPType src_type,
                         const int src_code,
-                        Assembler::ConversionMode mode,
+                        VFPConversionMode mode,
                         const Condition cond) {
   ASSERT(src_type != dst_type);
   int D, Vd, M, Vm;
@@ -2167,7 +2167,7 @@
 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
@@ -2176,7 +2176,7 @@
 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
@@ -2185,7 +2185,7 @@
 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
@@ -2194,7 +2194,7 @@
 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
@@ -2203,7 +2203,7 @@
 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
@@ -2212,7 +2212,7 @@
 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
@@ -2221,7 +2221,7 @@
 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));

deps/v8/src/arm/assembler-arm.h (18)

@@ -942,37 +942,33 @@ class Assembler : public Malloced {
   void vmov(const Register dst,
             const SwVfpRegister src,
             const Condition cond = al);
-  enum ConversionMode {
-    FPSCRRounding = 0,
-    RoundToZero = 1
-  };
   void vcvt_f64_s32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f32_s32(const SwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f64_u32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_s32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_u32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f64_f32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vabs(const DwVfpRegister dst,

deps/v8/src/arm/code-stubs-arm.cc (446)

@@ -396,6 +396,19 @@ class FloatingPointHelper : public AllStatic {
                          Register scratch1,
                          Register scratch2,
                          Label* not_number);
+
+  // Loads the number from object into dst as a 32-bit integer if possible. If
+  // the object is not a 32-bit integer control continues at the label
+  // not_int32. If VFP is supported double_scratch is used but not scratch2.
+  static void LoadNumberAsInteger(MacroAssembler* masm,
+                                  Register object,
+                                  Register dst,
+                                  Register heap_number_map,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  DwVfpRegister double_scratch,
+                                  Label* not_int32);
+
  private:
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
@@ -461,15 +474,21 @@ void FloatingPointHelper::LoadOperands(
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Destination destination,
                                      Register object,
                                      DwVfpRegister dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
   Label is_smi, done;
 
   __ JumpIfSmi(object, &is_smi);
@@ -514,6 +533,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
 }
 
 
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+                                              Register object,
+                                              Register dst,
+                                              Register heap_number_map,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              DwVfpRegister double_scratch,
+                                              Label* not_int32) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi, done;
+  __ JumpIfSmi(object, &is_smi);
+  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ cmp(scratch1, heap_number_map);
+  __ b(ne, not_int32);
+  __ ConvertToInt32(
+      object, dst, scratch1, scratch2, double_scratch, not_int32);
+  __ jmp(&done);
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1676,7 +1723,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
@@ -1687,7 +1734,7 @@
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
@@ -2529,6 +2576,18 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
       __ and_(right, left, Operand(scratch1));
       __ Ret();
       break;
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      __ Ret();
+      break;
    default:
      UNREACHABLE();
   }
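Why the new smi-smi cases can return with a bare __ Ret(): smis are 31-bit signed integers whose top two bits always agree (sign extension), and a bitwise AND, OR or XOR of two such values again has agreeing top bits, so the result is always a valid smi. A sketch of that invariant in JavaScript (isSmi is a hypothetical helper modelling V8's smi range, not part of the commit):

    // A value is smi-representable if it is an int32 in [-2^30, 2^30 - 1].
    function isSmi(v) {
      return v === (v | 0) && v >= -0x40000000 && v <= 0x3fffffff;
    }

    var a = -0x40000000;  // smallest smi
    var b = 0x3fffffff;   // largest smi
    isSmi(a | b);  // true
    isSmi(a & b);  // true
    isSmi(a ^ b);  // true, so no heap number is ever needed on this path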
@@ -2545,90 +2604,179 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
   Register scratch1 = r7;
   Register scratch2 = r9;
 
-  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
-  // on whether VFP3 is available.
-  FloatingPointHelper::Destination destination =
-      CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
-      FloatingPointHelper::kVFPRegisters :
-      FloatingPointHelper::kCoreRegisters;
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  // Allocate new heap number for result.
-  Register result = r5;
-  __ AllocateHeapNumber(
-      result, scratch1, scratch2, heap_number_map, gc_required);
-
-  // Load the operands.
-  if (smi_operands) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
-  } else {
-    FloatingPointHelper::LoadOperands(masm,
-                                      destination,
-                                      heap_number_map,
-                                      scratch1,
-                                      scratch2,
-                                      not_numbers);
-  }
-
-  // Calculate the result.
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    // Using VFP registers:
-    // d6: Left value
-    // d7: Right value
-    CpuFeatures::Scope scope(VFP3);
-    switch (op_) {
-      case Token::ADD:
-        __ vadd(d5, d6, d7);
-        break;
-      case Token::SUB:
-        __ vsub(d5, d6, d7);
-        break;
-      case Token::MUL:
-        __ vmul(d5, d6, d7);
-        break;
-      case Token::DIV:
-        __ vdiv(d5, d6, d7);
-        break;
-      default:
-        UNREACHABLE();
-    }
-
-    __ sub(r0, result, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ Ret();
-  } else {
-    // Using core registers:
-    // r0: Left value (least significant part of mantissa).
-    // r1: Left value (sign, exponent, top of mantissa).
-    // r2: Right value (least significant part of mantissa).
-    // r3: Right value (sign, exponent, top of mantissa).
-
-    __ push(lr);  // For later.
-    __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-    // Call C routine that may not cause GC or other trouble. r5 is callee
-    // save.
-    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-    // Store answer in the overwritable heap number.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+      // depending on whether VFP3 is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+          FloatingPointHelper::kVFPRegisters :
+          FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = r5;
+      __ AllocateHeapNumber(
+          result, scratch1, scratch2, heap_number_map, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        // Using VFP registers:
+        // d6: Left value
+        // d7: Right value
+        CpuFeatures::Scope scope(VFP3);
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d6, d7);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d6, d7);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d6, d7);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d6, d7);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        __ sub(r0, result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ add(r0, r0, Operand(kHeapObjectTag));
+        __ Ret();
+      } else {
+        // Using core registers:
+        // r0: Left value (least significant part of mantissa).
+        // r1: Left value (sign, exponent, top of mantissa).
+        // r2: Right value (least significant part of mantissa).
+        // r3: Right value (sign, exponent, top of mantissa).
+
+        // Push the current return address before the C call. Return will be
+        // through pop(pc) below.
+        __ push(lr);
+        __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
+        // Call C routine that may not cause GC or other trouble. r5 is callee
+        // save.
+        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+        // Store answer in the overwritable heap number.
 #if !defined(USE_ARM_EABI)
-    // Double returned in fp coprocessor register 0 and 1, encoded as
-    // register cr8. Offsets must be divisible by 4 for coprocessor so we
-    // need to substract the tag from r5.
-    __ sub(scratch1, result, Operand(kHeapObjectTag));
-    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+        // Double returned in fp coprocessor register 0 and 1, encoded as
+        // register cr8. Offsets must be divisible by 4 for coprocessor so we
+        // need to substract the tag from r5.
+        __ sub(scratch1, result, Operand(kHeapObjectTag));
+        __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
 #else
-    // Double returned in registers 0 and 1.
-    __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+        // Double returned in registers 0 and 1.
+        __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
 #endif
-    __ mov(r0, Operand(result));
-    // And we are done.
-    __ pop(pc);
+        // Plase result in r0 and return to the pushed return address.
+        __ mov(r0, Operand(result));
+        __ pop(pc);
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (smi_operands) {
+        __ SmiUntag(r3, left);
+        __ SmiUntag(r2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in r2 and left in r3.
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 left,
+                                                 r3,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 right,
+                                                 r2,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+      }
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      Label result_not_a_smi;
+      // Check that the *signed* result fits in a smi.
+      __ add(r3, r2, Operand(0x40000000), SetCC);
+      __ b(mi, &result_not_a_smi);
+      __ SmiTag(r0, r2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      __ AllocateHeapNumber(
+          r5, scratch1, scratch2, heap_number_map, gc_required);
+
+      // r2: Answer as signed int32.
+      // r5: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to r0, which is the
+      // result.
+      __ mov(r0, Operand(r5));
+
+      if (CpuFeatures::IsSupported(VFP3)) {
+        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+        CpuFeatures::Scope scope(VFP3);
+        __ vmov(s0, r2);
+        __ vcvt_f64_s32(d0, s0);
+        __ sub(r3, r0, Operand(kHeapObjectTag));
+        __ vstr(d0, r3, HeapNumber::kValueOffset);
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in r2 to the heap number in r0,
+        // using r3 as scratch. r0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
   }
 }
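In the heap-number path above the 32-bit result can fall outside the smi range, which is what the "add 0x40000000, branch on minus" sequence tests: r2 fits in a smi exactly when r2 + 2^30 does not set the sign bit. A JavaScript model of that check (fitsSmi is a hypothetical name, not from the commit):

    // Models "__ add(r3, r2, Operand(0x40000000), SetCC); __ b(mi, ...)":
    // a 32-bit value fits in a smi iff it lies in [-2^30, 2^30 - 1].
    function fitsSmi(r) {
      return ((r + 0x40000000) | 0) >= 0;  // "| 0" models 32-bit wraparound
    }

    fitsSmi(0x3fffffff);   // true:  largest smi
    fitsSmi(0x40000000);   // false: the stub allocates a heap number instead
    fitsSmi(-0x40000000);  // true:  smallest smi
    fitsSmi(-0x40000001);  // false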
@@ -2646,7 +2794,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Register left = r1;
   Register right = r0;
@@ -2678,7 +2829,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
       result_type_ == TRBinaryOpIC::SMI) {
@@ -2714,7 +2868,10 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -2727,7 +2884,10 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label not_numbers, call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@@ -2747,7 +2907,10 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label call_runtime;
@@ -2812,6 +2975,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
     case Token::MOD:
       __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
       break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
     default:
       UNREACHABLE();
   }
@@ -3037,7 +3209,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     __ b(ne, &slow);
 
     // Convert the heap number is r0 to an untagged integer in r1.
-    __ ConvertToInt32(r0, r1, r2, r3, &slow);
+    __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
 
     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
@@ -3329,9 +3501,17 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // this by performing a garbage collection and retrying the
   // builtin once.
 
+  // Compute the argv pointer in a callee-saved register.
+  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ sub(r6, r6, Operand(kPointerSize));
+
   // Enter the exit frame that transitions from JavaScript to C++.
   __ EnterExitFrame(save_doubles_);
 
+  // Setup argc and the builtin function in callee-saved registers.
+  __ mov(r4, Operand(r0));
+  __ mov(r5, Operand(r1));
+
   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)
   // r6: pointer to first argument (C callee-saved)
@@ -5734,6 +5914,90 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
 }
 
 
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  __ ldr(pc, MemOperand(sp, 0));
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    ApiFunction *function) {
+  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                     RelocInfo::CODE_TARGET));
+  // Push return address (accessible to GC through exit frame pc).
+  __ mov(r2,
+         Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+  __ str(pc, MemOperand(sp, 0));
+  __ Jump(r2);  // Call the api function.
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key - holds the smi key on entry.
+  //       Unchanged unless 'result' is the same register.
+  //
+  // elements - set to be the receiver's elements on exit.
+  //
+  // elements_map - set to be the map of the receiver's elements
+  //                on exit.
+  //
+  // result - holds the result of the pixel array load on exit,
+  //          tagged as a smi if successful.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used a scratch register in map check, if map
+  //            check is successful, contains the length of the
+  //            pixel array, the pointer to external elements and
+  //            the untagged result.
+  //
+  // scratch2 - holds the untaged key.
+
+  // Some callers already have verified that the key is a smi. key_not_smi is
+  // set to NULL as a sentinel for that case. Otherwise, add an explicit check
+  // to ensure the key is a smi must be added.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+  __ SmiUntag(scratch2, key);
+
+  // Verify that the receiver has pixel array elements.
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
+              not_pixel_array, true);
+
+  // Key must be in range of the pixel array.
+  __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
+  __ cmp(scratch2, scratch1);
+  __ b(hs, out_of_range);  // unsigned check handles negative keys.
+
+  // Perform the indexed load and tag the result as a smi.
+  __ ldr(scratch1,
+         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+  __ ldrb(scratch1, MemOperand(scratch1, scratch2));
+  __ SmiTag(r0, scratch1);
+  __ Ret();
+}
+
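The single "__ b(hs, out_of_range)" bounds check above relies on an unsigned comparison: reinterpreting the signed key as unsigned turns any negative key into a huge value, so one "higher or same" branch rejects both negative and too-large indices. The same trick, modelled in JavaScript (inBounds is a hypothetical name):

    // One unsigned comparison covers both failure cases.
    function inBounds(key, length) {
      return (key >>> 0) < length;  // >>> 0 reinterprets key as unsigned
    }

    inBounds(5, 10);   // true
    inBounds(10, 10);  // false: past the end
    inBounds(-1, 10);  // false: -1 >>> 0 is 4294967295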
 #undef __
 
 } }  // namespace v8::internal

deps/v8/src/arm/code-stubs-arm.h (39)

@@ -571,6 +571,45 @@ class RegExpCEntryStub: public CodeStub {
 };
 
 
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code which called into native pinned in the memory. Currently the
+// simplest approach is to generate such stub early enough so it can never be
+// moved by GC
+class DirectCEntryStub: public CodeStub {
+ public:
+  DirectCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+  void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+
+ private:
+  Major MajorKey() { return DirectCEntry; }
+  int MinorKey() { return 0; }
+
+  const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+// Generate code the to load an element from a pixel array. The receiver is
+// assumed to not be a smi and to have elements, the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODE_STUBS_ARM_H_

deps/v8/src/arm/codegen-arm.cc (12)

@@ -1110,7 +1110,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() {
   Register int32 = r2;
 
   // Not a 32bits signed int, fall back to the GenericBinaryOpStub.
-  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+  __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
 
   // tos_register_ (r0 or r1): Original heap number.
   // int32: signed 32bits int.
@@ -4177,7 +4177,10 @@ void CodeGenerator::VisitCall(Call* node) {
       __ ldr(r1, frame_->Receiver());
       frame_->EmitPush(r1);
 
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+      // Push the strict mode flag.
+      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
 
       done.Jump();
       slow.Bind();
@@ -4197,8 +4200,11 @@
     __ ldr(r1, frame_->Receiver());
     frame_->EmitPush(r1);
 
+    // Push the strict mode flag.
+    frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
     // Resolve the call.
-    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
     // If we generated fast-case code bind the jump-target where fast
     // and slow case merge.
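The strict mode flag is pushed because a direct eval inherits the strictness of the calling code, and that changes how the evaluated source binds variables, so the runtime resolver now needs to know it. A short JavaScript illustration of the difference:

    function sloppyCaller() {
      eval('var leaked = 1');
      return typeof leaked;     // "number": the var lands in the caller's scope
    }

    function strictCaller() {
      'use strict';
      eval('var contained = 1');
      return typeof contained;  // "undefined": strict eval gets its own scope
    }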

deps/v8/src/arm/codegen-arm.h (1)

@@ -287,6 +287,7 @@ class CodeGenerator: public AstVisitor {
   // Accessors
   inline bool is_eval();
   inline Scope* scope();
+  inline StrictModeFlag strict_mode_flag();
 
   // Generating deferred code.
   void ProcessDeferred();

deps/v8/src/arm/constants-arm.h (24)

@@ -380,10 +380,13 @@ enum VFPRegPrecision {
 // VFP FPSCR constants.
+enum VFPConversionMode {
+  kFPSCRRounding = 0,
+  kDefaultRoundToZero = 1
+};
+
 static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
 static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
 static const uint32_t kVFPInvalidExceptionBit = 1;
 
 static const uint32_t kVFPNConditionFlagBit = 1 << 31;
@@ -393,13 +396,20 @@ static const uint32_t kVFPVConditionFlagBit = 1 << 28;
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
-  RN,   // Round to Nearest.
-  RP,   // Round towards Plus Infinity.
-  RM,   // Round towards Minus Infinity.
-  RZ    // Round towards zero.
+enum VFPRoundingMode {
+  RN = 0 << 22,   // Round to Nearest.
+  RP = 1 << 22,   // Round towards Plus Infinity.
+  RM = 2 << 22,   // Round towards Minus Infinity.
+  RZ = 3 << 22,   // Round towards zero.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM,
+  kRoundToZero = RZ
 };
 
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
+
 // -----------------------------------------------------------------------------
 // Hints.
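The kDefaultRoundToZero default used by the vcvt helpers matches JavaScript semantics: ToInt32 (used by the bitwise operators) truncates toward zero (RZ), while Math.floor corresponds to rounding toward minus infinity (RM). The two differ on negative inputs:

    var x = -3.7;
    x | 0;          // -3: ToInt32 truncates toward zero (RZ)
    Math.floor(x);  // -4: rounds toward minus infinity (RM)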

deps/v8/src/arm/deoptimizer-arm.cc (2)

@@ -97,7 +97,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 #ifdef DEBUG
   // Destroy the code which is not supposed to be run again.
   int instructions =
-      (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+      (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
   CodePatcher destroyer(code->instruction_start() + last_pc_offset,
                         instructions);
   for (int x = 0; x < instructions; x++) {

deps/v8/src/arm/full-codegen-arm.cc (10)

@@ -1554,7 +1554,10 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
       op == Token::SUB ||
       op == Token::MUL ||
       op == Token::DIV ||
-      op == Token::MOD) {
+      op == Token::MOD ||
+      op == Token::BIT_OR ||
+      op == Token::BIT_AND ||
+      op == Token::BIT_XOR) {
     TypeRecordingBinaryOpStub stub(op, mode);
     __ CallStub(&stub);
   } else {
@@ -1923,7 +1926,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
       __ ldr(r1,
              MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
       __ push(r1);
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+      // Push the strict mode flag.
+      __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+      __ push(r1);
+      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.

deps/v8/src/arm/ic-arm.cc (25)

@@ -1189,19 +1189,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // r0: key
   // r1: receiver
   __ bind(&check_pixel_array);
-  __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
-  __ cmp(r3, ip);
-  __ b(ne, &check_number_dictionary);
-  __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
-  __ mov(r2, Operand(key, ASR, kSmiTagSize));
-  __ cmp(r2, ip);
-  __ b(hs, &slow);
-  __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
-  __ ldrb(r2, MemOperand(ip, r2));
-  __ mov(r0, Operand(r2, LSL, kSmiTagSize));  // Tag result as smi.
-  __ Ret();
+
+  GenerateFastPixelArrayLoad(masm,
+                             r1,
+                             r0,
+                             r3,
+                             r4,
+                             r2,
+                             r5,
+                             r0,
+                             &check_number_dictionary,
+                             NULL,
+                             &slow);
 
   __ bind(&check_number_dictionary);
 
   // Check whether the elements is a number dictionary.

deps/v8/src/arm/lithium-arm.cc (117)

@@ -25,6 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "lithium-allocator-inl.h"
 #include "arm/lithium-arm.h"
 #include "arm/lithium-codegen-arm.h"
@@ -56,6 +57,31 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index,
 }
 
 
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as
+  // temporaries and outputs because all registers
+  // are blocked by the calling convention.
+  // Inputs can use either fixed register or have a short lifetime (be
+  // used at start of the instruction).
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           LUnallocated::cast(operand)->IsUsedAtStart() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+  for (TempIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+}
+#endif
+
+
 void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
                                           LOperand* spill_operand) {
   ASSERT(spill_operand->IsDoubleStackSlot());
@@ -66,9 +92,8 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
-  if (HasResult()) {
-    PrintOutputOperandTo(stream);
-  }
+
+  PrintOutputOperandTo(stream);
 
   PrintDataTo(stream);
@@ -158,6 +183,9 @@ const char* LArithmeticT::Mnemonic() const {
     case Token::MUL: return "mul-t";
     case Token::MOD: return "mod-t";
     case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
     default:
       UNREACHABLE();
       return NULL;
@@ -258,7 +286,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
 }
@@ -390,7 +426,7 @@ void LChunk::MarkEmptyBlocks() {
 }
 
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
   if (instr->IsControl()) {
@@ -406,7 +442,6 @@
     pointer_maps_.Add(instr->pointer_map());
     instr->pointer_map()->set_lithium_position(index);
   }
-  return index;
 }
@@ -672,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  allocator_->MarkAsCall();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
   if (hinstr->HasSideEffects()) {
@@ -697,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  allocator_->MarkAsSaveDoubles();
+  instr->MarkAsSaveDoubles();
   return instr;
 }
@@ -742,13 +780,23 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
 LInstruction* LChunkBuilder::DoBit(Token::Value op,
                                    HBitwiseBinaryOperation* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().IsInteger32());
-  ASSERT(instr->right()->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-  return DefineSameAsFirst(new LBitI(op, left, right));
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
@@ -887,7 +935,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
 void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
-  allocator_->BeginInstruction();
   if (current->has_position()) position_ = current->position();
   LInstruction* instr = current->CompileToLithium(this);
@@ -910,11 +957,7 @@
       instr->set_hydrogen_value(current);
     }
 
-    int index = chunk_->AddInstruction(instr, current_block_);
-    allocator_->SummarizeInstruction(index);
-  } else {
-    // This instruction should be omitted.
-    allocator_->OmitInstruction();
+    chunk_->AddInstruction(instr, current_block_);
   }
 
   current_instruction_ = old_current;
 }
@@ -1105,13 +1148,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new LGlobalObject);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  return DefineAsRegister(new LGlobalReceiver);
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
@@ -1514,7 +1570,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res = new LDoubleToI(value);
+      LDoubleToI* res = new LDoubleToI(value, TempRegister());
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
@@ -1621,7 +1677,20 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  return DefineAsRegister(new LLoadContextSlot);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context = UseTempRegister(instr->context());
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    value = UseTempRegister(instr->value());
+  } else {
+    value = UseRegister(instr->value());
+  }
+  return new LStoreContextSlot(context, value);
 }

243
deps/v8/src/arm/lithium-arm.h

@ -39,118 +39,6 @@ namespace internal {
// Forward declarations. // Forward declarations.
class LCodeGen; class LCodeGen;
// Type hierarchy:
//
// LInstruction
// LTemplateInstruction
// LControlInstruction
// LBranch
// LClassOfTestAndBranch
// LCmpJSObjectEqAndBranch
// LCmpIDAndBranch
// LHasCachedArrayIndexAndBranch
// LHasInstanceTypeAndBranch
// LInstanceOfAndBranch
// LIsNullAndBranch
// LIsObjectAndBranch
// LIsSmiAndBranch
// LTypeofIsAndBranch
// LAccessArgumentsAt
// LArgumentsElements
// LArgumentsLength
// LAddI
// LApplyArguments
// LArithmeticD
// LArithmeticT
// LBitI
// LBoundsCheck
// LCmpID
// LCmpJSObjectEq
// LCmpT
// LDivI
// LInstanceOf
// LInstanceOfKnownGlobal
// LLoadKeyedFastElement
// LLoadKeyedGeneric
// LModI
// LMulI
// LPower
// LShiftI
// LSubI
// LCallConstantFunction
// LCallFunction
// LCallGlobal
// LCallKeyed
// LCallKnownGlobal
// LCallNamed
// LCallRuntime
// LCallStub
// LConstant
// LConstantD
// LConstantI
// LConstantT
// LDeoptimize
// LFunctionLiteral
// LGap
// LLabel
// LGlobalObject
// LGlobalReceiver
// LGoto
// LLazyBailout
// LLoadGlobal
// LCheckPrototypeMaps
// LLoadContextSlot
// LArrayLiteral
// LObjectLiteral
// LRegExpLiteral
// LOsrEntry
// LParameter
// LRegExpConstructResult
// LStackCheck
// LStoreKeyed
// LStoreKeyedFastElement
// LStoreKeyedGeneric
// LStoreNamed
// LStoreNamedField
// LStoreNamedGeneric
// LStringCharCodeAt
// LBitNotI
// LCallNew
// LCheckFunction
// LCheckPrototypeMaps
// LCheckInstanceType
// LCheckMap
// LCheckSmi
// LClassOfTest
// LDeleteProperty
// LDoubleToI
// LFixedArrayLength
// LHasCachedArrayIndex
// LHasInstanceType
// LInteger32ToDouble
// LIsNull
// LIsObject
// LIsSmi
// LJSArrayLength
// LLoadNamedField
// LLoadNamedGeneric
// LLoadFunctionPrototype
// LNumberTagD
// LNumberTagI
// LPushArgument
// LReturn
// LSmiTag
// LStoreGlobal
// LStringLength
// LTaggedToI
// LThrow
// LTypeof
// LTypeofIs
// LUnaryMathOperation
// LValueOf
// LUnknownOSRValue
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
@@ -187,6 +75,8 @@ class LCodeGen;
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -197,6 +87,7 @@ class LCodeGen;
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -207,6 +98,10 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -218,22 +113,16 @@ class LCodeGen;
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -241,6 +130,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -249,14 +139,15 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(SubI) \
V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
V(Typeof) \
@@ -290,7 +181,10 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
: hydrogen_value_(NULL) { }
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false),
is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -307,16 +201,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_.set(env); }
LEnvironment* environment() const { return environment_.get(); }
bool HasEnvironment() const { return environment_.is_set(); }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -330,11 +222,35 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
void MarkAsCall() { is_call_ = true; }
void MarkAsSaveDoubles() { is_save_doubles_ = true; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
#ifdef DEBUG
void VerifyCall();
#endif
private:
SetOncePointer<LEnvironment> environment_;
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
bool is_call_;
bool is_save_doubles_;
};
@@ -361,6 +277,11 @@ class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
ElementType& operator[](int i) {
UNREACHABLE();
static ElementType t = 0;
return t;
}
};
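The zero-element OperandContainer specialization above still has to define operator[] so that generic code instantiated for zero inputs compiles; the UNREACHABLE() guard plus a static dummy element keeps the type usable without giving it storage. The same trick in isolation (a sketch, not the V8 class):

  #include <cassert>

  // Zero-length specialization: indexing is statically impossible, but
  // the operator must exist so templated callers compile for N == 0.
  template <typename ElementType, int kNumElements>
  struct Operands {
    ElementType elems_[kNumElements];
    ElementType& operator[](int i) { return elems_[i]; }
  };

  template <typename ElementType>
  struct Operands<ElementType, 0> {
    ElementType& operator[](int) {
      assert(false && "no elements to index");
      static ElementType dummy = ElementType();
      return dummy;
    }
  };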
@@ -1266,18 +1187,41 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
};
class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
int context_chain_length() { return hydrogen()->context_chain_length(); }
LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1288,15 +1232,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
};
class LOuterContext: public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
LOperand* context() { return InputAt(0); }
};
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
LOperand* context() { return InputAt(0); }
};
class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
LOperand* global() { return InputAt(0); }
};
@@ -1431,10 +1405,11 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
explicit LDoubleToI(LOperand* value) {
explicit LDoubleToI(LOperand* value, LOperand* temp1) {
inputs_[0] = value;
temps_[0] = temp1;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1789,7 +1764,7 @@ class LChunk: public ZoneObject {
public:
explicit LChunk(HGraph* graph);
int AddInstruction(LInstruction* instruction, HBasicBlock* block);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;

196
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -223,7 +223,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
code->set_safepoint_table_start(safepoints_.GetCodeOffset());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -1174,7 +1174,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// scratch:left = left * right.
__ smull(scratch, left, left, right);
__ smull(left, scratch, left, right);
__ mov(ip, Operand(left, ASR, 31));
__ cmp(ip, Operand(scratch));
DeoptimizeIf(ne, instr->environment());
@@ -1398,7 +1398,18 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vdiv(left, left, right);
break;
case Token::MOD: {
Abort("DoArithmeticD unimplemented for MOD.");
// Save r0-r3 on the stack.
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
__ PrepareCallCFunction(4, scratch0());
__ vmov(r0, r1, left);
__ vmov(r2, r3, right);
__ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
// Move the result in the double result register.
__ vmov(ToDoubleRegister(instr->result()), r0, r1);
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
break;
}
default:
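For context on the Token::MOD case above: ARM VFP has no double-precision remainder instruction, so the generated code spills r0-r3, marshals the two doubles into those registers, and calls out to C. The C-side target is essentially fmod; a sketch of what ExternalReference::double_fp_operation(Token::MOD) is assumed to resolve to:

  #include <cmath>

  // Sketch of the host function behind the MOD call above: left arrives
  // in r0/r1, right in r2/r3, and the double result comes back in r0/r1,
  // from where the stub vmovs it into the result register.
  extern "C" double mod_two_doubles(double left, double right) {
    return std::fmod(left, right);
  }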
@@ -1595,17 +1606,58 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
Abort("EmitCmpI untested.");
}
void LCodeGen::DoCmpID(LCmpID* instr) {
Abort("DoCmpID unimplemented.");
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
Register scratch = scratch0();
Label unordered, done;
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
} else {
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
__ b(cc, &done);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
Abort("DoCmpIDAndBranch unimplemented.");
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
} else {
EmitCmpI(left, right);
}
Condition cc = TokenToCondition(instr->op(), instr->is_double());
EmitBranch(true_block, false_block, cc);
}
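Both compare paths above branch on the V flag (vs) before testing the condition, because a VFP compare involving NaN comes out unordered and any such comparison must evaluate to false. The equivalent semantics in portable C++, for reference:

  #include <cmath>

  // vcmp sets the V flag when either operand is NaN (unordered); the
  // code above routes that case straight to the false result or block.
  bool OrderedLessThan(double a, double b) {
    if (std::isnan(a) || std::isnan(b)) return false;  // unordered
    return a < b;
  }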
@@ -2201,13 +2253,27 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
// TODO(antonm): load a context with a separate instruction.
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ LoadContext(result, instr->context_chain_length());
__ ldr(result,
MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ ldr(result, ContextOperand(result, instr->slot_index()));
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ ldr(context,
MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWrite(context, Operand(offset), value, scratch0());
}
}
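DoStoreContextSlot only emits RecordWrite when hydrogen proved the barrier necessary. The barrier's job is to record old-to-new pointers so a scavenge can still find objects that only an old-space slot references; a self-contained miniature of that check (not V8's page-based remembered set):

  #include <cstdint>
  #include <unordered_set>

  // Miniature generational write barrier: only an old-space slot that
  // now points at a new-space object needs to be remembered; everything
  // else the scavenger reaches through roots and new space anyway.
  struct MiniHeap {
    uintptr_t new_space_start, new_space_end;
    std::unordered_set<void**> remembered;

    bool InNewSpace(void* p) const {
      uintptr_t a = reinterpret_cast<uintptr_t>(p);
      return a >= new_space_start && a < new_space_end;
    }

    void Store(void** slot, void* value) {
      *slot = value;
      if (InNewSpace(value) && !InNewSpace(slot)) remembered.insert(slot);
    }
  };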
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2458,16 +2524,32 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, cp);
}
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result,
MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
__ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2625,34 +2707,53 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register prev_fpscr = ToRegister(instr->TempAt(0));
SwVfpRegister single_scratch = double_scratch0().low();
Register scratch = scratch0();
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
Register scratch1,
Register scratch2) {
Register prev_fpscr = scratch1;
Register scratch = scratch2;
// Set custom FPCSR:
// - Set rounding mode to "Round towards Minus Infinity".
// - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
__ vmrs(prev_fpscr);
__ bic(scratch, prev_fpscr,
Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
__ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
__ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
kVFPRoundingModeMask |
kVFPFlushToZeroMask));
__ orr(scratch, scratch, Operand(rounding_mode));
__ vmsr(scratch);
// Convert the argument to an integer.
__ vcvt_s32_f64(single_scratch,
input,
Assembler::FPSCRRounding,
al);
__ vcvt_s32_f64(result,
double_input,
kFPSCRRounding);
// Retrieve FPSCR and check for vfp exceptions.
// Retrieve FPSCR.
__ vmrs(scratch);
// Restore FPSCR
// Restore FPSCR.
__ vmsr(prev_fpscr);
// Check for vfp exceptions.
__ tst(scratch, Operand(kVFPExceptionMask));
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
SwVfpRegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
EmitVFPTruncate(kRoundToMinusInf,
single_scratch,
input,
scratch1,
scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to general purpose register r0.
@@ -2662,8 +2763,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
__ vmov(scratch, input.high());
__ tst(scratch, Operand(HeapNumber::kSignMask));
__ vmov(scratch1, input.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
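EmitVFPTruncate is the assembly version of what C99's fenv machinery does: save FPSCR, select a rounding mode, convert, restore, and inspect the sticky exception flags. A portable analogue of the floor path above (a sketch; assumes lrint plus a 32-bit result is enough, and a conforming fenv implementation):

  #include <cfenv>
  #include <cmath>

  // Round toward minus infinity, then detect the two cases the generated
  // code deoptimizes on: an invalid conversion (NaN / out of range) and
  // a result of -0.
  bool FloorToInt32(double x, int* out) {
    const int old_mode = std::fegetround();
    std::feclearexcept(FE_ALL_EXCEPT);
    std::fesetround(FE_DOWNWARD);        // kRoundToMinusInf
    long r = std::lrint(x);              // converts in the current mode
    std::fesetround(old_mode);
    if (std::fetestexcept(FE_INVALID)) return false;  // deopt case
    if (r == 0 && std::signbit(x)) return false;      // -0: deopt case
    *out = static_cast<int>(r);          // sketch assumes 32-bit range
    return true;
  }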
@@ -3297,7 +3398,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Abort("DoDoubleToI unimplemented.");
LOperand* input = instr->InputAt(0);
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
DoubleRegister double_input = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
SwVfpRegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
: kRoundToNearest;
EmitVFPTruncate(rounding_mode,
single_scratch,
double_input,
scratch1,
scratch2);
// Deoptimize if we had a vfp invalid exception.
DeoptimizeIf(ne, instr->environment());
// Retrieve the result.
__ vmov(result_reg, single_scratch);
if (instr->truncating() &&
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand(0));
__ b(ne, &done);
// Check for -0.
__ vmov(scratch1, double_input.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
}
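The trailing minus-zero check works because -0.0 compares equal to 0 yet carries a set sign bit in the high word of its IEEE-754 encoding, which is exactly what the vmov of double_input.high() plus the HeapNumber::kSignMask test inspects. The same test in plain C++:

  #include <cstdint>
  #include <cstring>

  // -0.0 == 0.0 is true, so the sign bit is the only way to tell them
  // apart; bit 63 of the encoding is what kSignMask tests above.
  bool IsMinusZero(double x) {
    if (x != 0.0) return false;
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof bits);
    return (bits >> 63) != 0;  // same answer as std::signbit(x)
  }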
@@ -3497,7 +3633,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = !instr->hydrogen()->pretenure();
bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ mov(r1, Operand(shared_info));

5
deps/v8/src/arm/lithium-codegen-arm.h

@@ -219,6 +219,11 @@ class LCodeGen BASE_EMBEDDED {
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
Register scratch1,
Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);

158
deps/v8/src/arm/macro-assembler-arm.cc

@@ -632,11 +632,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
void MacroAssembler::EnterExitFrame(bool save_doubles) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Compute the argv pointer in a callee-saved register.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
sub(r6, r6, Operand(kPointerSize));
// Setup the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -658,10 +654,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
mov(ip, Operand(ExternalReference(Top::k_context_address)));
str(cp, MemOperand(ip));
// Setup argc and the builtin function in callee-saved registers.
mov(r4, Operand(r0));
mov(r5, Operand(r1));
// Optionally save all double registers.
if (save_doubles) {
sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
@@ -675,10 +667,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// since the sp slot and code slot were pushed after the fp.
}
// Reserve place for the return address and align the frame preparing for
// calling the runtime function.
// Reserve place for the return address and stack space and align the frame
// preparing for calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand(kPointerSize));
sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
@@ -1475,14 +1467,112 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
return result;
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ApiFunction* function, int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
ExternalReference::handle_scope_limit_address(),
next_address);
const int kLevelOffset = AddressOffset(
ExternalReference::handle_scope_level_address(),
next_address);
// Allocate HandleScope in callee-save registers.
mov(r7, Operand(next_address));
ldr(r4, MemOperand(r7, kNextOffset));
ldr(r5, MemOperand(r7, kLimitOffset));
ldr(r6, MemOperand(r7, kLevelOffset));
add(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
stub.GenerateCall(this, function);
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
// If result is non-zero, dereference to get the result value
// otherwise set it to undefined.
cmp(r0, Operand(0));
LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
ldr(r0, MemOperand(r0), ne);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
if (FLAG_debug_code) {
ldr(r1, MemOperand(r7, kLevelOffset));
cmp(r1, r6);
Check(eq, "Unexpected level after return from api call");
}
sub(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
ldr(ip, MemOperand(r7, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
// Check if the function scheduled an exception.
bind(&leave_exit_frame);
LoadRoot(r4, Heap::kTheHoleValueRootIndex);
mov(ip, Operand(ExternalReference::scheduled_exception_address()));
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
// LeaveExitFrame expects unwind space to be in r4.
mov(r4, Operand(stack_space));
LeaveExitFrame(false);
bind(&promote_scheduled_exception);
MaybeObject* result = TryTailCallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
if (result->IsFailure()) {
return result;
}
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
str(r5, MemOperand(r7, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(0, r5);
CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
mov(r0, r4);
jmp(&leave_exit_frame);
return result;
}
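What TryCallApiFunctionAndReturn does around the call is an open-coded HandleScope: save next and limit, bump level, call, then pop next back and drop level; if the callee grew limit, extension blocks were allocated and must be deleted, which is the delete_allocated_handles path above. The bookkeeping in miniature (a sketch, not V8's actual layout):

  // Miniature of the handle-scope fields the assembly keeps in r4
  // (next), r5 (limit) and r6 (level).
  struct HandleScopeData {
    void** next;
    void** limit;
    int level;
  };

  void CallWithScope(HandleScopeData* data, void (*api_call)()) {
    void** saved_next = data->next;
    void** saved_limit = data->limit;
    data->level++;                 // add(r6, r6, Operand(1)) above
    api_call();
    data->next = saved_next;       // pop everything the callee allocated
    data->level--;
    if (data->limit != saved_limit) {
      // The callee allocated extension blocks; freeing them is what the
      // delete_allocated_handles path does before resetting the limit.
      data->limit = saved_limit;
    }
  }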
@@ -1577,13 +1667,14 @@ void MacroAssembler::ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(d0, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(s0, d0);
vmov(dest, s0);
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch);
vmov(dest, double_scratch.low());
// Signed vcvt instruction will saturate to the minimum (0x80000000) or
// maximun (0x7fffffff) signed 32bits integer when the double is out of
// range. When substracting one, the minimum signed integer becomes the
@@ -1739,6 +1830,17 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
MaybeObject* MacroAssembler::TryTailCallExternalReference(
const ExternalReference& ext, int num_arguments, int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
return TryJumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -1757,6 +1859,18 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
MaybeObject* MacroAssembler::TryJumpToExternalReference(
const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
CEntryStub stub(1);
return TryTailCallStub(&stub);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator) {
@@ -1999,6 +2113,16 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
ASSERT(!src.is(ip));
LoadRoot(ip, root_value_index);
cmp(src, ip);
Assert(eq, message);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,

36
deps/v8/src/arm/macro-assembler-arm.h

@@ -287,10 +287,8 @@ class MacroAssembler: public Assembler {
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
// Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and and the builtin function to call in r5.
void EnterExitFrame(bool save_doubles);
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(bool save_doubles);
@@ -589,11 +587,13 @@ class MacroAssembler: public Assembler {
// Convert the HeapNumber pointed to by source to a 32bits signed integer
// dest. If the HeapNumber does not fit into a 32bits signed integer branch
// to not_int32 label.
// to not_int32 label. If VFP3 is available double_scratch is used but not
// scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
@@ -614,6 +614,12 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Tail call a code stub (jump) and return the code object called. Try to
// generate the code if necessary. Do not perform a GC but instead return
// a retry after GC failure.
MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
Condition cond = al);
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -632,6 +638,12 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
// Tail call of a runtime routine (jump). Try to generate the code if
// necessary. Do not perform a GC but instead return a retry after GC
// failure.
MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
const ExternalReference& ext, int num_arguments, int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -655,9 +667,18 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context.
// stack_space - space to be unwound on exit (includes the call js
// arguments space and the additional space allocated for the fast call).
MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
int stack_space);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -763,6 +784,11 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities

213
deps/v8/src/arm/simulator-arm.cc

@@ -744,10 +744,10 @@ Simulator::Simulator() {
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
Redirection(void* external_function, bool fp_return)
Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
fp_return_(fp_return),
type_(type),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
@ -760,14 +760,15 @@ class Redirection {
} }
void* external_function() { return external_function_; } void* external_function() { return external_function_; }
bool fp_return() { return fp_return_; } ExternalReference::Type type() { return type_; }
static Redirection* Get(void* external_function, bool fp_return) { static Redirection* Get(void* external_function,
ExternalReference::Type type) {
Redirection* current; Redirection* current;
for (current = list_; current != NULL; current = current->next_) { for (current = list_; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current; if (current->external_function_ == external_function) return current;
} }
return new Redirection(external_function, fp_return); return new Redirection(external_function, type);
} }
static Redirection* FromSwiInstruction(Instruction* swi_instruction) { static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -780,7 +781,7 @@ class Redirection {
private:
void* external_function_;
uint32_t swi_instruction_;
bool fp_return_;
ExternalReference::Type type_;
Redirection* next_;
static Redirection* list_;
};
@@ -790,8 +791,8 @@ Redirection* Redirection::list_ = NULL;
void* Simulator::RedirectExternalReference(void* external_function,
bool fp_return) {
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, fp_return);
Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
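The Redirection mechanism here lets simulated ARM code "call" a host function: each external reference is wrapped in a small record whose embedded swi instruction is handed out as the function's address; when the simulator executes that swi it recovers the record and dispatches on the stored type. The shape of the record, condensed (a sketch, not the full class):

  #include <cstdint>

  // Simulated code calls the address of swi_instruction_; the trap
  // handler finds the enclosing record from that address and uses
  // type_ to pick the host calling convention.
  struct RedirectionSketch {
    void* external_function_;
    uint32_t swi_instruction_;  // special swi the simulator recognizes
    int type_;                  // BUILTIN_CALL, FP_RETURN_CALL, DIRECT_CALL
    RedirectionSketch* next_;   // intrusive list, searched by function
  };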
@@ -1528,6 +1529,9 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg2,
int32_t arg3);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1550,9 +1554,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
if (redirection->fp_return()) {
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1568,9 +1572,28 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
} else if (redirection->type() == ExternalReference::DIRECT_CALL) {
SimulatorRuntimeApiCall target =
reinterpret_cast<SimulatorRuntimeApiCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p args %08x",
FUNCTION_ADDR(target),
arg0);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
v8::Handle<v8::Value> result = target(arg0);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, (int32_t) *result);
} else {
intptr_t external =
reinterpret_cast<int32_t>(redirection->external_function());
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -2539,7 +2562,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(overflow_vfp_flag_ << 2) |
(div_zero_vfp_flag_ << 1) |
(inv_op_vfp_flag_ << 0) |
(FPSCR_rounding_mode_ << 22);
(FPSCR_rounding_mode_);
set_register(rt, fpscr);
}
} else if ((instr->VLValue() == 0x0) &&
@@ -2562,7 +2585,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
div_zero_vfp_flag_ = (rt_value >> 1) & 1;
inv_op_vfp_flag_ = (rt_value >> 0) & 1;
FPSCR_rounding_mode_ =
static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2651,87 +2674,135 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
}
}
bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xffffffffu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
// Check for NaN.
if (val != val) {
return true;
}
// Check for overflow. This code works because 32bit integers can be
// exactly represented by ieee-754 64bit floating-point values.
switch (mode) {
case RN:
return unsigned_ ? (val >= (max_uint + 0.5)) ||
(val < -0.5)
: (val >= (max_int + 0.5)) ||
(val < (min_int - 0.5));
case RM:
return unsigned_ ? (val >= (max_uint + 1.0)) ||
(val < 0)
: (val >= (max_int + 1.0)) ||
(val < min_int);
case RZ:
return unsigned_ ? (val >= (max_uint + 1.0)) ||
(val <= -1)
: (val >= (max_int + 1.0)) ||
(val <= (min_int - 1.0));
default:
UNREACHABLE();
return true;
}
}
// We call this function only if we had a vfp invalid exception.
// It returns the correct saturated value.
int VFPConversionSaturate(double val, bool unsigned_res) {
if (val != val) {
return 0;
} else {
if (unsigned_res) {
return (val < 0) ? 0 : 0xffffffffu;
} else {
return (val < 0) ? kMinInt : kMaxInt;
}
}
}
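These helpers feed the conversion code below, whose RN case implements IEEE round-to-nearest, ties-to-even by hand: truncate toward zero, step one unit toward the value when the discarded fraction exceeds one half, and on an exact half pick the even neighbour (so 2.5 rounds to 2 but 3.5 rounds to 4). Extracted as a standalone function:

  #include <cmath>

  // Round-to-nearest, ties-to-even, as in the RN case further below.
  // Assumes val fits in int; the simulator checks overflow separately.
  int RoundTiesToEven(double val) {
    int temp = static_cast<int>(val);      // truncation toward zero
    double abs_diff = std::fabs(val - temp);
    int val_sign = (val > 0) ? 1 : -1;
    if (abs_diff > 0.5) {
      temp += val_sign;
    } else if (abs_diff == 0.5) {
      temp = ((temp % 2) == 0) ? temp : temp + val_sign;  // pick even
    }
    return temp;
  }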
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
(instr->Bits(27, 23) == 0x1D));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
VFPRegPrecision src_precision = kSinglePrecision;
if (instr->SzValue() == 1) {
src_precision = kDoublePrecision;
}
VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
: kSinglePrecision;
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
FPSCRRoundingModes mode;
if (instr->Bit(7) != 1) {
// Use FPSCR defined rounding mode.
mode = FPSCR_rounding_mode_;
// Only RZ and RM modes are supported.
ASSERT((mode == RM) || (mode == RZ));
} else {
// VFP uses round towards zero by default.
mode = RZ;
}
// We are playing with code close to the C++ standard's limits below,
// hence the very simple code and heavy checks.
//
// Note:
// C++ defines default type casting from floating point to integer as
// (close to) rounding toward zero ("fractional part discarded").
int dst = instr->VFPDRegValue(kSinglePrecision);
int src = instr->VFPMRegValue(src_precision);
int32_t kMaxInt = v8::internal::kMaxInt;
int32_t kMinInt = v8::internal::kMinInt;
switch (mode) {
case RM:
if (src_precision == kDoublePrecision) {
double val = get_double_from_d_register(src);
inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
sint = sint > val ? sint - 1 : sint;
set_s_register_from_sinteger(dst, sint);
} else {
float val = get_float_from_s_register(src);
inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
sint = sint > val ? sint - 1 : sint;
set_s_register_from_sinteger(dst, sint);
}
break;
case RZ:
if (src_precision == kDoublePrecision) {
double val = get_double_from_d_register(src);
inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
set_s_register_from_sinteger(dst, sint);
} else {
float val = get_float_from_s_register(src);
inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
set_s_register_from_sinteger(dst, sint);
}
break;
// Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
// mode or the default Round to Zero mode.
VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
: RZ;
ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
double val = double_precision ? get_double_from_d_register(src)
: get_float_from_s_register(src);
int temp = unsigned_integer ? static_cast<uint32_t>(val)
: static_cast<int32_t>(val);
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
if (inv_op_vfp_flag_) {
temp = VFPConversionSaturate(val, unsigned_integer);
} else {
switch (mode) {
case RN: {
double abs_diff =
unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
: fabs(val - temp);
int val_sign = (val > 0) ? 1 : -1;
if (abs_diff > 0.5) {
temp += val_sign;
} else if (abs_diff == 0.5) {
// Round to even if exactly halfway.
temp = ((temp % 2) == 0) ? temp : temp + val_sign;
}
break;
}
case RM:
temp = temp > val ? temp - 1 : temp;
break;
case RZ:
// Nothing to do.
break;
default:
UNREACHABLE();
}
}
// Update the destination register.
set_s_register_from_sinteger(dst, temp);
} else {
bool unsigned_integer = (instr->Bit(7) == 0);

8
deps/v8/src/arm/simulator-arm.h

@@ -79,6 +79,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
#include "assembler.h"
namespace v8 {
namespace internal {
@@ -285,8 +286,9 @@ class Simulator {
static CachePage* GetCachePage(void* page);
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
bool fp_return);
static void* RedirectExternalReference(
void* external_function,
v8::internal::ExternalReference::Type type);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
@@ -312,7 +314,7 @@ class Simulator {
bool v_flag_FPSCR_;
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
FPSCRRoundingModes FPSCR_rounding_mode_;
VFPRoundingMode FPSCR_rounding_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;

230
deps/v8/src/arm/stub-cache-arm.cc

@@ -575,72 +575,94 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ CallStub(&stub);
}
static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiCall.
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
Register scratch) {
__ mov(scratch, Operand(Smi::FromInt(0)));
__ push(scratch);
__ push(scratch);
__ push(scratch);
__ push(scratch);
for (int i = 0; i < kFastApiCallArguments; i++) {
__ push(scratch);
}
}
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
__ Drop(4);
__ Drop(kFastApiCallArguments);
}
// Generates call to FastHandleApiCall builtin.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
// -- sp[8] : call data
// -- sp[12] : last js argument
// -- ...
// -- sp[(argc + 3) * 4] : first js argument
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
JSFunction* function = optimization.constant_function();
__ mov(r5, Operand(Handle<JSFunction>(function)));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
bool info_loaded = false;
Object* callback = optimization.api_call_info()->callback();
if (Heap::InNewSpace(callback)) {
info_loaded = true;
__ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
__ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
} else {
__ Move(r7, Handle<Object>(callback));
}
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
if (Heap::InNewSpace(call_data)) {
if (!info_loaded) {
__ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
}
__ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
__ Move(r6, Handle<Object>(call_data));
}
__ add(sp, sp, Operand(1 * kPointerSize));
__ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
__ sub(sp, sp, Operand(1 * kPointerSize));
// Set the number of arguments.
__ mov(r0, Operand(argc + 4));
// Jump to the fast api call builtin (tail call).
Handle<Code> code = Handle<Code>(
Builtins::builtin(Builtins::FastHandleApiCall));
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
// Store js function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
// r2 points to call data as expected by Arguments
// (refer to layout above).
__ add(r2, sp, Operand(2 * kPointerSize));
Object* callback = optimization.api_call_info()->callback();
Address api_function_address = v8::ToCData<Address>(callback);
ApiFunction fun(api_function_address);
const int kApiStackSpace = 4;
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// v8::Arguments::implicit_args = data
__ str(r2, MemOperand(r0, 0 * kPointerSize));
// v8::Arguments::values = last argument
__ add(ip, r2, Operand(argc * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// v8::Arguments::length_ = argc
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
__ mov(ip, Operand(0));
__ str(ip, MemOperand(r0, 3 * kPointerSize));
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
MaybeObject* result = masm->TryCallApiFunctionAndReturn(
&fun, argc + kFastApiCallArguments + 1);
if (result->IsFailure()) {
return result;
}
return Heap::undefined_value();
}
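The four words the stub stores through r0 are a by-hand v8::Arguments: implicit_args, values, length and is_construct_call, with values pointing at the last JavaScript argument because arguments are laid out downwards in memory. As a struct (field order per the comments in the generated code; the real v8.h layout is assumed, not quoted):

  #include <cstdint>

  // The four stack words the stub fills in at r0 .. r0+12, in order.
  struct ArgumentsBlock {
    void** implicit_args;       // -> holder/callee/call-data block (r2)
    void** values;              // -> last JS argument (args grow down)
    int32_t length;             // argc
    int32_t is_construct_call;  // 0 for a normal call
  };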
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -650,16 +672,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_(arguments),
name_(name) {}
void Compile(MacroAssembler* masm,
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -669,17 +691,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CallOptimization optimization(lookup);
if (optimization.is_constant_call()) {
CompileCacheable(masm,
return CompileCacheable(masm,
object,
receiver,
scratch1,
scratch2,
scratch3,
holder,
lookup,
name,
optimization,
miss);
} else {
CompileRegular(masm,
object,
@@ -690,21 +712,22 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
return Heap::undefined_value();
}
}
private:
void CompileCacheable(MacroAssembler* masm,
MaybeObject* CompileCacheable(MacroAssembler* masm,
JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -768,7 +791,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
MaybeObject* result = GenerateFastApiDirectCall(masm,
optimization,
arguments_.immediate());
if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -786,6 +812,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
return Heap::undefined_value();
} }
void CompileRegular(MacroAssembler* masm, void CompileRegular(MacroAssembler* masm,
@@ -2055,11 +2083,11 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
  //  - Make sure Flush-to-zero mode control bit is unset (bit 22).
  __ bic(r9, r3,
         Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+ __ orr(r9, r9, Operand(kRoundToMinusInf));
  __ vmsr(r9);

  // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);

  // Use vcvt latency to start checking for special cases.
  // Get the argument exponent and clear the sign bit.
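For context, this stub implements Math.floor by switching the VFP unit into round-toward-minus-infinity mode before the double-to-int conversion. Standard C++ exposes the same idea through &lt;cfenv&gt;; a hedged host-side analogy (not V8 code, and FE_DOWNWARD support is technically implementation-defined):

#include <cfenv>
#include <cmath>
#include <cstdio>

// Round-toward-minus-infinity turns a round-to-integer operation into
// floor(), which is what the ARM stub arranges via the FPSCR.
int FloorViaRoundingMode(double x) {
  std::fesetround(FE_DOWNWARD);
  int result = static_cast<int>(std::nearbyint(x));  // rounds per current mode
  std::fesetround(FE_TONEAREST);                     // restore the default
  return result;
}

int main() {
  std::printf("%d\n", FloorViaRoundingMode(-2.5));   // prints -3
}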
@@ -2368,7 +2396,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
  }

  if (depth != kInvalidProtoDepth) {
-   GenerateFastApiCall(masm(), optimization, argc);
+   MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+   if (result->IsFailure()) return result;
  } else {
    __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
  }
@@ -2412,16 +2441,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));

  CallInterceptorCompiler compiler(this, arguments(), r2);
- compiler.Compile(masm(),
+ MaybeObject* result = compiler.Compile(masm(),
                   object,
                   holder,
                   name,
                   &lookup,
                   r1,
                   r3,
                   r4,
                   r0,
                   &miss);
+ if (result->IsFailure()) {
+   return result;
+ }

  // Move returned value, the function to call, to r1.
  __ mov(r1, r0);
@@ -3087,6 +3119,38 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}

+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayLoad(masm(),
+                             r1,
+                             r0,
+                             r2,
+                             r3,
+                             r4,
+                             r5,
+                             r0,
+                             &miss,
+                             &miss,
+                             &miss);
+
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}

MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                       int index,
                                                       Map* transition,
@@ -3764,9 +3828,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
      // Not infinity or NaN simply convert to int.
      if (IsElementTypeSigned(array_type)) {
-       __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
+       __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
      } else {
-       __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
+       __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
      }
      __ vmov(r5, s0, ne);

8  deps/v8/src/array.js

@@ -1018,9 +1018,11 @@ function ArrayIndexOf(element, index) {
  } else {
    index = TO_INTEGER(index);
    // If index is negative, index from the end of the array.
-   if (index < 0) index = length + index;
-   // If index is still negative, search the entire array.
-   if (index < 0) index = 0;
+   if (index < 0) {
+     index = length + index;
+     // If index is still negative, search the entire array.
+     if (index < 0) index = 0;
+   }
  }
  var min = index;
  var max = length;
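Grouping the clamping under the `index < 0` branch makes the indexOf start-index rule explicit: negative values count from the end of the array, and anything still negative scans from the start. A small standalone sketch of the same normalization (plain C++, purely illustrative of the JS-visible behavior):

#include <cassert>

// indexOf-style start-index normalization: negative values count from
// the end, and anything still negative scans the whole array.
int NormalizeStartIndex(int index, int length) {
  if (index < 0) {
    index += length;
    if (index < 0) index = 0;
  }
  return index;
}

int main() {
  assert(NormalizeStartIndex(2, 5) == 2);    // in range: unchanged
  assert(NormalizeStartIndex(-2, 5) == 3);   // counts from the end
  assert(NormalizeStartIndex(-9, 5) == 0);   // clamped to the start
}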

12  deps/v8/src/assembler.cc

@@ -553,8 +553,9 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id)
  : address_(Redirect(Builtins::c_function_address(id))) {}

-ExternalReference::ExternalReference(ApiFunction* fun)
-  : address_(Redirect(fun->address())) {}
+ExternalReference::ExternalReference(
+    ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL)
+  : address_(Redirect(fun->address(), type)) {}

ExternalReference::ExternalReference(Builtins::Name name)
@@ -888,17 +889,18 @@ ExternalReference ExternalReference::double_fp_operation(
      UNREACHABLE();
  }
  // Passing true as 2nd parameter indicates that they return an fp value.
- return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
+ return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
}

ExternalReference ExternalReference::compare_doubles() {
  return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
-                                   false));
+                                   BUILTIN_CALL));
}

-ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+ExternalReference::ExternalReferenceRedirector*
+    ExternalReference::redirector_ = NULL;

#ifdef ENABLE_DEBUGGER_SUPPORT

35  deps/v8/src/assembler.h

@@ -459,9 +459,6 @@ class Debug_Address;
#endif

-typedef void* ExternalReferenceRedirector(void* original, bool fp_return);

// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
// an ExternalReference instance. This is done in order to track the origin of
@@ -469,9 +466,29 @@ typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED {
 public:
+  // Used in the simulator to support different native api calls.
+  //
+  // BUILTIN_CALL - builtin call.
+  // MaybeObject* f(v8::internal::Arguments).
+  //
+  // FP_RETURN_CALL - builtin call that returns floating point.
+  // double f(double, double).
+  //
+  // DIRECT_CALL - direct call to API function native callback
+  // from generated code.
+  // Handle<Value> f(v8::Arguments&)
+  //
+  enum Type {
+    BUILTIN_CALL,  // default
+    FP_RETURN_CALL,
+    DIRECT_CALL
+  };
+
+  typedef void* ExternalReferenceRedirector(void* original, Type type);
+
  explicit ExternalReference(Builtins::CFunctionId id);

- explicit ExternalReference(ApiFunction* ptr);
+ explicit ExternalReference(ApiFunction* ptr, Type type);

  explicit ExternalReference(Builtins::Name name);
@@ -599,17 +616,19 @@ class ExternalReference BASE_EMBEDDED {
  static ExternalReferenceRedirector* redirector_;

- static void* Redirect(void* address, bool fp_return = false) {
+ static void* Redirect(void* address,
+                       Type type = ExternalReference::BUILTIN_CALL) {
    if (redirector_ == NULL) return address;
-   void* answer = (*redirector_)(address, fp_return);
+   void* answer = (*redirector_)(address, type);
    return answer;
  }

- static void* Redirect(Address address_arg, bool fp_return = false) {
+ static void* Redirect(Address address_arg,
+                       Type type = ExternalReference::BUILTIN_CALL) {
    void* address = reinterpret_cast<void*>(address_arg);
    void* answer = (redirector_ == NULL) ?
                   address :
-                  (*redirector_)(address, fp_return);
+                  (*redirector_)(address, type);
    return answer;
  }
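The motivation for replacing the bool with a three-valued Type is that a simulator-side redirector must know the full native calling convention, not merely whether the return value is floating point, before it can marshal arguments and results. A hedged, self-contained sketch of a redirector keyed on such an enum (the Wrap* helpers are hypothetical stand-ins for trampoline machinery; the real dispatch lives in simulator-arm.cc):

// Illustrative only: dispatch on the call type so each native signature
// gets the right marshalling. The enum mirrors ExternalReference::Type.
namespace er {
enum Type { BUILTIN_CALL, FP_RETURN_CALL, DIRECT_CALL };

void* WrapBuiltinCall(void* f) { return f; }    // MaybeObject* f(Arguments)
void* WrapFpCall(void* f) { return f; }         // double f(double, double)
void* WrapDirectApiCall(void* f) { return f; }  // Handle<Value> f(v8::Arguments&)

void* Redirector(void* original, Type type) {
  switch (type) {
    case BUILTIN_CALL:   return WrapBuiltinCall(original);
    case FP_RETURN_CALL: return WrapFpCall(original);
    case DIRECT_CALL:    return WrapDirectApiCall(original);
  }
  return original;  // unreachable; keeps the compiler happy
}
}  // namespace er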

3  deps/v8/src/code-stubs.h

@@ -75,7 +75,8 @@ namespace internal {
  V(GetProperty)               \
  V(SetProperty)               \
  V(InvokeBuiltin)             \
- V(RegExpCEntry)
+ V(RegExpCEntry)              \
+ V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
#endif

4  deps/v8/src/codegen-inl.h

@@ -55,6 +55,10 @@ bool CodeGenerator::is_eval() { return info_->is_eval(); }
Scope* CodeGenerator::scope() { return info_->function()->scope(); }

+StrictModeFlag CodeGenerator::strict_mode_flag() {
+  return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
+}

} }  // namespace v8::internal

#endif  // V8_CODEGEN_INL_H_

21  deps/v8/src/compilation-cache.cc

@@ -136,7 +136,8 @@ class CompilationCacheEval: public CompilationSubCache {
      : CompilationSubCache(generations) { }

  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                   Handle<Context> context);
+                                   Handle<Context> context,
+                                   StrictModeFlag strict_mode);

  void Put(Handle<String> source,
           Handle<Context> context,
@@ -371,7 +372,9 @@ void CompilationCacheScript::Put(Handle<String> source,

Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
-   Handle<String> source, Handle<Context> context) {
+   Handle<String> source,
+   Handle<Context> context,
+   StrictModeFlag strict_mode) {
  // Make sure not to leak the table into the surrounding handle
  // scope. Otherwise, we risk keeping old tables around even after
  // having cleared the cache.
@@ -380,7 +383,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
  { HandleScope scope;
    for (generation = 0; generation < generations(); generation++) {
      Handle<CompilationCacheTable> table = GetTable(generation);
-     result = table->LookupEval(*source, *context);
+     result = table->LookupEval(*source, *context, strict_mode);
      if (result->IsSharedFunctionInfo()) {
        break;
      }
@@ -503,18 +506,20 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
}

-Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
-                                                        Handle<Context> context,
-                                                        bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(
+    Handle<String> source,
+    Handle<Context> context,
+    bool is_global,
+    StrictModeFlag strict_mode) {
  if (!IsEnabled()) {
    return Handle<SharedFunctionInfo>::null();
  }

  Handle<SharedFunctionInfo> result;
  if (is_global) {
-   result = eval_global.Lookup(source, context);
+   result = eval_global.Lookup(source, context, strict_mode);
  } else {
-   result = eval_contextual.Lookup(source, context);
+   result = eval_contextual.Lookup(source, context, strict_mode);
  }
  return result;
}
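The reason the eval cache must be keyed on strict mode as well as on source and context: the same source string can compile to different code depending on whether the calling code is strict, so a cache hit across modes would hand back the wrong SharedFunctionInfo. A toy illustration of the keying requirement (hypothetical types, not the real cache):

#include <map>
#include <string>

// Toy model: an eval cache keyed only by source would conflate the two
// compilations of "var x = 010;" (octal is legal sloppy, an error strict).
struct EvalKey {
  std::string source;
  bool strict;  // must participate in the key
  bool operator<(const EvalKey& o) const {
    return source != o.source ? source < o.source : strict < o.strict;
  }
};

std::map<EvalKey, int /* compiled unit id */> eval_cache;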

3  deps/v8/src/compilation-cache.h

@@ -51,7 +51,8 @@ class CompilationCache {
  // contain a script for the given source string.
  static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                               Handle<Context> context,
-                                              bool is_global);
+                                              bool is_global,
+                                              StrictModeFlag strict_mode);

  // Returns the regexp data associated with the given regexp if it
  // is in cache, otherwise an empty handle.

14  deps/v8/src/compiler.cc

@@ -548,7 +548,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,

Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                 Handle<Context> context,
-                                                bool is_global) {
+                                                bool is_global,
+                                                StrictModeFlag strict_mode) {
  int source_length = source->length();
  Counters::total_eval_size.Increment(source_length);
  Counters::total_compile_size.Increment(source_length);
@@ -559,7 +560,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
  // Do a lookup in the compilation cache; if the entry is not there, invoke
  // the compiler and add the result to the cache.
  Handle<SharedFunctionInfo> result;
- result = CompilationCache::LookupEval(source, context, is_global);
+ result = CompilationCache::LookupEval(source,
+                                       context,
+                                       is_global,
+                                       strict_mode);

  if (result.is_null()) {
    // Create a script object describing the script to be compiled.
@@ -567,9 +571,14 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
    CompilationInfo info(script);
    info.MarkAsEval();
    if (is_global) info.MarkAsGlobal();
+   if (strict_mode == kStrictMode) info.MarkAsStrict();
    info.SetCallingContext(context);
    result = MakeFunctionInfo(&info);
    if (!result.is_null()) {
+     // If caller is strict mode, the result must be strict as well,
+     // but not the other way around. Consider:
+     // eval("'use strict'; ...");
+     ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
      CompilationCache::PutEval(source, context, is_global, result);
    }
  }
@@ -762,6 +771,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
            *lit->this_property_assignments());
  function_info->set_try_full_codegen(lit->try_full_codegen());
  function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_strict_mode(lit->strict_mode());
}

13  deps/v8/src/compiler.h

@@ -49,6 +49,7 @@ class CompilationInfo BASE_EMBEDDED {
  bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
  bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
  bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
+ bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
  bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
  FunctionLiteral* function() const { return function_; }
  Scope* scope() const { return scope_; }
@@ -69,6 +70,13 @@ class CompilationInfo BASE_EMBEDDED {
    ASSERT(!is_lazy());
    flags_ |= IsGlobal::encode(true);
  }
+ void MarkAsStrict() {
+   ASSERT(!is_lazy());
+   flags_ |= IsStrict::encode(true);
+ }
+ StrictModeFlag StrictMode() {
+   return is_strict() ? kStrictMode : kNonStrictMode;
+ }
  void MarkAsInLoop() {
    ASSERT(is_lazy());
    flags_ |= IsInLoop::encode(true);
@@ -162,6 +170,8 @@ class CompilationInfo BASE_EMBEDDED {
  class IsGlobal: public BitField<bool, 2, 1> {};
  // Flags that can be set for lazy compilation.
  class IsInLoop: public BitField<bool, 3, 1> {};
+ // Strict mode - used in eager compilation.
+ class IsStrict: public BitField<bool, 4, 1> {};

  unsigned flags_;
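CompilationInfo packs its booleans into one word with V8's BitField template: each flag class carries a bit position and width, and mask()/encode() shift accordingly, which is why IsStrict simply claims bit 4 here. A stripped-down version of the mechanism, under the assumption that it mirrors the BitField in V8's utils.h (only the two methods used above are sketched):

#include <cassert>

// Minimal BitField in the spirit of V8's: <type, shift, width>.
template <class T, int shift, int size>
class BitField {
 public:
  static unsigned mask() { return ((1u << size) - 1) << shift; }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
};

class IsStrict : public BitField<bool, 4, 1> {};

int main() {
  unsigned flags = 0;
  flags |= IsStrict::encode(true);          // set bit 4
  assert((flags & IsStrict::mask()) != 0);  // test it, as is_strict() does
}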
@@ -230,7 +240,8 @@ class Compiler : public AllStatic {
  // Compile a String source within a context for Eval.
  static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                Handle<Context> context,
-                                               bool is_global);
+                                               bool is_global,
+                                               StrictModeFlag strict_mode);

  // Compile from function info (used for lazy compilation). Returns true on
  // success and false if the compilation resulted in a stack overflow.

78  deps/v8/src/conversions.cc

@@ -125,8 +125,8 @@ static bool isDigit(int x, int radix) {
}

-static double SignedZero(bool sign) {
-  return sign ? -0.0 : 0.0;
+static double SignedZero(bool negative) {
+  return negative ? -0.0 : 0.0;
}
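This file-wide rename from sign to negative is purely for clarity: the flag answers "is the value negative?", and IEEE 754 preserves that distinction even at zero. A quick demonstration of why the signed zero matters (plain C++):

#include <cassert>
#include <cmath>

static double SignedZero(bool negative) {
  return negative ? -0.0 : 0.0;
}

int main() {
  // -0.0 == 0.0 compares equal, but the sign survives and is observable:
  assert(SignedZero(true) == SignedZero(false));
  assert(std::signbit(SignedZero(true)));
  assert(1.0 / SignedZero(true) < 0.0);  // -Infinity
}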
@@ -134,14 +134,14 @@ static double SignedZero(bool sign) {
template <int radix_log_2, class Iterator, class EndMark>
static double InternalStringToIntDouble(Iterator current,
                                        EndMark end,
-                                       bool sign,
+                                       bool negative,
                                        bool allow_trailing_junk) {
  ASSERT(current != end);

  // Skip leading 0s.
  while (*current == '0') {
    ++current;
-   if (current == end) return SignedZero(sign);
+   if (current == end) return SignedZero(negative);
  }

  int64_t number = 0;
@@ -217,7 +217,7 @@ static double InternalStringToIntDouble(Iterator current,
  ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);

  if (exponent == 0) {
-   if (sign) {
+   if (negative) {
      if (number == 0) return -0.0;
      number = -number;
    }
@@ -227,7 +227,7 @@ static double InternalStringToIntDouble(Iterator current,
  ASSERT(number != 0);
  // The double could be constructed faster from number (mantissa), exponent
  // and sign. Assuming it's a rare case more simple code is used.
- return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+ return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
}

@@ -238,7 +238,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
  if (!AdvanceToNonspace(&current, end)) return empty_string_val;

- bool sign = false;
+ bool negative = false;
  bool leading_zero = false;

  if (*current == '+') {
@@ -248,14 +248,14 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
  } else if (*current == '-') {
    ++current;
    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
-   sign = true;
+   negative = true;
  }

  if (radix == 0) {
    // Radix detection.
    if (*current == '0') {
      ++current;
-     if (current == end) return SignedZero(sign);
+     if (current == end) return SignedZero(negative);
      if (*current == 'x' || *current == 'X') {
        radix = 16;
        ++current;
@@ -271,7 +271,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
    if (*current == '0') {
      // Allow "0x" prefix.
      ++current;
-     if (current == end) return SignedZero(sign);
+     if (current == end) return SignedZero(negative);
      if (*current == 'x' || *current == 'X') {
        ++current;
        if (current == end) return JUNK_STRING_VALUE;
@@ -287,7 +287,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
  while (*current == '0') {
    leading_zero = true;
    ++current;
-   if (current == end) return SignedZero(sign);
+   if (current == end) return SignedZero(negative);
  }

  if (!leading_zero && !isDigit(*current, radix)) {
@@ -298,21 +298,21 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
  switch (radix) {
    case 2:
      return InternalStringToIntDouble<1>(
-         current, end, sign, allow_trailing_junk);
+         current, end, negative, allow_trailing_junk);
    case 4:
      return InternalStringToIntDouble<2>(
-         current, end, sign, allow_trailing_junk);
+         current, end, negative, allow_trailing_junk);
    case 8:
      return InternalStringToIntDouble<3>(
-         current, end, sign, allow_trailing_junk);
+         current, end, negative, allow_trailing_junk);
    case 16:
      return InternalStringToIntDouble<4>(
-         current, end, sign, allow_trailing_junk);
+         current, end, negative, allow_trailing_junk);
    case 32:
      return InternalStringToIntDouble<5>(
-         current, end, sign, allow_trailing_junk);
+         current, end, negative, allow_trailing_junk);
    default:
      UNREACHABLE();
  }
@@ -344,7 +344,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
    ASSERT(buffer_pos < kBufferSize);
    buffer[buffer_pos] = '\0';
    Vector<const char> buffer_vector(buffer, buffer_pos);
-   return sign ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+   return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
  }

  // The following code causes accumulating rounding error for numbers greater
@@ -406,7 +406,7 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
    return JUNK_STRING_VALUE;
  }

- return sign ? -v : v;
+ return negative ? -v : v;
}

@@ -445,7 +445,7 @@ static double InternalStringToDouble(Iterator current,
  bool nonzero_digit_dropped = false;
  bool fractional_part = false;

- bool sign = false;
+ bool negative = false;

  if (*current == '+') {
    // Ignore leading sign.
@@ -454,7 +454,7 @@ static double InternalStringToDouble(Iterator current,
  } else if (*current == '-') {
    ++current;
    if (current == end) return JUNK_STRING_VALUE;
-   sign = true;
+   negative = true;
  }

  static const char kInfinitySymbol[] = "Infinity";
@@ -468,13 +468,13 @@ static double InternalStringToDouble(Iterator current,
    }

    ASSERT(buffer_pos == 0);
-   return sign ? -V8_INFINITY : V8_INFINITY;
+   return negative ? -V8_INFINITY : V8_INFINITY;
  }

  bool leading_zero = false;
  if (*current == '0') {
    ++current;
-   if (current == end) return SignedZero(sign);
+   if (current == end) return SignedZero(negative);

    leading_zero = true;
@@ -487,14 +487,14 @@ static double InternalStringToDouble(Iterator current,
      return InternalStringToIntDouble<4>(current,
                                          end,
-                                         sign,
+                                         negative,
                                          allow_trailing_junk);
    }

    // Ignore leading zeros in the integer part.
    while (*current == '0') {
      ++current;
-     if (current == end) return SignedZero(sign);
+     if (current == end) return SignedZero(negative);
    }
  }
@@ -539,7 +539,7 @@ static double InternalStringToDouble(Iterator current,
      // leading zeros (if any).
      while (*current == '0') {
        ++current;
-       if (current == end) return SignedZero(sign);
+       if (current == end) return SignedZero(negative);
        exponent--;  // Move this 0 into the exponent.
      }
    }
@@ -631,7 +631,7 @@ static double InternalStringToDouble(Iterator current,
  if (octal) {
    return InternalStringToIntDouble<3>(buffer,
                                        buffer + buffer_pos,
-                                       sign,
+                                       negative,
                                        allow_trailing_junk);
  }

@@ -644,7 +644,7 @@ static double InternalStringToDouble(Iterator current,
  buffer[buffer_pos] = '\0';

  double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return sign ? -converted : converted;
+ return negative ? -converted : converted;
}
@@ -702,26 +702,12 @@ double StringToDouble(Vector<const char> str,

const char* DoubleToCString(double v, Vector<char> buffer) {
- StringBuilder builder(buffer.start(), buffer.length());
-
  switch (fpclassify(v)) {
-   case FP_NAN:
-     builder.AddString("NaN");
-     break;
-
-   case FP_INFINITE:
-     if (v < 0.0) {
-       builder.AddString("-Infinity");
-     } else {
-       builder.AddString("Infinity");
-     }
-     break;
-
-   case FP_ZERO:
-     builder.AddCharacter('0');
-     break;
-
+   case FP_NAN: return "NaN";
+   case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
+   case FP_ZERO: return "0";
    default: {
+     StringBuilder builder(buffer.start(), buffer.length());
      int decimal_point;
      int sign;
      const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -764,9 +750,9 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
        if (exponent < 0) exponent = -exponent;
        builder.AddFormatted("%d", exponent);
      }
+     return builder.Finalize();
    }
  }
- return builder.Finalize();
}
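The rewrite avoids constructing a StringBuilder for the three fixed-spelling cases; returning string literals is safe because they have static storage duration, while the finite path still formats into the caller's buffer. The classification step is standard C99/C++11 fpclassify; for instance:

#include <cmath>
#include <cstdio>

// fpclassify cleanly separates the special cases handled above.
const char* Classify(double v) {
  switch (std::fpclassify(v)) {
    case FP_NAN:      return "NaN";
    case FP_INFINITE: return v < 0.0 ? "-Infinity" : "Infinity";
    case FP_ZERO:     return "0";       // matches both +0.0 and -0.0
    default:          return "finite";  // normal or subnormal
  }
}

int main() {
  std::printf("%s %s\n", Classify(std::nan("")), Classify(-HUGE_VAL));
}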

4  deps/v8/src/deoptimizer.cc

@@ -817,7 +817,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
  // call to an unconditional call to the replacement code.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
-     unoptimized_code->stack_check_table_start();
+     unoptimized_code->stack_check_table_offset();
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
@@ -836,7 +836,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
  // stack check calls.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
-     unoptimized_code->stack_check_table_start();
+     unoptimized_code->stack_check_table_offset();
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {

4  deps/v8/src/disassembler.cc

@@ -313,12 +313,12 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
-     ? static_cast<int>(code->safepoint_table_start())
+     ? static_cast<int>(code->safepoint_table_offset())
      : code->instruction_size();
  // If there might be a stack check table, stop before reaching it.
  if (code->kind() == Code::FUNCTION) {
    decode_size =
-       Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+       Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
  }
  byte* begin = code->instruction_start();

6  deps/v8/src/extensions/gc-extension.cc

@@ -40,8 +40,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+ bool compact = false;
  // All allocation spaces other than NEW_SPACE have the same effect.
- Heap::CollectAllGarbage(false);
+ if (args.Length() >= 1 && args[0]->IsBoolean()) {
+   compact = args[0]->BooleanValue();
+ }
+ Heap::CollectAllGarbage(compact);
  return v8::Undefined();
}

2  deps/v8/src/full-codegen.cc

@@ -304,7 +304,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
    cgen.PopulateDeoptimizationData(code);
    code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
    code->set_allow_osr_at_loop_nesting_level(0);
-   code->set_stack_check_table_start(table_offset);
+   code->set_stack_check_table_offset(table_offset);
    CodeGenerator::PrintCode(code, info);
    info->SetCode(code);  // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE

3  deps/v8/src/full-codegen.h

@@ -531,6 +531,9 @@ class FullCodeGenerator: public AstVisitor {
  Handle<Script> script() { return info_->script(); }
  bool is_eval() { return info_->is_eval(); }
+ StrictModeFlag strict_mode_flag() {
+   return function()->strict_mode() ? kStrictMode : kNonStrictMode;
+ }
  FunctionLiteral* function() { return info_->function(); }
  Scope* scope() { return info_->scope(); }

2  deps/v8/src/handles.cc

@@ -873,7 +873,7 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
    int expected_additional_properties,
    bool condition) {
  object_ = object;
- if (condition && object_->HasFastProperties()) {
+ if (condition && object_->HasFastProperties() && !object->IsJSGlobalProxy()) {
    // Normalize the properties of object to avoid n^2 behavior
    // when extending the object multiple properties. Indicate the number of
    // properties to be added.

3  deps/v8/src/heap-profiler.cc

@@ -373,6 +373,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
  bool generation_completed = true;
  switch (s_type) {
    case HeapSnapshot::kFull: {
+     Heap::CollectAllGarbage(true);
      HeapSnapshotGenerator generator(result, control);
      generation_completed = generator.GenerateSnapshot();
      break;
@@ -808,7 +809,7 @@ void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {

void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    CollectStats(obj);
    agg_snapshot_->js_cons_profile()->CollectStats(obj);

11  deps/v8/src/heap.cc

@@ -1943,6 +1943,14 @@ void Heap::CreateJSConstructEntryStub() {
}

+#if V8_TARGET_ARCH_ARM
+void Heap::CreateDirectCEntryStub() {
+  DirectCEntryStub stub;
+  set_direct_c_entry_code(*stub.GetCode());
+}
+#endif

void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
@@ -1963,6 +1971,9 @@ void Heap::CreateFixedStubs() {
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
  Heap::CreateRegExpCEntryStub();
#endif
+#if V8_TARGET_ARCH_ARM
+ Heap::CreateDirectCEntryStub();
+#endif
}

11  deps/v8/src/heap.h

@@ -122,7 +122,12 @@ namespace internal {
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V)                       \
  UNCONDITIONAL_STRONG_ROOT_LIST(V)               \
- V(Code, re_c_entry_code, RegExpCEntryCode)
+ V(Code, re_c_entry_code, RegExpCEntryCode)      \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
+#elif V8_TARGET_ARCH_ARM
+#define STRONG_ROOT_LIST(V)                       \
+ UNCONDITIONAL_STRONG_ROOT_LIST(V)               \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
#else
#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
#endif
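STRONG_ROOT_LIST is an X-macro: each client defines V to expand every (type, name, CamelName) root entry into whatever it needs, so adding direct_c_entry_code here is all it takes to give the new stub a GC-visible root plus generated accessors. Roughly how such an expansion is consumed (a simplified, self-contained sketch, not the exact macros in heap.h):

// Simplified X-macro consumption: one list, many expansions.
struct Code {};  // stand-in for v8::internal::Code

#define DEMO_ROOT_LIST(V)                        \
  V(Code, re_c_entry_code, RegExpCEntryCode)     \
  V(Code, direct_c_entry_code, DirectCEntryCode)

// Expansion 1: an enum of root indices, as the heap's roots array uses.
enum RootIndex {
#define DECLARE_INDEX(type, name, camel) k##camel##RootIndex,
  DEMO_ROOT_LIST(DECLARE_INDEX)
#undef DECLARE_INDEX
  kRootListLength  // == 2 here
};

// Expansion 2: one accessor declaration per root.
#define DECLARE_GETTER(type, name, camel) type* name();
DEMO_ROOT_LIST(DECLARE_GETTER)
#undef DECLARE_GETTER

int main() { return kRootListLength == 2 ? 0 : 1; }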
@@ -178,6 +183,7 @@ namespace internal {
  V(InitializeConstGlobal_symbol, "InitializeConstGlobal")  \
  V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")    \
  V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")  \
+ V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray")      \
  V(stack_overflow_symbol, "kStackOverflowBoilerplate")     \
  V(illegal_access_symbol, "illegal access")                \
  V(out_of_memory_symbol, "out-of-memory")                  \
@@ -1319,12 +1325,13 @@ class Heap : public AllStatic {
  static bool CreateInitialMaps();
  static bool CreateInitialObjects();

- // These four Create*EntryStub functions are here and forced to not be inlined
+ // These five Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(static void CreateCEntryStub());
  NO_INLINE(static void CreateJSEntryStub());
  NO_INLINE(static void CreateJSConstructEntryStub());
  NO_INLINE(static void CreateRegExpCEntryStub());
+ NO_INLINE(static void CreateDirectCEntryStub());

  static void CreateFixedStubs();

10  deps/v8/src/hydrogen-instructions.cc

@@ -1193,7 +1193,15 @@ void HStoreGlobal::PrintDataTo(StringStream* stream) const {

void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ value()->PrintNameTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
+ context()->PrintNameTo(stream);
+ stream->Add("[%d] = ", slot_index());
+ value()->PrintNameTo(stream);
}

114  deps/v8/src/hydrogen-instructions.h

@@ -98,6 +98,7 @@ class LChunkBuilder;
  V(CompareJSObjectEq)      \
  V(CompareMap)             \
  V(Constant)               \
+ V(Context)                \
  V(DeleteProperty)         \
  V(Deoptimize)             \
  V(Div)                    \
@@ -129,6 +130,7 @@ class LChunkBuilder;
  V(Mul)                    \
  V(ObjectLiteral)          \
  V(OsrEntry)               \
+ V(OuterContext)           \
  V(Parameter)              \
  V(Power)                  \
  V(PushArgument)           \
@@ -139,6 +141,7 @@ class LChunkBuilder;
  V(Shr)                    \
  V(Simulate)               \
  V(StackCheck)             \
+ V(StoreContextSlot)       \
  V(StoreGlobal)            \
  V(StoreKeyedFastElement)  \
  V(StoreKeyedGeneric)      \
@@ -163,6 +166,7 @@ class LChunkBuilder;
  V(GlobalVars)             \
  V(Maps)                   \
  V(ArrayLengths)           \
+ V(ContextSlots)           \
  V(OsrEntries)

#define DECLARE_INSTRUCTION(type) \
@@ -1060,12 +1064,39 @@ class HPushArgument: public HUnaryOperation {
};

-class HGlobalObject: public HInstruction {
+class HContext: public HInstruction {
 public:
- HGlobalObject() {
+ HContext() {
+   set_representation(Representation::Tagged());
+   SetFlag(kUseGVN);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+class HOuterContext: public HUnaryOperation {
+ public:
+  explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+class HGlobalObject: public HUnaryOperation {
+ public:
+  explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
    set_representation(Representation::Tagged());
    SetFlag(kUseGVN);
+   SetFlag(kDependsOnCalls);
  }

  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
@@ -1075,12 +1106,12 @@ class HGlobalObject: public HInstruction {
};

-class HGlobalReceiver: public HInstruction {
+class HGlobalReceiver: public HUnaryOperation {
 public:
- HGlobalReceiver() {
+ explicit HGlobalReceiver(HValue* global_object)
+     : HUnaryOperation(global_object) {
    set_representation(Representation::Tagged());
    SetFlag(kUseGVN);
+   SetFlag(kDependsOnCalls);
  }

  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
@@ -2613,35 +2644,66 @@ class HStoreGlobal: public HUnaryOperation {
};

-class HLoadContextSlot: public HInstruction {
+class HLoadContextSlot: public HUnaryOperation {
 public:
- HLoadContextSlot(int context_chain_length, int slot_index)
-     : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+ HLoadContextSlot(HValue* context, int slot_index)
+     : HUnaryOperation(context), slot_index_(slot_index) {
    set_representation(Representation::Tagged());
    SetFlag(kUseGVN);
-   SetFlag(kDependsOnCalls);
+   SetFlag(kDependsOnContextSlots);
  }

- int context_chain_length() const { return context_chain_length_; }
  int slot_index() const { return slot_index_; }

- virtual void PrintDataTo(StringStream* stream) const;
-
- virtual intptr_t Hashcode() const {
-   return context_chain_length() * 29 + slot_index();
+ virtual Representation RequiredInputRepresentation(int index) const {
+   return Representation::Tagged();
  }

+ virtual void PrintDataTo(StringStream* stream) const;
+
  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")

 protected:
  virtual bool DataEquals(HValue* other) const {
    HLoadContextSlot* b = HLoadContextSlot::cast(other);
-   return (context_chain_length() == b->context_chain_length())
-       && (slot_index() == b->slot_index());
+   return (slot_index() == b->slot_index());
  }

 private:
- int context_chain_length_;
  int slot_index_;
};

+static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsSmi() &&
+      !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+}
+
+class HStoreContextSlot: public HBinaryOperation {
+ public:
+  HStoreContextSlot(HValue* context, int slot_index, HValue* value)
+      : HBinaryOperation(context, value), slot_index_(slot_index) {
+    SetFlag(kChangesContextSlots);
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  int slot_index() const { return slot_index_; }
+
+  bool NeedsWriteBarrier() const {
+    return StoringValueNeedsWriteBarrier(value());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+
+ private:
+  int slot_index_;
+};
@@ -2777,12 +2839,6 @@ class HLoadKeyedGeneric: public HLoadKeyed {
};

-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsSmi() &&
-      !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}

class HStoreNamed: public HBinaryOperation {
 public:
  HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
@@ -2800,10 +2856,6 @@ class HStoreNamed: public HBinaryOperation {
  HValue* value() const { return OperandAt(1); }
  void set_value(HValue* value) { SetOperandAt(1, value); }

- bool NeedsWriteBarrier() const {
-   return StoringValueNeedsWriteBarrier(value());
- }

  DECLARE_INSTRUCTION(StoreNamed)

 private:
@@ -2831,7 +2883,7 @@ class HStoreNamedField: public HStoreNamed {
  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")

  virtual Representation RequiredInputRepresentation(int index) const {
    return Representation::Tagged();
  }

  virtual void PrintDataTo(StringStream* stream) const;
@@ -2840,6 +2892,10 @@ class HStoreNamedField: public HStoreNamed {
  Handle<Map> transition() const { return transition_; }
  void set_transition(Handle<Map> map) { transition_ = map; }

+ bool NeedsWriteBarrier() const {
+   return StoringValueNeedsWriteBarrier(value());
+ }

 private:
  bool is_in_object_;
  int offset_;

92  deps/v8/src/hydrogen.cc

@@ -2955,6 +2955,19 @@ void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
}

+HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+  ASSERT(var->IsContextSlot());
+  HInstruction* context = new HContext;
+  AddInstruction(context);
+  int length = graph()->info()->scope()->ContextChainLength(var->scope());
+  while (length-- > 0) {
+    context = new HOuterContext(context);
+    AddInstruction(context);
+  }
+  return context;
+}
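BuildContextChainWalk turns the static nesting distance between the current scope and the variable's scope into an explicit chain of instructions: one HContext for the current context, then one HOuterContext link per hop; since both are marked kUseGVN, repeated walks to the same depth can be commoned. A standalone replica that just prints the shape of the emitted chain (illustrative, not V8 code):

#include <cstdio>

// For a variable two scopes out, the builder effectively emits
// t0 = HContext, t1 = HOuterContext(t0), t2 = HOuterContext(t1).
int EmitContextChain(int depth) {
  int instructions = 1;  // the initial HContext
  std::printf("t0 = HContext\n");
  for (int i = 1; i <= depth; ++i) {
    std::printf("t%d = HOuterContext(t%d)\n", i, i - 1);
    ++instructions;
  }
  return instructions;
}

int main() { EmitContextChain(2); }  // two scopes out -> three instructions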
void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
  Variable* variable = expr->AsVariable();
  if (variable == NULL) {
@@ -2968,16 +2981,9 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
    if (variable->mode() == Variable::CONST) {
      BAILOUT("reference to const context slot");
    }
-   Slot* slot = variable->AsSlot();
-   CompilationInfo* info = graph()->info();
-   int context_chain_length = info->function()->scope()->
-       ContextChainLength(slot->var()->scope());
-   ASSERT(context_chain_length >= 0);
-   // TODO(antonm): if slot's value is not modified by closures, instead
-   // of reading it out of context, we could just embed the value as
-   // a constant.
-   HLoadContextSlot* instr =
-       new HLoadContextSlot(context_chain_length, slot->index());
+   HValue* context = BuildContextChainWalk(variable);
+   int index = variable->AsSlot()->index();
+   HLoadContextSlot* instr = new HLoadContextSlot(context, index);
    ast_context()->ReturnInstruction(instr, expr->id());
  } else if (variable->is_global()) {
    LookupResult lookup;
@@ -3515,35 +3521,48 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
    if (proxy->IsArguments()) BAILOUT("assignment to arguments");

    // Handle the assignment.
-   if (var->is_global()) {
+   if (var->IsStackAllocated()) {
+     HValue* value = NULL;
+     // Handle stack-allocated variables on the right-hand side directly.
+     // We do not allow the arguments object to occur in a context where it
+     // may escape, but assignments to stack-allocated locals are
+     // permitted. Handling such assignments here bypasses the check for
+     // the arguments object in VisitVariableProxy.
+     Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
+     if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
+       value = environment()->Lookup(rhs_var);
+     } else {
+       VISIT_FOR_VALUE(expr->value());
+       value = Pop();
+     }
+     Bind(var, value);
+     ast_context()->ReturnValue(value);
+   } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
+     VISIT_FOR_VALUE(expr->value());
+     HValue* context = BuildContextChainWalk(var);
+     int index = var->AsSlot()->index();
+     HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+     AddInstruction(instr);
+     if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+     ast_context()->ReturnValue(Pop());
+   } else if (var->is_global()) {
      VISIT_FOR_VALUE(expr->value());
      HandleGlobalVariableAssignment(var,
                                     Top(),
                                     expr->position(),
                                     expr->AssignmentId());
-   } else if (var->IsStackAllocated()) {
-     // We allow reference to the arguments object only in assignemtns
-     // to local variables to make sure that the arguments object does
-     // not escape and is not modified.
-     VariableProxy* rhs = expr->value()->AsVariableProxy();
-     if (rhs != NULL &&
-         rhs->var()->IsStackAllocated() &&
-         environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
-       Push(environment()->Lookup(rhs->var()));
-     } else {
-       VISIT_FOR_VALUE(expr->value());
-     }
-     Bind(proxy->var(), Top());
+     ast_context()->ReturnValue(Pop());
    } else {
-     BAILOUT("Assigning to no non-stack-allocated/non-global variable");
+     BAILOUT("assignment to LOOKUP or const CONTEXT variable");
    }
-   // Return the value.
-   ast_context()->ReturnValue(Pop());
  } else if (prop != NULL) {
    HandlePropertyAssignment(expr);
  } else {
-   BAILOUT("unsupported invalid lhs");
+   BAILOUT("invalid left-hand side in assignment");
  }
}
@@ -4422,7 +4441,10 @@ void HGraphBuilder::VisitCall(Call* expr) {
      if (known_global_function) {
        // Push the global object instead of the global receiver because
        // code generated by the full code generator expects it.
-       PushAndAdd(new HGlobalObject);
+       HContext* context = new HContext;
+       HGlobalObject* global_object = new HGlobalObject(context);
+       AddInstruction(context);
+       PushAndAdd(global_object);
        VisitArgumentList(expr->arguments());
        CHECK_BAILOUT;

@@ -4431,7 +4453,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
        AddInstruction(new HCheckFunction(function, expr->target()));

        // Replace the global object with the global receiver.
-       HGlobalReceiver* global_receiver = new HGlobalReceiver;
+       HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
        // Index of the receiver from the top of the expression stack.
        const int receiver_index = argument_count - 1;
        AddInstruction(global_receiver);
@@ -4458,7 +4480,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
        call = new HCallKnownGlobal(expr->target(), argument_count);
      } else {
-       PushAndAdd(new HGlobalObject);
+       HContext* context = new HContext;
+       AddInstruction(context);
+       PushAndAdd(new HGlobalObject(context));
        VisitArgumentList(expr->arguments());
        CHECK_BAILOUT;

@@ -4466,7 +4490,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
      }
    } else {
-     PushAndAdd(new HGlobalReceiver);
+     HContext* context = new HContext;
+     HGlobalObject* global_object = new HGlobalObject(context);
+     AddInstruction(context);
+     AddInstruction(global_object);
+     PushAndAdd(new HGlobalReceiver(global_object));
      VisitArgumentList(expr->arguments());
      CHECK_BAILOUT;

2  deps/v8/src/hydrogen.h

@@ -823,6 +823,8 @@ class HGraphBuilder: public AstVisitor {
                           HValue* switch_value,
                           CaseClause* clause);

+ HValue* BuildContextChainWalk(Variable* var);

  void AddCheckConstantFunction(Call* expr,
                                HValue* receiver,
                                Handle<Map> receiver_map,

48  deps/v8/src/ia32/code-stubs-ia32.cc

@@ -6511,6 +6511,54 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}

+// Loads an indexed element from a pixel array.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  //   key - holds the key and is unchanged (must be a smi).
+  //   elements - is set to the receiver's elements if
+  //       the receiver doesn't have a pixel array or the
+  //       key is not a smi, otherwise it's the elements'
+  //       external pointer.
+  //   untagged_key - is set to the untagged key.
+
+  // Some callers already have verified that the key is a smi. key_not_smi
+  // is set to NULL as a sentinel for that case. Otherwise, an explicit
+  // check that the key is a smi is generated here.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+  __ mov(untagged_key, key);
+  __ SmiUntag(untagged_key);
+
+  // Verify that the receiver has pixel array elements.
+  __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+  // Key must be in range.
+  __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+  __ j(above_equal, out_of_range);  // unsigned check handles negative keys.
+
+  // Perform the indexed load and tag the result as a smi.
+  __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+  __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
+  __ SmiTag(result);
+  __ ret(0);
+}

#undef __

} }  // namespace v8::internal
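The ARM port earlier in this commit shows how a keyed-load stub drives this helper: miss labels, scratch registers, and the result register all routed through one call. An ia32 caller would follow the same shape; the fragment below is a hypothetical call-site sketch, assuming the usual keyed-load register assignment (eax = key, edx = receiver), and is not taken from the diff:

// Hypothetical ia32 call site mirroring the ARM CompileLoadPixelArray
// shown above; the register choices are illustrative.
Label miss;
GenerateFastPixelArrayLoad(masm(),
                           edx,     // receiver
                           eax,     // key (smi)
                           ecx,     // elements / external pointer
                           ebx,     // untagged key
                           eax,     // result (tagged smi)
                           &miss,   // not a pixel array
                           NULL,    // key already known to be a smi
                           &miss);  // key out of range
__ bind(&miss);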

19  deps/v8/src/ia32/code-stubs-ia32.h

@@ -490,6 +490,25 @@ class NumberToStringStub: public CodeStub {
};

+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// the generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key,
+// so that the smi check is not generated. If key is not a valid index within
+// the bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);

} }  // namespace v8::internal

#endif  // V8_IA32_CODE_STUBS_IA32_H_

10  deps/v8/src/ia32/codegen-ia32.cc

@@ -6102,9 +6102,12 @@ void CodeGenerator::VisitCall(Call* node) {
      }
      frame_->PushParameterAt(-1);

+     // Push the strict mode flag.
+     frame_->Push(Smi::FromInt(strict_mode_flag()));

      // Resolve the call.
      result =
-         frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+         frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);

      done.Jump(&result);
      slow.Bind();
@@ -6121,8 +6124,11 @@ void CodeGenerator::VisitCall(Call* node) {
    }
    frame_->PushParameterAt(-1);

+   // Push the strict mode flag.
+   frame_->Push(Smi::FromInt(strict_mode_flag()));

    // Resolve the call.
-   result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+   result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);

    // If we generated fast-case code bind the jump-target where fast
    // and slow case merge.

1  deps/v8/src/ia32/codegen-ia32.h

@@ -365,6 +365,7 @@ class CodeGenerator: public AstVisitor {
  // Accessors
  inline bool is_eval();
  inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();

  // Generating deferred code.
  void ProcessDeferred();

115  deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -45,6 +45,16 @@ int Deoptimizer::patch_size() {
}

+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+  ASSERT(start <= end);
+  int size = end - start;
+  CodePatcher destroyer(start, size);
+  while (size-- > 0) destroyer.masm()->int3();
+#endif
+}

void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  AssertNoAllocation no_allocation;
@@ -52,90 +62,61 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  // Get the optimized code.
  Code* code = function->code();
+ Address code_start_address = code->instruction_start();

- // For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());

  // We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte array.
- // Later on we will align this at the start of the byte array and create
- // a trash byte array of the remaining space.
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
  ByteArray* reloc_info = code->relocation_info();
- Address end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

+ // For each return after a safepoint insert a call to the corresponding
+ // deoptimization entry. Since the call is a relative encoding, write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ SafepointTable table(code);
+ Address prev_address = code_start_address;
+ for (unsigned i = 0; i < table.length(); ++i) {
+   Address curr_address = code_start_address + table.GetPcOffset(i);
+   ZapCodeRange(prev_address, curr_address);

- for (unsigned i = 0; i < table.length(); i++) {
-   unsigned pc_offset = table.GetPcOffset(i);
    SafepointEntry safepoint_entry = table.GetEntry(i);
    int deoptimization_index = safepoint_entry.deoptimization_index();
-   int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
-   // Destroy the code which is not supposed to run again.
-   unsigned instructions = pc_offset - last_pc_offset;
-   CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                         instructions);
-   for (unsigned i = 0; i < instructions; i++) {
-     destroyer.masm()->int3();
-   }
-#endif
-   last_pc_offset = pc_offset;
    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-     last_pc_offset += gap_code_size;
-     Address call_pc = code->instruction_start() + last_pc_offset;
-     CodePatcher patcher(call_pc, patch_size());
-     Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
-     patcher.masm()->call(entry, RelocInfo::NONE);
-     last_pc_offset += patch_size();
-     RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
-                     reinterpret_cast<intptr_t>(entry));
+     // The gap code is needed to get to the state expected at the bailout.
+     curr_address += safepoint_entry.gap_code_size();
+
+     CodePatcher patcher(curr_address, patch_size());
+     Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+     patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+     // We use RUNTIME_ENTRY for deoptimization bailouts.
+     RelocInfo rinfo(curr_address + 1,  // 1 after the call opcode.
+                     RelocInfo::RUNTIME_ENTRY,
+                     reinterpret_cast<intptr_t>(deopt_entry));
      reloc_info_writer.Write(&rinfo);
+
+     curr_address += patch_size();
    }
+   prev_address = curr_address;
  }
-
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                       instructions);
- for (unsigned i = 0; i < instructions; i++) {
-   destroyer.masm()->int3();
- }
-#endif
+ ZapCodeRange(prev_address,
+              code_start_address + code->safepoint_table_offset());

  // Move the relocation info to the beginning of the byte array.
- int reloc_size = end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

  // The relocation info is in place, update the size.
- reloc_info->set_length(reloc_size);
+ reloc_info->set_length(new_reloc_size);
// Handle the junk part after the new relocation info. We will create // Handle the junk part after the new relocation info. We will create
// a non-live object in the extra space at the end of the former reloc info. // a non-live object in the extra space at the end of the former reloc info.
Address junk = reloc_info->address() + reloc_info->Size(); Address junk_address = reloc_info->address() + reloc_info->Size();
ASSERT(junk <= end_address); ASSERT(junk_address <= reloc_end_address);
Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
if (end_address - junk <= ByteArray::kHeaderSize) {
// We get in here if there is not enough space for a ByteArray.
// Both addresses are kPointerSize alligned.
CHECK_EQ((end_address - junk) % 4, 0);
Map* filler_map = Heap::one_pointer_filler_map();
while (junk < end_address) {
HeapObject::FromAddress(junk)->set_map(filler_map);
junk += kPointerSize;
}
} else {
int size = end_address - junk;
// Since the reloc_end address and junk are both alligned, we shouild,
// never have junk which is not a multipla of kPointerSize.
CHECK_EQ(size % kPointerSize, 0);
CHECK_GT(size, 0);
HeapObject* junk_object = HeapObject::FromAddress(junk);
junk_object->set_map(Heap::byte_array_map());
int length = ByteArray::LengthFor(end_address - junk);
ByteArray::cast(junk_object)->set_length(length);
}
// Add the deoptimizing code to the list. // Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
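
The "curr_address + 1" above is the ia32 call encoding at work. Assuming the usual five-byte near call (opcode 0xE8 followed by a 32-bit eip-relative displacement; the constant 5 below is that assumption, not taken from this diff):

    // Layout of the patched-in lazy-deopt call:
    //   curr_address + 0 : 0xE8   (call opcode)
    //   curr_address + 1 : rel32  (4-byte displacement)
    int32_t rel32 = static_cast<int32_t>(
        deopt_entry - (curr_address + 5));  // displacement is eip-relative
    // The RUNTIME_ENTRY reloc entry must track the displacement bytes,
    // which is why it is written at curr_address + 1, not at the opcode.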

153
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -206,45 +206,48 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     Move(dot_arguments_slot, ecx, ebx, edx);
   }
-  { Comment cmnt(masm_, "[ Declarations");
-    // For named function expressions, declare the function name as a
-    // constant.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
-    }
-    // Visit all the explicit declarations unless there is an illegal
-    // redeclaration.
-    if (scope()->HasIllegalRedeclaration()) {
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      VisitDeclarations(scope()->declarations());
-    }
-  }
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
-  { Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailout(info->function(), NO_REGISTERS);
-    NearLabel ok;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit();
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &ok, taken);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-    __ bind(&ok);
-  }
-  { Comment cmnt(masm_, "[ Body");
-    ASSERT(loop_depth() == 0);
-    VisitStatements(function()->body());
-    ASSERT(loop_depth() == 0);
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+  } else {
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailout(info->function(), NO_REGISTERS);
+      NearLabel ok;
+      ExternalReference stack_limit =
+          ExternalReference::address_of_stack_limit();
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      __ j(above_equal, &ok, taken);
+      StackCheckStub stub;
+      __ CallStub(&stub);
+      __ bind(&ok);
+    }
+    { Comment cmnt(masm_, "[ Body");
+      ASSERT(loop_depth() == 0);
+      VisitStatements(function()->body());
+      ASSERT(loop_depth() == 0);
+    }
   }
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
   { Comment cmnt(masm_, "[ return <undefined>;");
-    // Emit a 'return undefined' in case control fell off the end of the body.
     __ mov(eax, Factory::undefined_value());
     EmitReturnSequence();
   }
@@ -610,7 +613,7 @@ void FullCodeGenerator::Move(Slot* dst,
   __ mov(location, src);
   // Emit the write barrier code if the location is in the heap.
   if (dst->type() == Slot::CONTEXT) {
-    int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+    int offset = Context::SlotOffset(dst->index());
     __ RecordWrite(scratch1, offset, src, scratch2);
   }
 }
@@ -666,10 +669,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
       // We bypass the general EmitSlotSearch because we know more about
       // this specific context.
-      // The variable in the decl always resides in the current context.
+      // The variable in the decl always resides in the current function
+      // context.
       ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
       if (FLAG_debug_code) {
-        // Check if we have the correct context pointer.
+        // Check that we're not inside a 'with'.
         __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
         __ cmp(ebx, Operand(esi));
         __ Check(equal, "Unexpected declaration in current context.");
@@ -1124,8 +1128,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
   // Check that last extension is NULL.
   __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
   __ j(not_equal, slow);
-  __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(temp, slot->index());
+  // This function is used only for loads, not stores, so it's safe to
+  // return an esi-based operand (the write barrier cannot be allowed to
+  // destroy the esi register).
+  return ContextOperand(context, slot->index());
 }
@@ -2000,57 +2007,75 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
-  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
-    // Perform the assignment for non-const variables and for initialization
-    // of const variables. Const assignments are simply skipped.
-    Label done;
+  } else if (op == Token::INIT_CONST) {
+    // Like var declarations, const declarations are hoisted to function
+    // scope. However, unlike var initializers, const initializers are able
+    // to drill a hole to that function context, even from inside a 'with'
+    // context. We thus bypass the normal static scope lookup.
+    Slot* slot = var->AsSlot();
+    Label skip;
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+        // No const parameters.
+        UNREACHABLE();
+        break;
+      case Slot::LOCAL:
+        __ mov(edx, Operand(ebp, SlotOffset(slot)));
+        __ cmp(edx, Factory::the_hole_value());
+        __ j(not_equal, &skip);
+        __ mov(Operand(ebp, SlotOffset(slot)), eax);
+        break;
+      case Slot::CONTEXT: {
+        __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
+        __ mov(edx, ContextOperand(ecx, slot->index()));
+        __ cmp(edx, Factory::the_hole_value());
+        __ j(not_equal, &skip);
+        __ mov(ContextOperand(ecx, slot->index()), eax);
+        int offset = Context::SlotOffset(slot->index());
+        __ mov(edx, eax);  // Preserve the stored value in eax.
+        __ RecordWrite(ecx, offset, edx, ebx);
+        break;
+      }
+      case Slot::LOOKUP:
+        __ push(eax);
+        __ push(esi);
+        __ push(Immediate(var->name()));
+        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        break;
+    }
+    __ bind(&skip);
+  } else if (var->mode() != Variable::CONST) {
+    // Perform the assignment for non-const variables. Const assignments
+    // are simply skipped.
     Slot* slot = var->AsSlot();
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (op == Token::INIT_CONST) {
-          // Detect const reinitialization by checking for the hole value.
-          __ mov(edx, Operand(ebp, SlotOffset(slot)));
-          __ cmp(edx, Factory::the_hole_value());
-          __ j(not_equal, &done);
-        }
         // Perform the assignment.
         __ mov(Operand(ebp, SlotOffset(slot)), eax);
         break;
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, ecx);
-        if (op == Token::INIT_CONST) {
-          // Detect const reinitialization by checking for the hole value.
-          __ mov(edx, target);
-          __ cmp(edx, Factory::the_hole_value());
-          __ j(not_equal, &done);
-        }
         // Perform the assignment and issue the write barrier.
         __ mov(target, eax);
         // The value of the assignment is in eax. RecordWrite clobbers its
         // register arguments.
         __ mov(edx, eax);
-        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        int offset = Context::SlotOffset(slot->index());
         __ RecordWrite(ecx, offset, edx, ebx);
         break;
       }
       case Slot::LOOKUP:
-        // Call the runtime for the assignment. The runtime will ignore
-        // const reinitialization.
+        // Call the runtime for the assignment.
         __ push(eax);  // Value.
         __ push(esi);  // Context.
         __ push(Immediate(var->name()));
-        if (op == Token::INIT_CONST) {
-          // The runtime will ignore const redeclaration.
-          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        } else {
-          __ CallRuntime(Runtime::kStoreContextSlot, 3);
-        }
+        __ CallRuntime(Runtime::kStoreContextSlot, 3);
         break;
     }
-    __ bind(&done);
   }
 }
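
The hole-value checks above implement the const-initialization protocol: a const slot starts out holding the hole, and only the first INIT_CONST store may replace it. A plain C++ stand-in for the emitted logic (types and names invented for illustration):

    inline void InitConstSlot(void** slot, void* value, void* the_hole) {
      if (*slot == the_hole) {  // still uninitialized?
        *slot = value;          // first initialization wins
      }                         // reinitialization is silently skipped
    }
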
@@ -2270,7 +2295,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
       // Push the receiver of the enclosing function and do runtime call.
       __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+      // Push the strict mode flag.
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.

22
deps/v8/src/ia32/ic-ia32.cc

@@ -556,19 +556,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ ret(0);
   __ bind(&check_pixel_array);
-  // Check whether the elements is a pixel array.
-  // edx: receiver
-  // eax: key
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ mov(ebx, eax);
-  __ SmiUntag(ebx);
-  __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
-  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
-  __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
-  __ SmiTag(eax);
-  __ ret(0);
+  GenerateFastPixelArrayLoad(masm,
+                             edx,
+                             eax,
+                             ecx,
+                             ebx,
+                             eax,
+                             &check_number_dictionary,
+                             NULL,
+                             &slow);
   __ bind(&check_number_dictionary);
   // Check whether the elements is a number dictionary.

45
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -77,7 +77,7 @@ bool LCodeGen::GenerateCode() {
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
   code->set_stack_slots(StackSlotCount());
-  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -1914,10 +1914,21 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
-  // TODO(antonm): load a context with a separate instruction.
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ LoadContext(result, instr->context_chain_length());
-  __ mov(result, ContextOperand(result, instr->slot_index()));
+  __ mov(result, ContextOperand(context, instr->slot_index()));
+}
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  __ mov(ContextOperand(context, instr->slot_index()), value);
+  if (instr->needs_write_barrier()) {
+    Register temp = ToRegister(instr->TempAt(0));
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWrite(context, offset, value, temp);
+  }
 }
@@ -2142,6 +2153,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   ASSERT(receiver.is(eax));
   v8::internal::ParameterCount actual(eax);
   __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+  // Restore context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
@@ -2155,16 +2169,31 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, esi);
+}
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
 }
 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
 }
@@ -3406,7 +3435,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
-  bool pretenure = !instr->hydrogen()->pretenure();
+  bool pretenure = instr->hydrogen()->pretenure();
   if (shared_info->num_literals() == 0 && !pretenure) {
     FastNewClosureStub stub;
     __ push(Immediate(shared_info));
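
DoStoreContextSlot above only emits RecordWrite when needs_write_barrier() says so: stores of smis need no barrier because they add no heap pointer for the collector to trace. A self-contained sketch of that invariant (the remembered-set container here is an invented stand-in, not V8's):

    #include <vector>
    static std::vector<void**> remembered_slots;  // stand-in remembered set
    inline void StoreWithBarrier(void** slot, void* value, bool is_heap_ptr) {
      *slot = value;                       // the store itself
      if (is_heap_ptr) {                   // smis/immediates skip the barrier
        remembered_slots.push_back(slot);  // record for the next GC scan
      }
    }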

98
deps/v8/src/ia32/lithium-ia32.cc

@@ -29,6 +29,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
+#include "lithium-allocator-inl.h"
 #include "ia32/lithium-ia32.h"
 #include "ia32/lithium-codegen-ia32.h"
@@ -68,11 +69,35 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
 }
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as
+  // temporaries and outputs because all registers
+  // are blocked by the calling convention.
+  // Inputs can use either fixed register or have a short lifetime (be
+  // used at start of the instruction).
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           LUnallocated::cast(operand)->IsUsedAtStart() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+  for (TempIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+}
+#endif
+
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
-  PrintOutputOperandTo(stream);
+  if (HasResult()) {
+    PrintOutputOperandTo(stream);
+  }
   PrintDataTo(stream);
@@ -268,7 +293,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
 }
@@ -391,7 +424,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
 }
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
   if (instr->IsControl()) {
@@ -407,7 +440,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
     pointer_maps_.Add(instr->pointer_map());
     instr->pointer_map()->set_lithium_position(index);
   }
-  return index;
 }
@@ -675,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  allocator_->MarkAsCall();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
   instr = AssignPointerMap(instr);
   if (hinstr->HasSideEffects()) {
@@ -700,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  allocator_->MarkAsSaveDoubles();
+  instr->MarkAsSaveDoubles();
   return instr;
 }
@@ -909,7 +944,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
 void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
-  allocator_->BeginInstruction();
   if (current->has_position()) position_ = current->position();
   LInstruction* instr = current->CompileToLithium(this);
@@ -932,11 +966,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
     instr->set_hydrogen_value(current);
-    int index = chunk_->AddInstruction(instr, current_block_);
-    allocator_->SummarizeInstruction(index);
-  } else {
-    // This instruction should be omitted.
-    allocator_->OmitInstruction();
+    chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
 }
@@ -1140,13 +1170,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
 }
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return DefineAsRegister(new LContext);
+}
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new LGlobalObject);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
 }
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  return DefineAsRegister(new LGlobalReceiver);
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
@@ -1658,7 +1701,25 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  return DefineAsRegister(new LLoadContextSlot);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  LOperand* temp;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+    temp = TempRegister();
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+    temp = NULL;
+  }
+  return new LStoreContextSlot(context, value, temp);
 }
@@ -1756,7 +1817,8 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   // We only need a scratch register if we have a write barrier or we
   // have a store into the properties array (not in-object-property).
   LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
-      ? TempRegister() : NULL;
+      ? TempRegister()
+      : NULL;
   return new LStoreNamedField(obj, val, temp);
 }
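
Note the operand-policy split in DoStoreContextSlot above: when a write barrier will run, context and value are requested via UseTempRegister, telling the allocator the instruction may clobber them (RecordWrite does exactly that); without a barrier, plain UseRegister suffices. A toy model of the lifetime distinction, with invented names:

    enum UsePolicy { kUseWholeInstruction, kUseAtStartOnly };
    struct Use {
      int vreg;
      UsePolicy policy;
    };
    inline bool MayClobber(const Use& u) {
      // A use that only needs its register at the start of the instruction
      // leaves that register free for the instruction to overwrite.
      return u.policy == kUseAtStartOnly;
    }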

238
deps/v8/src/ia32/lithium-ia32.h

@@ -39,118 +39,6 @@ namespace internal {
 // Forward declarations.
 class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LStringCharCodeAt
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LStringLength
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
 #define LITHIUM_ALL_INSTRUCTION_LIST(V) \
   V(ControlInstruction) \
   V(Constant) \
@@ -187,6 +75,8 @@ class LCodeGen;
   V(CheckMap) \
   V(CheckPrototypeMaps) \
   V(CheckSmi) \
+  V(ClassOfTest) \
+  V(ClassOfTestAndBranch) \
   V(CmpID) \
   V(CmpIDAndBranch) \
   V(CmpJSObjectEq) \
@@ -197,16 +87,21 @@ class LCodeGen;
   V(ConstantD) \
   V(ConstantI) \
   V(ConstantT) \
+  V(Context) \
   V(DeleteProperty) \
   V(Deoptimize) \
   V(DivI) \
   V(DoubleToI) \
+  V(FixedArrayLength) \
   V(FunctionLiteral) \
   V(Gap) \
   V(GlobalObject) \
   V(GlobalReceiver) \
   V(Goto) \
-  V(FixedArrayLength) \
+  V(HasCachedArrayIndex) \
+  V(HasCachedArrayIndexAndBranch) \
+  V(HasInstanceType) \
+  V(HasInstanceTypeAndBranch) \
   V(InstanceOf) \
   V(InstanceOfAndBranch) \
   V(InstanceOfKnownGlobal) \
@@ -218,22 +113,16 @@ class LCodeGen;
   V(IsSmi) \
   V(IsSmiAndBranch) \
   V(JSArrayLength) \
-  V(HasInstanceType) \
-  V(HasInstanceTypeAndBranch) \
-  V(HasCachedArrayIndex) \
-  V(HasCachedArrayIndexAndBranch) \
-  V(ClassOfTest) \
-  V(ClassOfTestAndBranch) \
   V(Label) \
   V(LazyBailout) \
   V(LoadContextSlot) \
   V(LoadElements) \
+  V(LoadFunctionPrototype) \
   V(LoadGlobal) \
   V(LoadKeyedFastElement) \
   V(LoadKeyedGeneric) \
   V(LoadNamedField) \
   V(LoadNamedGeneric) \
-  V(LoadFunctionPrototype) \
   V(ModI) \
   V(MulI) \
   V(NumberTagD) \
@@ -241,6 +130,7 @@ class LCodeGen;
   V(NumberUntagD) \
   V(ObjectLiteral) \
   V(OsrEntry) \
+  V(OuterContext) \
   V(Parameter) \
   V(Power) \
   V(PushArgument) \
@@ -250,6 +140,7 @@ class LCodeGen;
   V(SmiTag) \
   V(SmiUntag) \
   V(StackCheck) \
+  V(StoreContextSlot) \
   V(StoreGlobal) \
   V(StoreKeyedFastElement) \
   V(StoreKeyedGeneric) \
@@ -291,7 +182,10 @@ class LCodeGen;
 class LInstruction: public ZoneObject {
  public:
   LInstruction()
-      : hydrogen_value_(NULL) { }
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        is_call_(false),
+        is_save_doubles_(false) { }
   virtual ~LInstruction() { }
   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -308,15 +202,14 @@ class LInstruction: public ZoneObject {
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
-  void set_environment(LEnvironment* env) { environment_.set(env); }
-  LEnvironment* environment() const { return environment_.get(); }
-  bool HasEnvironment() const { return environment_.is_set(); }
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
   void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
-  virtual bool HasResult() const = 0;
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -331,11 +224,35 @@ class LInstruction: public ZoneObject {
     return deoptimization_environment_.is_set();
   }
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
  private:
-  SetOncePointer<LEnvironment> environment_;
+  LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
@@ -362,6 +279,11 @@ class OperandContainer<ElementType, 0> {
  public:
   int length() { return 0; }
   void PrintOperandsTo(StringStream* stream) { }
+  ElementType& operator[](int i) {
+    UNREACHABLE();
+    static ElementType t = 0;
+    return t;
+  }
 };
@@ -1286,18 +1208,42 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
 };
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
   DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-  int context_chain_length() { return hydrogen()->context_chain_length(); }
+  LOperand* context() { return InputAt(0); }
   int slot_index() { return hydrogen()->slot_index(); }
   virtual void PrintDataTo(StringStream* stream);
 };
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
 class LPushArgument: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
@@ -1308,15 +1254,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
 };
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
 };
@@ -1815,7 +1791,7 @@ class LChunk: public ZoneObject {
       pointer_maps_(8),
       inlined_closures_(1) { }
-  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
   LConstantOperand* DefineConstantOperand(HConstant* constant);
   Handle<Object> LookupLiteral(LConstantOperand* operand) const;
   Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
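
The template arguments of LTemplateInstruction read as <results, inputs, temps>, so the new LStoreContextSlot<0, 2, 1> has no result, two inputs and one temp; the zero-length OperandContainer specialization above is what lets an arity of zero compile without a zero-sized array. A reduced sketch of the pattern (names invented):

    template <typename T, int N>
    struct Container {
      T& operator[](int i) { return elems_[i]; }
      T elems_[N];
    };

    template <typename T>
    struct Container<T, 0> {     // no zero-sized array needed
      T& operator[](int i) {
        static T dummy = T();    // mirrors the UNREACHABLE() stub above
        return dummy;
      }
    };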

20
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -1523,11 +1523,21 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
     mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
     mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
-    // The context may be an intermediate context, not a function context.
-    mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context. Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in esi).
+    mov(dst, esi);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
+  if (FLAG_debug_code) {
+    cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    Check(equal, "Yo dawg, I heard you liked function contexts "
+                 "so I put function contexts in all your contexts");
   }
 }
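
The new debug check leans on a context-chain invariant: a function context's FCONTEXT slot refers back to the context itself, while a 'with' context's points at the enclosing function context. A hypothetical model (invented types) of the test the cmp/Check pair performs:

    struct Ctx { Ctx* fcontext; };
    inline bool IsFunctionContext(const Ctx* ctx) {
      return ctx->fcontext == ctx;  // self-referential only for function contexts
    }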

11
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -258,6 +258,17 @@ class MacroAssembler: public Assembler {
     j(not_carry, is_smi);
   }
+
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label) {
+    test(value, Immediate(kSmiTagMask));
+    j(zero, smi_label, not_taken);
+  }
+
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+    test(value, Immediate(kSmiTagMask));
+    j(not_zero, not_smi_label, not_taken);
+  }
+
   // Assumes input is a heap object.
   void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
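
Both helpers rely on the ia32 smi representation: a smi is a 31-bit integer shifted left one bit, so the low bit (kSmiTagMask) is zero exactly for smis. A self-contained sketch of the encoding being tested (constant name is illustrative):

    #include <cstdint>
    static const intptr_t kSmiTagMaskSketch = 1;  // low bit distinguishes smis
    inline bool IsSmi(intptr_t value) { return (value & kSmiTagMaskSketch) == 0; }
    inline intptr_t SmiTag(int32_t v) { return static_cast<intptr_t>(v) << 1; }
    inline int32_t SmiUntag(intptr_t v) { return static_cast<int32_t>(v >> 1); }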

31
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -3155,6 +3155,37 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
 }
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayLoad(masm(),
+                             edx,
+                             eax,
+                             ecx,
+                             ebx,
+                             eax,
+                             &miss,
+                             &miss,
+                             &miss);
+
+  // Handle load cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
 // Specialized stub for constructing objects from functions which only have
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {

48
deps/v8/src/ic.cc

@@ -1204,23 +1204,31 @@ MaybeObject* KeyedLoadIC::Load(State state,
   if (use_ic) {
     Code* stub = generic_stub();
-    if (object->IsString() && key->IsNumber()) {
-      stub = string_stub();
-    } else if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->HasExternalArrayElements()) {
-        MaybeObject* probe =
-            StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
-        stub =
-            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
-      } else if (receiver->HasIndexedInterceptor()) {
-        stub = indexed_interceptor_stub();
-      } else if (state == UNINITIALIZED &&
-                 key->IsSmi() &&
-                 receiver->map()->has_fast_elements()) {
-        MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
-        stub =
-            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
+    if (state == UNINITIALIZED) {
+      if (object->IsString() && key->IsNumber()) {
+        stub = string_stub();
+      } else if (object->IsJSObject()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->HasExternalArrayElements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
+                                                              false);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (receiver->HasIndexedInterceptor()) {
+          stub = indexed_interceptor_stub();
+        } else if (receiver->HasPixelElements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadPixelArray(*receiver);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (key->IsSmi() &&
+                   receiver->map()->has_fast_elements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadSpecialized(*receiver);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        }
       }
     }
     if (stub != NULL) set_target(stub);
@@ -2053,6 +2061,8 @@ TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
   }
   if (left_type.IsInteger32() && right_type.IsInteger32()) {
+    // Platforms with 32-bit Smis have no distinct INT32 type.
+    if (kSmiValueSize == 32) return SMI;
     return INT32;
   }
@@ -2096,9 +2106,11 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
   }
   if (type == TRBinaryOpIC::SMI &&
       previous_type == TRBinaryOpIC::SMI) {
-    if (op == Token::DIV || op == Token::MUL) {
+    if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
       // Arithmetic on two Smi inputs has yielded a heap number.
       // That is the only way to get here from the Smi stub.
+      // With 32-bit Smis, all overflows give heap numbers, but with
+      // 31-bit Smis, most operations overflow to int32 results.
      result_type = TRBinaryOpIC::HEAP_NUMBER;
     } else {
       // Other operations on SMIs that overflow yield int32s.
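
The kSmiValueSize distinction above is about overflow headroom: two 31-bit smis can overflow the smi range yet still fit in an int32, so the stub can fall back to INT32 code, while with 32-bit smis any overflow already exceeds int32 and must go straight to heap numbers. A small sketch of the range check (function name invented):

    #include <cstdint>
    inline bool AddOverflowsSmi(int32_t a, int32_t b, int smi_value_size) {
      int64_t sum = static_cast<int64_t>(a) + b;
      int64_t max = (static_cast<int64_t>(1) << (smi_value_size - 1)) - 1;
      return sum > max || sum < -(max + 1);
    }
    // AddOverflowsSmi(0x3FFFFFFF, 1, 31) is true, yet the sum still fits
    // in an int32; with smi_value_size == 32 an overflowing sum never does.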

140
deps/v8/src/lithium-allocator-inl.h

@@ -0,0 +1,140 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
#define V8_LITHIUM_ALLOCATOR_INL_H_
#include "lithium-allocator.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#else
#error "Unknown architecture."
#endif
namespace v8 {
namespace internal {
bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
LInstruction* LAllocator::InstructionAt(int index) {
return chunk_->instructions()->at(index);
}
LGap* LAllocator::GapAt(int index) {
return chunk_->GetGapAt(index);
}
TempIterator::TempIterator(LInstruction* instr)
: instr_(instr),
limit_(instr->TempCount()),
current_(0) {
current_ = AdvanceToNext(0);
}
bool TempIterator::HasNext() { return current_ < limit_; }
LOperand* TempIterator::Next() {
ASSERT(HasNext());
return instr_->TempAt(current_);
}
int TempIterator::AdvanceToNext(int start) {
while (start < limit_ && instr_->TempAt(start) == NULL) start++;
return start;
}
void TempIterator::Advance() {
current_ = AdvanceToNext(current_ + 1);
}
InputIterator::InputIterator(LInstruction* instr)
: instr_(instr),
limit_(instr->InputCount()),
current_(0) {
current_ = AdvanceToNext(0);
}
bool InputIterator::HasNext() { return current_ < limit_; }
LOperand* InputIterator::Next() {
ASSERT(HasNext());
return instr_->InputAt(current_);
}
void InputIterator::Advance() {
current_ = AdvanceToNext(current_ + 1);
}
int InputIterator::AdvanceToNext(int start) {
while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
return start;
}
UseIterator::UseIterator(LInstruction* instr)
: input_iterator_(instr), env_iterator_(instr->environment()) { }
bool UseIterator::HasNext() {
return input_iterator_.HasNext() || env_iterator_.HasNext();
}
LOperand* UseIterator::Next() {
ASSERT(HasNext());
return input_iterator_.HasNext()
? input_iterator_.Next()
: env_iterator_.Next();
}
void UseIterator::Advance() {
input_iterator_.HasNext()
? input_iterator_.Advance()
: env_iterator_.Advance();
}
} } // namespace v8::internal
#endif // V8_LITHIUM_ALLOCATOR_INL_H_
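
Usage sketch for the iterators above, mirroring the rewritten call sites in lithium-allocator.cc later in this diff: the constructors pre-advance past entries the allocator never wants, so callers no longer filter by hand.

    // UseIterator skips constant inputs; TempIterator skips NULL temps.
    for (UseIterator it(instr); it.HasNext(); it.Advance()) {
      LOperand* use = it.Next();   // never a constant operand
      // ... process register uses ...
    }
    for (TempIterator it(instr); it.HasNext(); it.Advance()) {
      LOperand* temp = it.Next();  // never NULL
      // ... process temporaries ...
    }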

171
deps/v8/src/lithium-allocator.cc

@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "lithium-allocator.h"
+#include "lithium-allocator-inl.h"
 #include "hydrogen.h"
 #include "string-stream.h"
@@ -532,7 +532,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
 void LAllocator::InitializeLivenessAnalysis() {
   // Initialize the live_in sets for each block to NULL.
-  int block_count = graph()->blocks()->length();
+  int block_count = graph_->blocks()->length();
   live_in_sets_.Initialize(block_count);
   live_in_sets_.AddBlock(NULL, block_count);
 }
@@ -613,7 +613,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
   }
   if (is_tagged) {
     TraceAlloc("Fixed reg is tagged at %d\n", pos);
-    LInstruction* instr = chunk_->instructions()->at(pos);
+    LInstruction* instr = InstructionAt(pos);
     if (instr->HasPointerMap()) {
       instr->pointer_map()->RecordPointer(operand);
     }
@@ -668,17 +668,17 @@ LiveRange* LAllocator::LiveRangeFor(int index) {
 }
-LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+LGap* LAllocator::GetLastGap(HBasicBlock* block) {
   int last_instruction = block->last_instruction_index();
   int index = chunk_->NearestGapPos(last_instruction);
-  return chunk_->GetGapAt(index);
+  return GapAt(index);
 }
 HPhi* LAllocator::LookupPhi(LOperand* operand) const {
   if (!operand->IsUnallocated()) return NULL;
   int index = operand->VirtualRegister();
-  HValue* instr = graph()->LookupValue(index);
+  HValue* instr = graph_->LookupValue(index);
   if (instr != NULL && instr->IsPhi()) {
     return HPhi::cast(instr);
   }
@@ -737,7 +737,7 @@ void LAllocator::Use(LifetimePosition block_start,
 void LAllocator::AddConstraintsGapMove(int index,
                                        LOperand* from,
                                        LOperand* to) {
-  LGap* gap = chunk_->GetGapAt(index);
+  LGap* gap = GapAt(index);
   LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
   if (from->IsUnallocated()) {
     const ZoneList<LMoveOperands>* move_operands = move->move_operands();
@@ -760,24 +760,24 @@ void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
   for (int i = start; i <= end; ++i) {
-    if (chunk_->IsGapAt(i)) {
-      InstructionSummary* summary = NULL;
-      InstructionSummary* prev_summary = NULL;
-      if (i < end) summary = GetSummary(i + 1);
-      if (i > start) prev_summary = GetSummary(i - 1);
-      MeetConstraintsBetween(prev_summary, summary, i);
+    if (IsGapAt(i)) {
+      LInstruction* instr = NULL;
+      LInstruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
     }
   }
 }
-void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
-                                        InstructionSummary* second,
+void LAllocator::MeetConstraintsBetween(LInstruction* first,
+                                        LInstruction* second,
                                         int gap_index) {
   // Handle fixed temporaries.
   if (first != NULL) {
-    for (int i = 0; i < first->TempCount(); ++i) {
-      LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+    for (TempIterator it(first); it.HasNext(); it.Advance()) {
+      LUnallocated* temp = LUnallocated::cast(it.Next());
       if (temp->HasFixedPolicy()) {
         AllocateFixed(temp, gap_index - 1, false);
       }
@@ -810,7 +810,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
       // and splitting of live ranges do not account for it.
       // Thus it should be inserted to a lifetime position corresponding to
      // the instruction end.
-      LGap* gap = chunk_->GetGapAt(gap_index);
+      LGap* gap = GapAt(gap_index);
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
       move->AddMove(first_output, range->GetSpillOperand());
     }
@@ -818,8 +818,8 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
   // Handle fixed input operands of second instruction.
   if (second != NULL) {
-    for (int i = 0; i < second->InputCount(); ++i) {
-      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+    for (UseIterator it(second); it.HasNext(); it.Advance()) {
+      LUnallocated* cur_input = LUnallocated::cast(it.Next());
       if (cur_input->HasFixedPolicy()) {
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
         bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -848,7 +848,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
   if (second != NULL && second->Output() != NULL) {
     LUnallocated* second_output = LUnallocated::cast(second->Output());
     if (second_output->HasSameAsInputPolicy()) {
-      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+      LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
       int output_vreg = second_output->VirtualRegister();
       int input_vreg = cur_input->VirtualRegister();
@@ -858,7 +858,7 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
       if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
         int index = gap_index + 1;
-        LInstruction* instr = chunk_->instructions()->at(index);
+        LInstruction* instr = InstructionAt(index);
         if (instr->HasPointerMap()) {
           instr->pointer_map()->RecordPointer(input_copy);
         }
@@ -886,9 +886,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
     LifetimePosition curr_position =
         LifetimePosition::FromInstructionIndex(index);
-    if (chunk_->IsGapAt(index)) {
+    if (IsGapAt(index)) {
       // We have a gap at this position.
-      LGap* gap = chunk_->GetGapAt(index);
+      LGap* gap = GapAt(index);
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
       const ZoneList<LMoveOperands>* move_operands = move->move_operands();
       for (int i = 0; i < move_operands->length(); ++i) {
@@ -922,17 +922,17 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
         }
       }
     } else {
-      ASSERT(!chunk_->IsGapAt(index));
-      InstructionSummary* summary = GetSummary(index);
-      if (summary != NULL) {
-        LOperand* output = summary->Output();
+      ASSERT(!IsGapAt(index));
+      LInstruction* instr = InstructionAt(index);
+      if (instr != NULL) {
+        LOperand* output = instr->Output();
         if (output != NULL) {
           if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
           Define(curr_position, output, NULL);
         }
-        if (summary->IsCall()) {
+        if (instr->IsMarkedAsCall()) {
          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
            if (output == NULL || !output->IsRegister() ||
                output->index() != i) {
@@ -943,7 +943,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           }
         }
-        if (summary->IsCall() || summary->IsSaveDoubles()) {
+        if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
            if (output == NULL || !output->IsDoubleRegister() ||
                output->index() != i) {
@@ -954,8 +954,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           }
         }
-        for (int i = 0; i < summary->InputCount(); ++i) {
-          LOperand* input = summary->InputAt(i);
+        for (UseIterator it(instr); it.HasNext(); it.Advance()) {
+          LOperand* input = it.Next();
           LifetimePosition use_pos;
           if (input->IsUnallocated() &&
@@ -969,9 +969,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           if (input->IsUnallocated()) live->Add(input->VirtualRegister());
         }
-        for (int i = 0; i < summary->TempCount(); ++i) {
-          LOperand* temp = summary->TempAt(i);
-          if (summary->IsCall()) {
+        for (TempIterator it(instr); it.HasNext(); it.Advance()) {
+          LOperand* temp = it.Next();
+          if (instr->IsMarkedAsCall()) {
            if (temp->IsRegister()) continue;
            if (temp->IsUnallocated()) {
              LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1042,9 +1042,9 @@ void LAllocator::Allocate(LChunk* chunk) {
 void LAllocator::MeetRegisterConstraints() {
-  HPhase phase("Register constraints", chunk());
+  HPhase phase("Register constraints", chunk_);
   first_artificial_register_ = next_virtual_register_;
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int i = 0; i < blocks->length(); ++i) {
     HBasicBlock* block = blocks->at(i);
     MeetRegisterConstraints(block);
@@ -1053,10 +1053,10 @@ void LAllocator::MeetRegisterConstraints() {
 void LAllocator::ResolvePhis() {
-  HPhase phase("Resolve phis", chunk());
+  HPhase phase("Resolve phis", chunk_);
   // Process the blocks in reverse order.
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
     HBasicBlock* block = blocks->at(block_id);
     ResolvePhis(block);
@@ -1094,7 +1094,7 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
      if (block->predecessors()->length() == 1) {
gap = chunk_->GetGapAt(block->first_instruction_index()); gap = GapAt(block->first_instruction_index());
} else { } else {
ASSERT(pred->end()->SecondSuccessor() == NULL); ASSERT(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred); gap = GetLastGap(pred);
@ -1107,19 +1107,19 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) { LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
int index = pos.InstructionIndex(); int index = pos.InstructionIndex();
if (chunk_->IsGapAt(index)) { if (IsGapAt(index)) {
LGap* gap = chunk_->GetGapAt(index); LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove( return gap->GetOrCreateParallelMove(
pos.IsInstructionStart() ? LGap::START : LGap::END); pos.IsInstructionStart() ? LGap::START : LGap::END);
} }
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1); int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove( return GapAt(gap_pos)->GetOrCreateParallelMove(
(gap_pos < index) ? LGap::AFTER : LGap::BEFORE); (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
} }
HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) { HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex())); LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
return gap->block(); return gap->block();
} }
@ -1166,7 +1166,7 @@ bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
void LAllocator::ResolveControlFlow() { void LAllocator::ResolveControlFlow() {
HPhase phase("Resolve control flow", this); HPhase phase("Resolve control flow", this);
const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) { for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id); HBasicBlock* block = blocks->at(block_id);
if (CanEagerlyResolveControlFlow(block)) continue; if (CanEagerlyResolveControlFlow(block)) continue;
@ -1189,7 +1189,7 @@ void LAllocator::BuildLiveRanges() {
HPhase phase("Build live ranges", this); HPhase phase("Build live ranges", this);
InitializeLivenessAnalysis(); InitializeLivenessAnalysis();
// Process the blocks in reverse order. // Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) { for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
HBasicBlock* block = blocks->at(block_id); HBasicBlock* block = blocks->at(block_id);
BitVector* live = ComputeLiveOut(block); BitVector* live = ComputeLiveOut(block);
@ -1264,7 +1264,7 @@ void LAllocator::BuildLiveRanges() {
found = true; found = true;
int operand_index = iterator.Current(); int operand_index = iterator.Current();
PrintF("Function: %s\n", PrintF("Function: %s\n",
*graph()->info()->function()->debug_name()->ToCString()); *graph_->info()->function()->debug_name()->ToCString());
PrintF("Value %d used before first definition!\n", operand_index); PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index); LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value()); PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@ -1469,7 +1469,7 @@ void LAllocator::AllocateRegisters() {
if (current->HasAllocatedSpillOperand()) { if (current->HasAllocatedSpillOperand()) {
TraceAlloc("Live range %d already has a spill operand\n", current->id()); TraceAlloc("Live range %d already has a spill operand\n", current->id());
LifetimePosition next_pos = position; LifetimePosition next_pos = position;
if (chunk_->IsGapAt(next_pos.InstructionIndex())) { if (IsGapAt(next_pos.InstructionIndex())) {
next_pos = next_pos.NextInstruction(); next_pos = next_pos.NextInstruction();
} }
UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos); UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
@ -1556,14 +1556,8 @@ void LAllocator::TraceAlloc(const char* msg, ...) {
} }
void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
operand->set_virtual_register(value->id());
current_summary()->AddInput(operand);
}
bool LAllocator::HasTaggedValue(int virtual_register) const { bool LAllocator::HasTaggedValue(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register); HValue* value = graph_->LookupValue(virtual_register);
if (value == NULL) return false; if (value == NULL) return false;
return value->representation().IsTagged(); return value->representation().IsTagged();
} }
@ -1571,7 +1565,7 @@ bool LAllocator::HasTaggedValue(int virtual_register) const {
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const { RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
if (virtual_register < first_artificial_register_) { if (virtual_register < first_artificial_register_) {
HValue* value = graph()->LookupValue(virtual_register); HValue* value = graph_->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) { if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS; return DOUBLE_REGISTERS;
} }
@ -1584,39 +1578,8 @@ RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
} }
void LAllocator::MarkAsCall() {
// Call instructions can use only fixed registers as
// temporaries and outputs because all registers
// are blocked by the calling convention.
// Inputs can use either fixed register or have a short lifetime (be
// used at start of the instruction).
InstructionSummary* summary = current_summary();
#ifdef DEBUG
ASSERT(summary->Output() == NULL ||
LUnallocated::cast(summary->Output())->HasFixedPolicy() ||
!LUnallocated::cast(summary->Output())->HasRegisterPolicy());
for (int i = 0; i < summary->InputCount(); i++) {
ASSERT(LUnallocated::cast(summary->InputAt(i))->HasFixedPolicy() ||
LUnallocated::cast(summary->InputAt(i))->IsUsedAtStart() ||
!LUnallocated::cast(summary->InputAt(i))->HasRegisterPolicy());
}
for (int i = 0; i < summary->TempCount(); i++) {
ASSERT(LUnallocated::cast(summary->TempAt(i))->HasFixedPolicy() ||
!LUnallocated::cast(summary->TempAt(i))->HasRegisterPolicy());
}
#endif
summary->MarkAsCall();
}
void LAllocator::MarkAsSaveDoubles() {
current_summary()->MarkAsSaveDoubles();
}
void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) { void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
operand->set_virtual_register(instr->id()); operand->set_virtual_register(instr->id());
current_summary()->SetOutput(operand);
} }
@ -1625,40 +1588,16 @@ void LAllocator::RecordTemporary(LUnallocated* operand) {
if (!operand->HasFixedPolicy()) { if (!operand->HasFixedPolicy()) {
operand->set_virtual_register(next_virtual_register_++); operand->set_virtual_register(next_virtual_register_++);
} }
current_summary()->AddTemp(operand);
}
int LAllocator::max_initial_value_ids() {
return LUnallocated::kMaxVirtualRegisters / 32;
}
void LAllocator::BeginInstruction() {
if (next_summary_ == NULL) {
next_summary_ = new InstructionSummary();
}
summary_stack_.Add(next_summary_);
next_summary_ = NULL;
} }
void LAllocator::SummarizeInstruction(int index) { void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
InstructionSummary* sum = summary_stack_.RemoveLast(); operand->set_virtual_register(value->id());
if (summaries_.length() <= index) {
summaries_.AddBlock(NULL, index + 1 - summaries_.length());
}
ASSERT(summaries_[index] == NULL);
if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
summaries_[index] = sum;
} else {
next_summary_ = sum;
}
} }
void LAllocator::OmitInstruction() { int LAllocator::max_initial_value_ids() {
summary_stack_.RemoveLast(); return LUnallocated::kMaxVirtualRegisters / 32;
} }
@ -2007,7 +1946,7 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
bool LAllocator::IsBlockBoundary(LifetimePosition pos) { bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() && return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel(); InstructionAt(pos.InstructionIndex())->IsLabel();
} }
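The hunks above delete the whole InstructionSummary side table: the allocator used to rebuild, per instruction index, a summary of output/input/temp operands plus call flags, and now reads the same facts straight off the LInstruction (Output(), IsMarkedAsCall(), IsMarkedAsSaveDoubles(), and the Use/Temp iterators). A self-contained toy sketch of the two shapes; all names here are illustrative, not V8 API:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Toy instruction: it already knows its own operands.
    struct Instr {
      bool is_call;
      std::vector<int> inputs;
    };

    // Old shape: a parallel table, indexed by instruction number, that had to
    // be built in lock-step with instruction emission and could hold NULL gaps.
    int InputCountViaTable(const std::vector<Instr*>& summaries, int index) {
      assert(summaries[index] != NULL);
      return static_cast<int>(summaries[index]->inputs.size());
    }

    // New shape: ask the instruction directly; no second structure to keep
    // in sync with the instruction stream.
    int InputCountDirect(const Instr* instr) {
      return static_cast<int>(instr->inputs.size());
    }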

118  deps/v8/src/lithium-allocator.h

@@ -31,6 +31,7 @@
 #include "v8.h"
 #include "data-flow.h"
+#include "lithium.h"
 #include "zone.h"
 namespace v8 {
@@ -153,52 +154,55 @@ enum RegisterKind {
 // A register-allocator view of a Lithium instruction. It contains the id of
 // the output operand and a list of input operand uses.
-class InstructionSummary: public ZoneObject {
+class LInstruction;
+class LEnvironment;
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
  public:
-  InstructionSummary()
-      : output_operand_(NULL),
-        input_count_(0),
-        operands_(4),
-        is_call_(false),
-        is_save_doubles_(false) {}
-
-  // Output operands.
-  LOperand* Output() const { return output_operand_; }
-  void SetOutput(LOperand* output) {
-    ASSERT(output_operand_ == NULL);
-    output_operand_ = output;
-  }
+  inline explicit TempIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
-  // Input operands.
-  int InputCount() const { return input_count_; }
-  LOperand* InputAt(int i) const {
-    ASSERT(i < input_count_);
-    return operands_[i];
-  }
-  void AddInput(LOperand* input) {
-    operands_.InsertAt(input_count_, input);
-    input_count_++;
-  }
+ private:
+  inline int AdvanceToNext(int start);
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
-  // Temporary operands.
-  int TempCount() const { return operands_.length() - input_count_; }
-  LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
-  void AddTemp(LOperand* temp) { operands_.Add(temp); }
-  void MarkAsCall() { is_call_ = true; }
-  bool IsCall() const { return is_call_; }
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+  inline explicit InputIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
+
+ private:
+  inline int AdvanceToNext(int start);
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
-  bool IsSaveDoubles() const { return is_save_doubles_; }
+class UseIterator BASE_EMBEDDED {
+ public:
+  inline explicit UseIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
  private:
-  LOperand* output_operand_;
-  int input_count_;
-  ZoneList<LOperand*> operands_;
-  bool is_call_;
-  bool is_save_doubles_;
+  InputIterator input_iterator_;
+  DeepIterator env_iterator_;
 };
 // Representation of the non-empty interval [start,end[.
 class UseInterval: public ZoneObject {
  public:
@@ -428,9 +432,6 @@ class LAllocator BASE_EMBEDDED {
  public:
  explicit LAllocator(int first_virtual_register, HGraph* graph)
      : chunk_(NULL),
-        summaries_(0),
-        next_summary_(NULL),
-        summary_stack_(2),
        live_in_sets_(0),
        live_ranges_(16),
        fixed_live_ranges_(8),
@@ -457,27 +458,12 @@ class LAllocator BASE_EMBEDDED {
   // Record a temporary operand.
   void RecordTemporary(LUnallocated* operand);
-  // Marks the current instruction as a call.
-  void MarkAsCall();
-  // Marks the current instruction as requiring saving double registers.
-  void MarkAsSaveDoubles();
   // Checks whether the value of a given virtual register is tagged.
   bool HasTaggedValue(int virtual_register) const;
   // Returns the register kind required by the given virtual register.
   RegisterKind RequiredRegisterKind(int virtual_register) const;
-  // Begin a new instruction.
-  void BeginInstruction();
-  // Summarize the current instruction.
-  void SummarizeInstruction(int index);
-  // Summarize the current instruction.
-  void OmitInstruction();
   // Control max function size.
   static int max_initial_value_ids();
@@ -525,8 +511,8 @@ class LAllocator BASE_EMBEDDED {
   void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
   void ProcessInstructions(HBasicBlock* block, BitVector* live);
   void MeetRegisterConstraints(HBasicBlock* block);
-  void MeetConstraintsBetween(InstructionSummary* first,
-                              InstructionSummary* second,
+  void MeetConstraintsBetween(LInstruction* first,
+                              LInstruction* second,
                               int gap_index);
   void ResolvePhis(HBasicBlock* block);
@@ -604,12 +590,6 @@ class LAllocator BASE_EMBEDDED {
   // Return the block which contains give lifetime position.
   HBasicBlock* GetBlock(LifetimePosition pos);
-  // Current active summary.
-  InstructionSummary* current_summary() const { return summary_stack_.last(); }
-  // Get summary for given instruction index.
-  InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
   // Helper methods for the fixed registers.
   int RegisterCount() const;
   static int FixedLiveRangeID(int index) { return -index - 1; }
@@ -618,15 +598,17 @@ class LAllocator BASE_EMBEDDED {
   LiveRange* FixedDoubleLiveRangeFor(int index);
   LiveRange* LiveRangeFor(int index);
   HPhi* LookupPhi(LOperand* operand) const;
-  LGap* GetLastGap(HBasicBlock* block) const;
+  LGap* GetLastGap(HBasicBlock* block);
   const char* RegisterName(int allocation_index);
-  LChunk* chunk_;
-  ZoneList<InstructionSummary*> summaries_;
-  InstructionSummary* next_summary_;
-  ZoneList<InstructionSummary*> summary_stack_;
+  inline bool IsGapAt(int index);
+  inline LInstruction* InstructionAt(int index);
+  inline LGap* GapAt(int index);
+
+  LChunk* chunk_;
   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
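The three iterator declarations above share one shape: HasNext/Next/Advance driven by an AdvanceToNext helper that skips entries the allocator ignores. A runnable model of that shape, with simplified stand-in types (the real skip predicate also excludes constant operands):

    #include <cstddef>
    #include <vector>

    struct Operand { int vreg; };
    struct Instruction { std::vector<Operand*> operands; };

    // Model of the TempIterator/InputIterator shape: scan an operand array,
    // skipping null entries, exactly the loop structure the header declares.
    class OperandIterator {
     public:
      explicit OperandIterator(Instruction* instr)
          : instr_(instr),
            limit_(static_cast<int>(instr->operands.size())),
            current_(0) {
        current_ = AdvanceToNext(0);
      }
      bool HasNext() const { return current_ < limit_; }
      Operand* Next() const { return instr_->operands[current_]; }
      void Advance() { current_ = AdvanceToNext(current_ + 1); }

     private:
      int AdvanceToNext(int start) {
        while (start < limit_ && instr_->operands[start] == NULL) start++;
        return start;
      }
      Instruction* instr_;
      int limit_;
      int current_;
    };

    // Usage mirrors the new loops in lithium-allocator.cc:
    //   for (OperandIterator it(&instr); it.HasNext(); it.Advance()) { ... }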

76  deps/v8/src/lithium.h

@@ -509,6 +509,82 @@ class LEnvironment: public ZoneObject {
   friend class LCodegen;
 };
+// Iterates over the non-null, non-constant operands in an environment.
+class ShallowIterator BASE_EMBEDDED {
+ public:
+  explicit ShallowIterator(LEnvironment* env)
+      : env_(env),
+        limit_(env != NULL ? env->values()->length() : 0),
+        current_(0) {
+    current_ = AdvanceToNext(0);
+  }
+
+  inline bool HasNext() {
+    return env_ != NULL && current_ < limit_;
+  }
+
+  inline LOperand* Next() {
+    ASSERT(HasNext());
+    return env_->values()->at(current_);
+  }
+
+  inline void Advance() {
+    current_ = AdvanceToNext(current_ + 1);
+  }
+
+  inline LEnvironment* env() { return env_; }
+
+ private:
+  inline int AdvanceToNext(int start) {
+    while (start < limit_ &&
+           (env_->values()->at(start) == NULL ||
+            env_->values()->at(start)->IsConstantOperand())) {
+      start++;
+    }
+    return start;
+  }
+
+  LEnvironment* env_;
+  int limit_;
+  int current_;
+};
+
+// Iterator for non-null, non-constant operands incl. outer environments.
+class DeepIterator BASE_EMBEDDED {
+ public:
+  explicit DeepIterator(LEnvironment* env)
+      : current_iterator_(env) { }
+
+  inline bool HasNext() {
+    if (current_iterator_.HasNext()) return true;
+    if (current_iterator_.env() == NULL) return false;
+    AdvanceToOuter();
+    return current_iterator_.HasNext();
+  }
+
+  inline LOperand* Next() {
+    ASSERT(current_iterator_.HasNext());
+    return current_iterator_.Next();
+  }
+
+  inline void Advance() {
+    if (current_iterator_.HasNext()) {
+      current_iterator_.Advance();
+    } else {
+      AdvanceToOuter();
+    }
+  }
+
+ private:
+  inline void AdvanceToOuter() {
+    current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+  }
+
+  ShallowIterator current_iterator_;
+};
 } }  // namespace v8::internal
 #endif  // V8_LITHIUM_H_
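ShallowIterator walks one environment's operand list; DeepIterator restarts a ShallowIterator on env->outer() whenever the inner one is exhausted, so a UseIterator sees every interesting operand across an instruction's whole environment chain, innermost first. A self-contained model of those semantics (simplified types, no V8 API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // An environment is a list of slots plus a pointer to its outer
    // environment; null slots are skipped, as in ShallowIterator.
    struct Env {
      std::vector<const char*> values;
      Env* outer;
    };

    void VisitAll(Env* env) {
      for (Env* e = env; e != NULL; e = e->outer) {       // DeepIterator's outer walk
        for (size_t i = 0; i < e->values.size(); ++i) {   // ShallowIterator's scan
          if (e->values[i] == NULL) continue;             // AdvanceToNext's skip
          std::printf("%s\n", e->values[i]);
        }
      }
    }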

9  deps/v8/src/messages.js

@@ -38,10 +38,6 @@ var COMPILATION_TYPE_HOST = 0;
 var COMPILATION_TYPE_EVAL = 1;
 var COMPILATION_TYPE_JSON = 2;
-// Lazily initialized.
-var kVowelSounds = 0;
-var kCapitalVowelSounds = 0;
 // Matches Messages::kNoLineNumberInfo from v8.h
 var kNoLineNumberInfo = 0;
@@ -52,8 +48,7 @@ var kAddMessageAccessorsMarker = { };
 var kMessages = 0;
-var kReplacementMarkers =
-    [ "%0", "%1", "%2", "%3" ]
+var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -152,6 +147,7 @@ function FormatMessage(message) {
     unexpected_token_number: ["Unexpected number"],
     unexpected_token_string: ["Unexpected string"],
     unexpected_token_identifier: ["Unexpected identifier"],
+    unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
     unexpected_eos: ["Unexpected end of input"],
     malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
     unterminated_regexp: ["Invalid regular expression: missing /"],
@@ -226,6 +222,7 @@ function FormatMessage(message) {
     strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
     strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
     strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+    strict_reserved_word: ["Use of future reserved word in strict mode"],
   };
 }
 var message_type = %MessageGetType(message);

28  deps/v8/src/objects-inl.h

@@ -2510,29 +2510,29 @@ void Code::set_stack_slots(unsigned slots) {
 }
-unsigned Code::safepoint_table_start() {
+unsigned Code::safepoint_table_offset() {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
-  return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+  return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
 }
-void Code::set_safepoint_table_start(unsigned offset) {
+void Code::set_safepoint_table_offset(unsigned offset) {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+  WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
 }
-unsigned Code::stack_check_table_start() {
+unsigned Code::stack_check_table_offset() {
   ASSERT(kind() == FUNCTION);
-  return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+  return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
 }
-void Code::set_stack_check_table_start(unsigned offset) {
+void Code::set_stack_check_table_offset(unsigned offset) {
   ASSERT(kind() == FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+  WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
 }
@@ -2993,6 +2993,18 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
 }
+bool SharedFunctionInfo::strict_mode() {
+  return BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+void SharedFunctionInfo::set_strict_mode(bool value) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kStrictModeFunction,
+                                     value));
+}
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
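The new strict_mode flag rides in bit 8 (kStrictModeFunction, declared in objects.h below) of the SharedFunctionInfo compiler-hints word via the BooleanBit helper. A minimal, runnable model of the get/set semantics the accessors rely on (the helper's real implementation lives elsewhere in V8; this is the standard bit-twiddling it performs):

    #include <cassert>

    namespace BooleanBit {
      inline bool get(int hints, int bit) { return (hints & (1 << bit)) != 0; }
      inline int set(int hints, int bit, bool value) {
        return value ? (hints | (1 << bit)) : (hints & ~(1 << bit));
      }
    }

    int main() {
      const int kStrictModeFunction = 8;  // bit index from the diff
      int hints = 0;
      hints = BooleanBit::set(hints, kStrictModeFunction, true);
      assert(BooleanBit::get(hints, kStrictModeFunction));
      hints = BooleanBit::set(hints, kStrictModeFunction, false);
      assert(!BooleanBit::get(hints, kStrictModeFunction));
      return 0;
    }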

55  deps/v8/src/objects.cc

@@ -1213,6 +1213,8 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
 MaybeObject* JSObject::AddFastProperty(String* name,
                                        Object* value,
                                        PropertyAttributes attributes) {
+  ASSERT(!IsJSGlobalProxy());
   // Normalize the object if the name is an actual string (not the
   // hidden symbols) and is not a real identifier.
   StringInputBuffer buffer(name);
@@ -2288,6 +2290,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
   // The global object is always normalized.
   ASSERT(!IsGlobalObject());
+  // JSGlobalProxy must never be normalized
+  ASSERT(!IsJSGlobalProxy());
   // Allocate new content.
   int property_count = map()->NumberOfDescribedProperties();
   if (expected_additional_properties > 0) {
@@ -5920,7 +5925,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
       Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
       it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
@@ -6001,7 +6006,7 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
 void Code::SetNoStackCheckTable() {
   // Indicate the absence of a stack-check table by a table start after the
   // end of the instructions. Table start must be aligned, so round up.
-  set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+  set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
 }
@@ -6278,7 +6283,7 @@ void Code::Disassemble(const char* name, FILE* out) {
     }
     PrintF(out, "\n");
   } else if (kind() == FUNCTION) {
-    unsigned offset = stack_check_table_start();
+    unsigned offset = stack_check_table_offset();
     // If there is no stack check table, the "table start" will at or after
     // (due to alignment) the end of the instruction stream.
     if (static_cast<int>(offset) < instruction_size()) {
@@ -6679,6 +6684,13 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
     return UNDEFINED_ELEMENT;
   }
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return UNDEFINED_ELEMENT;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->HasLocalElement(index);
+  }
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
@@ -7986,20 +7998,28 @@ class StringKey : public HashTableKey {
 // StringSharedKeys are used as keys in the eval cache.
 class StringSharedKey : public HashTableKey {
  public:
-  StringSharedKey(String* source, SharedFunctionInfo* shared)
-      : source_(source), shared_(shared) { }
+  StringSharedKey(String* source,
+                  SharedFunctionInfo* shared,
+                  StrictModeFlag strict_mode)
+      : source_(source),
+        shared_(shared),
+        strict_mode_(strict_mode) { }
   bool IsMatch(Object* other) {
     if (!other->IsFixedArray()) return false;
     FixedArray* pair = FixedArray::cast(other);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     if (shared != shared_) return false;
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    if (strict_mode != strict_mode_) return false;
     String* source = String::cast(pair->get(1));
     return source->Equals(source_);
   }
   static uint32_t StringSharedHashHelper(String* source,
-                                         SharedFunctionInfo* shared) {
+                                         SharedFunctionInfo* shared,
+                                         StrictModeFlag strict_mode) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
@@ -8009,36 +8029,41 @@ class StringSharedKey : public HashTableKey {
       // collection.
       Script* script = Script::cast(shared->script());
       hash ^= String::cast(script->source())->Hash();
+      if (strict_mode == kStrictMode) hash ^= 0x8000;
       hash += shared->start_position();
     }
     return hash;
   }
   uint32_t Hash() {
-    return StringSharedHashHelper(source_, shared_);
+    return StringSharedHashHelper(source_, shared_, strict_mode_);
   }
   uint32_t HashForObject(Object* obj) {
     FixedArray* pair = FixedArray::cast(obj);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     String* source = String::cast(pair->get(1));
-    return StringSharedHashHelper(source, shared);
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    return StringSharedHashHelper(source, shared, strict_mode);
   }
   MUST_USE_RESULT MaybeObject* AsObject() {
     Object* obj;
-    { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
     pair->set(0, shared_);
     pair->set(1, source_);
+    pair->set(2, Smi::FromInt(strict_mode_));
     return pair;
   }
  private:
   String* source_;
   SharedFunctionInfo* shared_;
+  StrictModeFlag strict_mode_;
 };
@@ -8997,8 +9022,10 @@ Object* CompilationCacheTable::Lookup(String* src) {
 }
-Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
-  StringSharedKey key(src, context->closure()->shared());
+Object* CompilationCacheTable::LookupEval(String* src,
+                                          Context* context,
+                                          StrictModeFlag strict_mode) {
+  StringSharedKey key(src, context->closure()->shared(), strict_mode);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return Heap::undefined_value();
   return get(EntryToIndex(entry) + 1);
@@ -9033,8 +9060,10 @@ MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
 MaybeObject* CompilationCacheTable::PutEval(String* src,
                                             Context* context,
-                                            Object* value) {
-  StringSharedKey key(src, context->closure()->shared());
+                                            SharedFunctionInfo* value) {
+  StringSharedKey key(src,
+                      context->closure()->shared(),
+                      value->strict_mode() ? kStrictMode : kNonStrictMode);
   Object* obj;
   { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
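The eval-cache key grows a third field so that strict and non-strict evals of identical source never hit the same cache entry (a strict function compiled from the same text has different semantics). The hash is perturbed by a single bit for strict mode, mirroring the `hash ^= 0x8000` line above. A self-contained sketch of the idea with simplified types:

    #include <cassert>
    #include <functional>
    #include <string>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    // Model of the StringSharedKey hash: (source, shared, strict_mode),
    // with strict mode flipping one bit so the two modes land in
    // different buckets. shared_hash stands in for the SharedFunctionInfo part.
    size_t EvalCacheHash(const std::string& source, size_t shared_hash,
                         StrictModeFlag strict_mode) {
      size_t hash = std::hash<std::string>()(source) ^ shared_hash;
      if (strict_mode == kStrictMode) hash ^= 0x8000;  // same trick as the diff
      return hash;
    }

    int main() {
      size_t sloppy = EvalCacheHash("x", 1, kNonStrictMode);
      size_t strict = EvalCacheHash("x", 1, kStrictMode);
      assert(sloppy != strict);  // strict and non-strict evals cached apart
      return 0;
    }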

23  deps/v8/src/objects.h

@@ -3275,13 +3275,13 @@ class Code: public HeapObject {
   // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
   // the instruction stream where the safepoint table starts.
-  inline unsigned safepoint_table_start();
-  inline void set_safepoint_table_start(unsigned offset);
+  inline unsigned safepoint_table_offset();
+  inline void set_safepoint_table_offset(unsigned offset);
   // [stack_check_table_start]: For kind FUNCTION, the offset in the
   // instruction stream where the stack check table starts.
-  inline unsigned stack_check_table_start();
-  inline void set_stack_check_table_start(unsigned offset);
+  inline unsigned stack_check_table_offset();
+  inline void set_stack_check_table_offset(unsigned offset);
   // [check type]: For kind CALL_IC, tells how to check if the
   // receiver is valid for the given call.
@@ -3445,8 +3445,8 @@ class Code: public HeapObject {
   static const int kAllowOSRAtLoopNestingLevelOffset =
       kHasDeoptimizationSupportOffset + 1;
-  static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
-  static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
+  static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
+  static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
   // Flags layout.
   static const int kFlagsICStateShift = 0;
@@ -4176,6 +4176,10 @@ class SharedFunctionInfo: public HeapObject {
   inline bool optimization_disabled();
   inline void set_optimization_disabled(bool value);
+  // Indicates whether the function is a strict mode function.
+  inline bool strict_mode();
+  inline void set_strict_mode(bool value);
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -4357,6 +4361,7 @@ class SharedFunctionInfo: public HeapObject {
   static const int kCodeAgeShift = 4;
   static const int kCodeAgeMask = 0x7;
   static const int kOptimizationDisabled = 7;
+  static const int kStrictModeFunction = 8;
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
@@ -4902,10 +4907,12 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
  public:
   // Find cached value for a string key, otherwise return null.
   Object* Lookup(String* src);
-  Object* LookupEval(String* src, Context* context);
+  Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
   Object* LookupRegExp(String* source, JSRegExp::Flags flags);
   MaybeObject* Put(String* src, Object* value);
-  MaybeObject* PutEval(String* src, Context* context, Object* value);
+  MaybeObject* PutEval(String* src,
+                       Context* context,
+                       SharedFunctionInfo* value);
   MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
   // Remove given value from cache.
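Note that kSafepointTableOffsetOffset and kStackCheckTableOffsetOffset both equal kStackSlotsOffset + kIntSize: the two tables share one 32-bit field, which is safe because a Code object is either OPTIMIZED_FUNCTION or FUNCTION, never both, and the accessors in objects-inl.h assert the kind before touching it. A toy model of that shared-slot scheme (illustrative types, not V8's):

    #include <cassert>

    enum Kind { FUNCTION, OPTIMIZED_FUNCTION };

    // One unsigned slot whose meaning is selected by the code kind,
    // mirroring the kind() ASSERTs in the real accessors.
    struct Code {
      Kind kind;
      unsigned table_offset;  // models the single shared 32-bit field

      unsigned safepoint_table_offset() const {
        assert(kind == OPTIMIZED_FUNCTION);
        return table_offset;
      }
      unsigned stack_check_table_offset() const {
        assert(kind == FUNCTION);
        return table_offset;
      }
    };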

126  deps/v8/src/parser.cc

@@ -616,7 +616,8 @@ Parser::Parser(Handle<Script> script,
 FunctionLiteral* Parser::ParseProgram(Handle<String> source,
-                                      bool in_global_context) {
+                                      bool in_global_context,
+                                      StrictModeFlag strict_mode) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(&Counters::parse);
@@ -632,17 +633,18 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
     ExternalTwoByteStringUC16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   } else {
     GenericStringUC16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   }
 }
 FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
                                         bool in_global_context,
+                                        StrictModeFlag strict_mode,
                                         ZoneScope* zone_scope) {
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
@@ -662,6 +664,9 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
     LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
                                scope);
     TemporaryScope temp_scope(&this->temp_scope_);
+    if (strict_mode == kStrictMode) {
+      temp_scope.EnableStrictMode();
+    }
     ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
@@ -747,10 +752,16 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
                                scope);
     TemporaryScope temp_scope(&this->temp_scope_);
+    if (info->strict_mode()) {
+      temp_scope.EnableStrictMode();
+    }
     FunctionLiteralType type =
         info->is_expression() ? EXPRESSION : DECLARATION;
     bool ok = true;
-    result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
+    result = ParseFunctionLiteral(name,
+                                  false,  // Strict mode name already checked.
+                                  RelocInfo::kNoPosition, type, &ok);
     // Make sure the results agree.
     ASSERT(ok == (result != NULL));
     // The only errors should be stack overflows.
@@ -1439,8 +1450,10 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) {
   // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int function_token_position = scanner().location().beg_pos;
-  Handle<String> name = ParseIdentifier(CHECK_OK);
+  bool is_reserved = false;
+  Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
   FunctionLiteral* fun = ParseFunctionLiteral(name,
+                                              is_reserved,
                                               function_token_position,
                                               DECLARATION,
                                               CHECK_OK);
@@ -1699,7 +1712,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
-  bool starts_with_idenfifier = (peek() == Token::IDENTIFIER);
+  bool starts_with_idenfifier = peek_any_identifier();
   Expression* expr = ParseExpression(true, CHECK_OK);
   if (peek() == Token::COLON && starts_with_idenfifier && expr &&
       expr->AsVariableProxy() != NULL &&
@@ -2688,9 +2701,12 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
     Expect(Token::FUNCTION, CHECK_OK);
     int function_token_position = scanner().location().beg_pos;
     Handle<String> name;
-    if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
-    result = ParseFunctionLiteral(name, function_token_position,
-                                  NESTED, CHECK_OK);
+    bool is_reserved_name = false;
+    if (peek_any_identifier()) {
+      name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+    }
+    result = ParseFunctionLiteral(name, is_reserved_name,
+                                  function_token_position, NESTED, CHECK_OK);
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
@@ -2759,6 +2775,11 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
     case Token::IDENTIFIER:
       return ReportMessage("unexpected_token_identifier",
                            Vector<const char*>::empty());
+    case Token::FUTURE_RESERVED_WORD:
+      return ReportMessage(temp_scope_->StrictMode() ?
+                               "unexpected_strict_reserved" :
+                               "unexpected_token_identifier",
+                           Vector<const char*>::empty());
     default:
       const char* name = Token::String(token);
       ASSERT(name != NULL);
@@ -2814,7 +2835,8 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
       result = new Literal(Factory::false_value());
       break;
-    case Token::IDENTIFIER: {
+    case Token::IDENTIFIER:
+    case Token::FUTURE_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
       result = top_scope_->NewUnresolved(name, inside_with());
@@ -3221,6 +3243,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
   Token::Value next = Next();
   bool is_keyword = Token::IsKeyword(next);
   if (next == Token::IDENTIFIER || next == Token::NUMBER ||
+      next == Token::FUTURE_RESERVED_WORD ||
      next == Token::STRING || is_keyword) {
    Handle<String> name;
    if (is_keyword) {
@@ -3230,6 +3253,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
    }
    FunctionLiteral* value =
        ParseFunctionLiteral(name,
+                             false,  // reserved words are allowed here
                             RelocInfo::kNoPosition,
                             DECLARATION,
                             CHECK_OK);
@@ -3272,6 +3296,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
     Scanner::Location loc = scanner().peek_location();
     switch (next) {
+      case Token::FUTURE_RESERVED_WORD:
       case Token::IDENTIFIER: {
         bool is_getter = false;
         bool is_setter = false;
@@ -3420,6 +3445,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
 FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+                                              bool name_is_reserved,
                                               int function_token_position,
                                               FunctionLiteralType type,
                                               bool* ok) {
@@ -3453,10 +3479,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
     int start_pos = scanner().location().beg_pos;
     Scanner::Location name_loc = Scanner::NoLocation();
     Scanner::Location dupe_loc = Scanner::NoLocation();
+    Scanner::Location reserved_loc = Scanner::NoLocation();
     bool done = (peek() == Token::RPAREN);
     while (!done) {
-      Handle<String> param_name = ParseIdentifier(CHECK_OK);
+      bool is_reserved = false;
+      Handle<String> param_name =
+          ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
       // Store locations for possible future error reports.
       if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
@@ -3465,6 +3494,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
      if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
        dupe_loc = scanner().location();
      }
+      if (!reserved_loc.IsValid() && is_reserved) {
+        reserved_loc = scanner().location();
+      }
       Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
       top_scope_->AddParameter(parameter);
@@ -3545,7 +3577,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
       int position = function_token_position != RelocInfo::kNoPosition
          ? function_token_position
          : (start_pos > 0 ? start_pos - 1 : start_pos);
-      ReportMessageAt(Scanner::Location(position, start_pos),
+      Scanner::Location location = Scanner::Location(position, start_pos);
+      ReportMessageAt(location,
                      "strict_function_name", Vector<const char*>::empty());
      *ok = false;
      return NULL;
@@ -3562,6 +3595,22 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
      *ok = false;
      return NULL;
    }
+    if (name_is_reserved) {
+      int position = function_token_position != RelocInfo::kNoPosition
+          ? function_token_position
+          : (start_pos > 0 ? start_pos - 1 : start_pos);
+      Scanner::Location location = Scanner::Location(position, start_pos);
+      ReportMessageAt(location, "strict_reserved_word",
+                      Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    }
+    if (reserved_loc.IsValid()) {
+      ReportMessageAt(reserved_loc, "strict_reserved_word",
+                      Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    }
     CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
   }
@@ -3633,6 +3682,13 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
 }
+bool Parser::peek_any_identifier() {
+  Token::Value next = peek();
+  return next == Token::IDENTIFIER ||
+         next == Token::FUTURE_RESERVED_WORD;
+}
 void Parser::Consume(Token::Value token) {
   Token::Value next = Next();
   USE(next);
@@ -3692,7 +3748,22 @@ Literal* Parser::GetLiteralNumber(double value) {
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  Expect(Token::IDENTIFIER, ok);
+  bool is_reserved;
+  return ParseIdentifierOrReservedWord(&is_reserved, ok);
+}
+
+Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
+                                                     bool* ok) {
+  *is_reserved = false;
+  if (temp_scope_->StrictMode()) {
+    Expect(Token::IDENTIFIER, ok);
+  } else {
+    if (!Check(Token::IDENTIFIER)) {
+      Expect(Token::FUTURE_RESERVED_WORD, ok);
+      *is_reserved = true;
+    }
+  }
   if (!*ok) return Handle<String>();
   return GetSymbol(ok);
 }
@@ -3700,7 +3771,9 @@ Handle<String> Parser::ParseIdentifier(bool* ok) {
 Handle<String> Parser::ParseIdentifierName(bool* ok) {
   Token::Value next = Next();
-  if (next != Token::IDENTIFIER && !Token::IsKeyword(next)) {
+  if (next != Token::IDENTIFIER &&
+      next != Token::FUTURE_RESERVED_WORD &&
+      !Token::IsKeyword(next)) {
     ReportUnexpectedToken(next);
     *ok = false;
     return Handle<String>();
@@ -3740,20 +3813,18 @@ void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
 // This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
 Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
                                                  bool* is_set,
                                                  bool* ok) {
-  Expect(Token::IDENTIFIER, ok);
+  Handle<String> result = ParseIdentifier(ok);
   if (!*ok) return Handle<String>();
   if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
     const char* token = scanner().literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
   }
-  return GetSymbol(ok);
+  return result;
 }
@@ -3895,6 +3966,7 @@ Handle<Object> JsonParser::ParseJson(Handle<String> script,
       message = "unexpected_token_string";
       break;
     case Token::IDENTIFIER:
+    case Token::FUTURE_RESERVED_WORD:
       message = "unexpected_token_identifier";
       break;
     default:
@@ -3941,16 +4013,10 @@ Handle<String> JsonParser::GetString() {
 Handle<Object> JsonParser::ParseJsonValue() {
   Token::Value token = scanner_.Next();
   switch (token) {
-    case Token::STRING: {
+    case Token::STRING:
       return GetString();
-    }
-    case Token::NUMBER: {
-      ASSERT(scanner_.is_literal_ascii());
-      double value = StringToDouble(scanner_.literal_ascii_string(),
-                                    NO_FLAGS,  // Hex, octal or trailing junk.
-                                    OS::nan_value());
-      return Factory::NewNumber(value);
-    }
+    case Token::NUMBER:
+      return Factory::NewNumber(scanner_.number());
     case Token::FALSE_LITERAL:
       return Factory::false_value();
     case Token::TRUE_LITERAL:
@@ -5024,7 +5090,9 @@ bool ParserApi::Parse(CompilationInfo* info) {
     ASSERT(Top::has_pending_exception());
   } else {
     Handle<String> source = Handle<String>(String::cast(script->source()));
-    result = parser.ParseProgram(source, info->is_global());
+    result = parser.ParseProgram(source,
+                                 info->is_global(),
+                                 info->StrictMode());
   }
 }
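The new ParseIdentifierOrReservedWord above accepts a FUTURE_RESERVED_WORD as an identifier only outside strict mode, and reports the fact through is_reserved so callers (e.g. ParseFunctionLiteral) can reject it retroactively if the enclosing code turns out to be strict. A standalone model of that accept-and-flag decision:

    #include <cassert>

    enum Token { IDENTIFIER, FUTURE_RESERVED_WORD, OTHER };

    // Sketch of the parser's rule: strict mode makes a future reserved word
    // a hard error; sloppy mode accepts it but flags it, so a caller that
    // later discovers it is in strict territory can still report
    // "strict_reserved_word".
    bool AcceptIdentifier(Token t, bool strict_mode, bool* is_reserved) {
      *is_reserved = false;
      if (t == IDENTIFIER) return true;
      if (t == FUTURE_RESERVED_WORD && !strict_mode) {
        *is_reserved = true;  // accepted now, may be rejected retroactively
        return true;
      }
      return false;  // parse error
    }

    int main() {
      bool reserved;
      assert(AcceptIdentifier(FUTURE_RESERVED_WORD, false, &reserved) && reserved);
      assert(!AcceptIdentifier(FUTURE_RESERVED_WORD, true, &reserved));
      return 0;
    }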

8  deps/v8/src/parser.h

@@ -423,7 +423,8 @@ class Parser {
   // Returns NULL if parsing failed.
   FunctionLiteral* ParseProgram(Handle<String> source,
-                                bool in_global_context);
+                                bool in_global_context,
+                                StrictModeFlag strict_mode);
   FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
@@ -446,6 +447,7 @@ class Parser {
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(Handle<String> source,
                                   bool in_global_context,
+                                  StrictModeFlag strict_mode,
                                   ZoneScope* zone_scope);
   // Report syntax error
@@ -546,6 +548,7 @@ class Parser {
   ZoneList<Expression*>* ParseArguments(bool* ok);
   FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+                                        bool name_is_reserved,
                                         int function_token_position,
                                         FunctionLiteralType type,
                                         bool* ok);
@@ -575,6 +578,8 @@ class Parser {
     return scanner().Next();
   }
+  bool peek_any_identifier();
   INLINE(void Consume(Token::Value token));
   void Expect(Token::Value token, bool* ok);
   bool Check(Token::Value token);
@@ -608,6 +613,7 @@ class Parser {
   Literal* GetLiteralNumber(double value);
   Handle<String> ParseIdentifier(bool* ok);
+  Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
   Handle<String> ParseIdentifierName(bool* ok);
   Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
                                            bool* is_set,

31  deps/v8/src/preparser.cc

@@ -83,6 +83,7 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
       return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
                              "unexpected_token_string", NULL);
     case i::Token::IDENTIFIER:
+    case i::Token::FUTURE_RESERVED_WORD:
       return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
                              "unexpected_token_identifier", NULL);
     default:
@@ -790,7 +791,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
   Expression result = kUnknownExpression;
   if (peek() == i::Token::FUNCTION) {
     Consume(i::Token::FUNCTION);
-    if (peek() == i::Token::IDENTIFIER) {
+    if (peek_any_identifier()) {
       ParseIdentifier(CHECK_OK);
     }
     result = ParseFunctionLiteral(CHECK_OK);
@@ -858,7 +859,8 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
       break;
     }

-    case i::Token::IDENTIFIER: {
+    case i::Token::IDENTIFIER:
+    case i::Token::FUTURE_RESERVED_WORD: {
       ParseIdentifier(CHECK_OK);
       result = kIdentifierExpression;
       break;
@@ -946,7 +948,8 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
   while (peek() != i::Token::RBRACE) {
     i::Token::Value next = peek();
     switch (next) {
-      case i::Token::IDENTIFIER: {
+      case i::Token::IDENTIFIER:
+      case i::Token::FUTURE_RESERVED_WORD: {
         bool is_getter = false;
         bool is_setter = false;
         ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
@@ -954,6 +957,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
         i::Token::Value name = Next();
         bool is_keyword = i::Token::IsKeyword(name);
         if (name != i::Token::IDENTIFIER &&
+            name != i::Token::FUTURE_RESERVED_WORD &&
            name != i::Token::NUMBER &&
            name != i::Token::STRING &&
            !is_keyword) {
@@ -1151,7 +1155,9 @@ PreParser::Expression PreParser::GetStringSymbol() {

 PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
-  Expect(i::Token::IDENTIFIER, ok);
+  if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
+    Expect(i::Token::IDENTIFIER, ok);
+  }
   if (!*ok) return kUnknownIdentifier;
   return GetIdentifierSymbol();
 }
@@ -1166,7 +1172,8 @@ PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
                                     i::StrLength(keyword)));
     return kUnknownExpression;
   }
-  if (next == i::Token::IDENTIFIER) {
+  if (next == i::Token::IDENTIFIER ||
+      next == i::Token::FUTURE_RESERVED_WORD) {
     return GetIdentifierSymbol();
   }
   *ok = false;
@@ -1175,19 +1182,23 @@ PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
 // This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
 PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
                                                            bool* is_set,
                                                            bool* ok) {
-  Expect(i::Token::IDENTIFIER, CHECK_OK);
+  PreParser::Identifier result = ParseIdentifier(CHECK_OK);
   if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
     const char* token = scanner_->literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
   }
-  return GetIdentifierSymbol();
+  return result;
+}
+
+
+bool PreParser::peek_any_identifier() {
+  i::Token::Value next = peek();
+  return next == i::Token::IDENTIFIER ||
+         next == i::Token::FUTURE_RESERVED_WORD;
 }

 #undef CHECK_OK
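With FUTURE_RESERVED_WORD accepted in every identifier position, the preparser no longer rejects sloppy-mode code that uses these words as names. A sketch with hypothetical inputs (per ES5, words like these are reserved only in strict code):

    var yield = 5;                 // variable name
    var obj = { static: true };    // property name in an object literal
    obj.package = 'x';             // property name after '.'
    (function let() {})();         // function name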

2
deps/v8/src/preparser.h

@@ -243,6 +243,8 @@ class PreParser {
     return scanner_->Next();
   }

+  bool peek_any_identifier();
+
   void Consume(i::Token::Value token) { Next(); }

   void Expect(i::Token::Value token, bool* ok) {

25
deps/v8/src/prettyprinter.cc

@@ -297,13 +297,13 @@ void PrettyPrinter::VisitSlot(Slot* node) {
       Print("parameter[%d]", node->index());
       break;
     case Slot::LOCAL:
-      Print("frame[%d]", node->index());
+      Print("local[%d]", node->index());
       break;
     case Slot::CONTEXT:
-      Print(".context[%d]", node->index());
+      Print("context[%d]", node->index());
       break;
     case Slot::LOOKUP:
-      Print(".context[");
+      Print("lookup[");
       PrintLiteral(node->var()->name(), false);
       Print("]");
       break;
@@ -999,24 +999,7 @@ void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
 void AstPrinter::VisitSlot(Slot* node) {
   PrintIndented("SLOT ");
-  switch (node->type()) {
-    case Slot::PARAMETER:
-      Print("parameter[%d]", node->index());
-      break;
-    case Slot::LOCAL:
-      Print("frame[%d]", node->index());
-      break;
-    case Slot::CONTEXT:
-      Print(".context[%d]", node->index());
-      break;
-    case Slot::LOOKUP:
-      Print(".context[");
-      PrintLiteral(node->var()->name(), false);
-      Print("]");
-      break;
-    default:
-      UNREACHABLE();
-  }
+  PrettyPrinter::VisitSlot(node);
   Print("\n");
 }

192
deps/v8/src/runtime.cc

@@ -644,6 +644,90 @@ static void GetOwnPropertyImplementation(JSObject* obj,
 }


+static bool CheckAccessException(LookupResult* result,
+                                 v8::AccessType access_type) {
+  if (result->type() == CALLBACKS) {
+    Object* callback = result->GetCallbackObject();
+    if (callback->IsAccessorInfo()) {
+      AccessorInfo* info = AccessorInfo::cast(callback);
+      bool can_access =
+          (access_type == v8::ACCESS_HAS &&
+              (info->all_can_read() || info->all_can_write())) ||
+          (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+          (access_type == v8::ACCESS_SET && info->all_can_write());
+      return can_access;
+    }
+  }
+
+  return false;
+}
+
+
+static bool CheckAccess(JSObject* obj,
+                        String* name,
+                        LookupResult* result,
+                        v8::AccessType access_type) {
+  ASSERT(result->IsProperty());
+
+  JSObject* holder = result->holder();
+  JSObject* current = obj;
+  while (true) {
+    if (current->IsAccessCheckNeeded() &&
+        !Top::MayNamedAccess(current, name, access_type)) {
+      // Access check callback denied the access, but some properties
+      // can have special permissions which override the callback's decision
+      // (currently see v8::AccessControl).
+      break;
+    }
+
+    if (current == holder) {
+      return true;
+    }
+
+    current = JSObject::cast(current->GetPrototype());
+  }
+
+  // API callbacks can have per callback access exceptions.
+  switch (result->type()) {
+    case CALLBACKS: {
+      if (CheckAccessException(result, access_type)) {
+        return true;
+      }
+      break;
+    }
+    case INTERCEPTOR: {
+      // If the object has an interceptor, try real named properties.
+      // Overwrite the result to fetch the correct property later.
+      holder->LookupRealNamedProperty(name, result);
+      if (result->IsProperty()) {
+        if (CheckAccessException(result, access_type)) {
+          return true;
+        }
+      }
+      break;
+    }
+    default:
+      break;
+  }
+
+  Top::ReportFailedAccessCheck(current, access_type);
+  return false;
+}
+
+
+// TODO(1095): we should traverse hidden prototype hierarchy as well.
+static bool CheckElementAccess(JSObject* obj,
+                               uint32_t index,
+                               v8::AccessType access_type) {
+  if (obj->IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(obj, index, access_type)) {
+    return false;
+  }
+
+  return true;
+}
+
+
 // Enumerator used as indices into the array returned from GetOwnProperty
 enum PropertyDescriptorIndices {
   IS_ACCESSOR_INDEX,
@@ -686,7 +770,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
         // subsequent cases.
         Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
         Handle<String> str(String::cast(js_value->value()));
-        Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+        Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);

         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
         elms->set(VALUE_INDEX, *substr);
@@ -699,8 +783,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-        Handle<Object> element = GetElement(Handle<Object>(obj), index);
-        elms->set(VALUE_INDEX, *element);
+        elms->set(VALUE_INDEX, *GetElement(obj, index));
         elms->set(WRITABLE_INDEX, Heap::true_value());
         elms->set(ENUMERABLE_INDEX, Heap::true_value());
         elms->set(CONFIGURABLE_INDEX, Heap::true_value());
@@ -708,7 +791,14 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
       }

       case JSObject::DICTIONARY_ELEMENT: {
-        NumberDictionary* dictionary = obj->element_dictionary();
+        Handle<JSObject> holder = obj;
+        if (obj->IsJSGlobalProxy()) {
+          Object* proto = obj->GetPrototype();
+          if (proto->IsNull()) return Heap::undefined_value();
+          ASSERT(proto->IsJSGlobalObject());
+          holder = Handle<JSObject>(JSObject::cast(proto));
+        }
+        NumberDictionary* dictionary = holder->element_dictionary();
         int entry = dictionary->FindEntry(index);
         ASSERT(entry != NumberDictionary::kNotFound);
         PropertyDetails details = dictionary->DetailsAt(entry);
@@ -718,14 +808,18 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
             FixedArray* callbacks =
                 FixedArray::cast(dictionary->ValueAt(entry));
             elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-            elms->set(GETTER_INDEX, callbacks->get(0));
-            elms->set(SETTER_INDEX, callbacks->get(1));
+            if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
+              elms->set(GETTER_INDEX, callbacks->get(0));
+            }
+            if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
+              elms->set(SETTER_INDEX, callbacks->get(1));
+            }
             break;
           }
           case NORMAL:
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-            elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+            elms->set(VALUE_INDEX, *GetElement(obj, index));
             elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
             break;
           default:
@@ -746,6 +840,10 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
     return Heap::undefined_value();
   }

+  if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
+    return Heap::false_value();
+  }
+
   elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
   elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
@@ -754,16 +852,22 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {

   if (is_js_accessor) {
     // __defineGetter__/__defineSetter__ callback.
-    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
     elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-    elms->set(GETTER_INDEX, structure->get(0));
-    elms->set(SETTER_INDEX, structure->get(1));
+
+    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+    if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+      elms->set(GETTER_INDEX, structure->get(0));
+    }
+    if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+      elms->set(SETTER_INDEX, structure->get(1));
+    }
   } else {
     elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
     elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));

     PropertyAttributes attrs;
     Object* value;
+    // GetProperty will check access and report any violations.
     { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
       if (!maybe_value->ToObject(&value)) return maybe_value;
     }
@@ -3487,8 +3591,10 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
     HandleScope scope;
     Handle<String> str = args.at<String>(0);
     int index = Smi::cast(args[1])->value();
-    Handle<Object> result = GetCharAt(str, index);
-    return *result;
+    if (index >= 0 && index < str->length()) {
+      Handle<Object> result = GetCharAt(str, index);
+      return *result;
+    }
   }

   // Fall back to GetObjectProperty.
@@ -3496,7 +3602,12 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
                                     args.at<Object>(1));
 }

+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4b - define a new accessor property.
+// Steps 9c & 12 - replace an existing data property with an accessor property.
+// Step 12 - update an existing accessor property with an accessor or generic
+//           descriptor.
 static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
   ASSERT(args.length() == 5);
   HandleScope scope;
@@ -3528,6 +3639,12 @@ static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
   return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
 }

+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4a - define a new data property.
+// Steps 9b & 12 - replace an existing accessor property with a data property.
+// Step 12 - update an existing data property with a data or generic
+//           descriptor.
 static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
   ASSERT(args.length() == 4);
   HandleScope scope;
@@ -3551,7 +3668,9 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
   if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
       is_element) {
     // Normalize the elements to enable attributes on the property.
-    NormalizeElements(js_object);
+    if (!js_object->IsJSGlobalProxy()) {
+      NormalizeElements(js_object);
+    }
     Handle<NumberDictionary> dictionary(js_object->element_dictionary());
     // Make sure that we never go back to fast case.
     dictionary->set_requires_slow_elements();
@@ -3571,7 +3690,9 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
   if (result.IsProperty() &&
       (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
     // New attributes - normalize to avoid writing to instance descriptor
-    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    if (!js_object->IsJSGlobalProxy()) {
+      NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    }
     // Use IgnoreAttributes version since a readonly property may be
     // overridden and SetProperty does not allow this.
     return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4167,7 +4288,7 @@ static MaybeObject* Runtime_ToSlowProperties(Arguments args) {
   ASSERT(args.length() == 1);
   Handle<Object> object = args.at<Object>(0);
-  if (object->IsJSObject()) {
+  if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
     Handle<JSObject> js_object = Handle<JSObject>::cast(object);
     NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
   }
@@ -6889,7 +7010,7 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
   // the AST id matching the PC.
   Address start = unoptimized->instruction_start();
   unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
-  Address table_cursor = start + unoptimized->stack_check_table_start();
+  Address table_cursor = start + unoptimized->stack_check_table_offset();
   uint32_t table_length = Memory::uint32_at(table_cursor);
   table_cursor += kIntSize;
   for (unsigned i = 0; i < table_length; ++i) {
@@ -7553,7 +7674,8 @@ static MaybeObject* Runtime_CompileString(Arguments args) {
   Handle<Context> context(Top::context()->global_context());
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
                                                             context,
-                                                            true);
+                                                            true,
+                                                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
@@ -7562,13 +7684,15 @@ static MaybeObject* Runtime_CompileString(Arguments args) {

 static ObjectPair CompileGlobalEval(Handle<String> source,
-                                    Handle<Object> receiver) {
+                                    Handle<Object> receiver,
+                                    StrictModeFlag mode) {
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(Top::context()),
-      Top::context()->IsGlobalContext());
+      Top::context()->IsGlobalContext(),
+      mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
       shared,
@@ -7579,7 +7703,7 @@ static ObjectPair CompileGlobalEval(Handle<String> source,

 static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   if (!args[0]->IsJSFunction()) {
     return MakePair(Top::ThrowIllegalOperation(), NULL);
   }
@@ -7643,12 +7767,16 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
     return MakePair(*callee, Top::context()->global()->global_receiver());
   }

-  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+  ASSERT(args[3]->IsSmi());
+  return CompileGlobalEval(args.at<String>(1),
+                           args.at<Object>(2),
+                           static_cast<StrictModeFlag>(
+                               Smi::cast(args[3])->value()));
 }

 static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   if (!args[0]->IsJSFunction()) {
     return MakePair(Top::ThrowIllegalOperation(), NULL);
   }
@@ -7663,7 +7791,11 @@ static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
     return MakePair(*callee, Top::context()->global()->global_receiver());
   }

-  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+  ASSERT(args[3]->IsSmi());
+  return CompileGlobalEval(args.at<String>(1),
+                           args.at<Object>(2),
+                           static_cast<StrictModeFlag>(
+                               Smi::cast(args[3])->value()));
 }
@@ -9800,10 +9932,14 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
   Handle<String> function_source =
       Factory::NewStringFromAscii(Vector<const char>(source_str,
                                                      source_str_length));
+
+  // Currently, the eval code will be executed in non-strict mode,
+  // even in the strict code context.
   Handle<SharedFunctionInfo> shared =
       Compiler::CompileEval(function_source,
                             context,
-                            context->IsGlobalContext());
+                            context->IsGlobalContext(),
+                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Factory::NewFunctionFromSharedFunctionInfo(shared, context);
@@ -9885,10 +10021,10 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
   }

   // Compile the source to be evaluated.
+  // Currently, the eval code will be executed in non-strict mode,
+  // even in the strict code context.
   Handle<SharedFunctionInfo> shared =
-      Compiler::CompileEval(source,
-                            context,
-                            is_global);
+      Compiler::CompileEval(source, context, is_global, kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
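ResolvePossiblyDirectEval and its NoLookup variant now take a fourth argument, a Smi-encoded StrictModeFlag, so eval code can be compiled with the strictness of its caller (the debugger paths above deliberately pin kNonStrictMode for now). A sketch of the semantics this enables, per ES5 10.1.1; exact behavior at this revision may differ:

    function f() {
      'use strict';
      // Direct eval inherits the caller's strict mode, so assigning to an
      // undeclared variable throws instead of creating a global.
      eval('undeclaredVar = 1');   // ReferenceError in strict eval code
    }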

4
deps/v8/src/runtime.h

@@ -237,8 +237,8 @@ namespace internal {
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 3, 2) \
-  F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
+  F(ResolvePossiblyDirectEval, 4, 2) \
+  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
   \
   F(SetProperty, -1 /* 3 or 4 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \

22
deps/v8/src/safepoint-table.cc

@@ -58,7 +58,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
 SafepointTable::SafepointTable(Code* code) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   code_ = code;
-  Address header = code->instruction_start() + code->safepoint_table_start();
+  Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
   entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
   pc_and_deoptimization_indexes_ = header + kHeaderSize;
@@ -230,4 +230,24 @@ uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
 }

+int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
+  int result = 0;
+  if (!deoptimization_info_.is_empty()) {
+    unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
+    for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
+      DeoptimizationInfo info = deoptimization_info_[i];
+      if (static_cast<int>(info.deoptimization_index) !=
+          Safepoint::kNoDeoptimizationIndex) {
+        if (previous_gap_end + limit > info.pc) {
+          result++;
+        }
+        previous_gap_end = info.pc_after_gap;
+      }
+    }
+  }
+  return result;
+}
+
 } }  // namespace v8::internal

11
deps/v8/src/safepoint-table.h

@@ -220,8 +220,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
                   int arguments,
                   int deoptimization_index);

-  // Update the last safepoint with the size of the code generated for the gap
-  // following it.
+  // Update the last safepoint with the size of the code generated until the
+  // end of the gap following it.
   void SetPcAfterGap(int pc) {
     ASSERT(!deoptimization_info_.is_empty());
     int index = deoptimization_info_.length() - 1;
@@ -232,6 +232,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
   // entry must be enough to hold all the pointer indexes.
   void Emit(Assembler* assembler, int bits_per_entry);

+  // Count the number of deoptimization points where the next
+  // following deoptimization point comes less than limit bytes
+  // after the end of this point's gap.
+  int CountShortDeoptimizationIntervals(unsigned limit);
+
  private:
   struct DeoptimizationInfo {
     unsigned pc;
@@ -247,8 +252,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
   ZoneList<ZoneList<int>*> indexes_;
   ZoneList<ZoneList<int>*> registers_;

-  bool emitted_;
   unsigned offset_;
+  bool emitted_;

   DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
 };

61
deps/v8/src/scanner-base.cc

@@ -796,25 +796,27 @@ KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
   { "break", KEYWORD_PREFIX, Token::BREAK },
   { NULL, C, Token::ILLEGAL },
   { NULL, D, Token::ILLEGAL },
-  { "else", KEYWORD_PREFIX, Token::ELSE },
+  { NULL, E, Token::ILLEGAL },
   { NULL, F, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { NULL, I, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
-  { NULL, UNMATCHABLE, Token::ILLEGAL },
+  { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { NULL, N, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
-  { NULL, UNMATCHABLE, Token::ILLEGAL },
+  { NULL, P, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { "return", KEYWORD_PREFIX, Token::RETURN },
-  { "switch", KEYWORD_PREFIX, Token::SWITCH },
+  { NULL, S, Token::ILLEGAL },
   { NULL, T, Token::ILLEGAL },
   { NULL, UNMATCHABLE, Token::ILLEGAL },
   { NULL, V, Token::ILLEGAL },
-  { NULL, W, Token::ILLEGAL }
+  { NULL, W, Token::ILLEGAL },
+  { NULL, UNMATCHABLE, Token::ILLEGAL },
+  { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
 };
@@ -822,7 +824,7 @@ void KeywordMatcher::Step(unibrow::uchar input) {
   switch (state_) {
     case INITIAL: {
       // matching the first character is the only state with significant fanout.
-      // Match only lower-case letters in range 'b'..'w'.
+      // Match only lower-case letters in range 'b'..'y'.
       unsigned int offset = input - kFirstCharRangeMin;
       if (offset < kFirstCharRangeLength) {
         state_ = first_states_[offset].state;
@@ -850,6 +852,8 @@ void KeywordMatcher::Step(unibrow::uchar input) {
       break;
     case C:
       if (MatchState(input, 'a', CA)) return;
+      if (MatchKeywordStart(input, "class", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
       if (MatchState(input, 'o', CO)) return;
       break;
     case CA:
@@ -872,6 +876,18 @@ void KeywordMatcher::Step(unibrow::uchar input) {
       if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
      if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
       break;
+    case E:
+      if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
+      if (MatchKeywordStart(input, "enum", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchState(input, 'x', EX)) return;
+      break;
+    case EX:
+      if (MatchKeywordStart(input, "export", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "extends", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
     case F:
       if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
       if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
@@ -880,10 +896,22 @@ void KeywordMatcher::Step(unibrow::uchar input) {
       break;
     case I:
       if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+      if (MatchState(input, 'm', IM)) return;
       if (MatchKeyword(input, 'n', IN, Token::IN)) return;
       break;
+    case IM:
+      if (MatchState(input, 'p', IMP)) return;
+      break;
+    case IMP:
+      if (MatchKeywordStart(input, "implements", 3,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "import", 3,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
     case IN:
       token_ = Token::IDENTIFIER;
+      if (MatchKeywordStart(input, "interface", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
       if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
       break;
     case N:
@@ -891,6 +919,27 @@ void KeywordMatcher::Step(unibrow::uchar input) {
       if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
       if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
       break;
+    case P:
+      if (MatchKeywordStart(input, "package", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchState(input, 'r', PR)) return;
+      if (MatchKeywordStart(input, "public", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
+    case PR:
+      if (MatchKeywordStart(input, "private", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "protected", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
+    case S:
+      if (MatchKeywordStart(input, "static", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "super", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "switch", 1, Token::SWITCH)) return;
+      break;
     case T:
       if (MatchState(input, 'h', TH)) return;
       if (MatchState(input, 'r', TR)) return;

9
deps/v8/src/scanner-base.h

@@ -564,10 +564,17 @@ class KeywordMatcher {
     CON,
     D,
     DE,
+    E,
+    EX,
     F,
     I,
+    IM,
+    IMP,
     IN,
     N,
+    P,
+    PR,
+    S,
     T,
     TH,
     TR,
@@ -583,7 +590,7 @@ class KeywordMatcher {
   // Range of possible first characters of a keyword.
   static const unsigned int kFirstCharRangeMin = 'b';
-  static const unsigned int kFirstCharRangeMax = 'w';
+  static const unsigned int kFirstCharRangeMax = 'y';
   static const unsigned int kFirstCharRangeLength =
       kFirstCharRangeMax - kFirstCharRangeMin + 1;
   // State map for first keyword character range.

19
deps/v8/src/scanner.cc

@@ -516,17 +516,30 @@ Token::Value JsonScanner::ScanJsonString() {
 Token::Value JsonScanner::ScanJsonNumber() {
   LiteralScope literal(this);
-  if (c0_ == '-') AddLiteralCharAdvance();
+  bool negative = false;
+
+  if (c0_ == '-') {
+    AddLiteralCharAdvance();
+    negative = true;
+  }
   if (c0_ == '0') {
     AddLiteralCharAdvance();
     // Prefix zero is only allowed if it's the only digit before
     // a decimal point or exponent.
     if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
   } else {
+    int i = 0;
+    int digits = 0;
     if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
     do {
+      i = i * 10 + c0_ - '0';
+      digits++;
       AddLiteralCharAdvance();
     } while (c0_ >= '0' && c0_ <= '9');
+    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+      number_ = (negative ? -i : i);
+      return Token::NUMBER;
+    }
   }
   if (c0_ == '.') {
     AddLiteralCharAdvance();
@@ -544,6 +557,10 @@ Token::Value JsonScanner::ScanJsonNumber() {
     } while (c0_ >= '0' && c0_ <= '9');
   }
   literal.Complete();
+  ASSERT_NOT_NULL(next_.literal_chars);
+  number_ = StringToDouble(next_.literal_chars->ascii_literal(),
+                           NO_FLAGS,  // Hex, octal or trailing junk.
+                           OS::nan_value());
   return Token::NUMBER;
 }
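The scanner now folds the numeric value into the token: short integers are accumulated inline and everything else falls back to StringToDouble once the literal is complete. A sketch of which inputs take which path, with the digit counts taken from the code above:

    JSON.parse('123456789');    // 9 digits, no '.' or exponent: fast int path
    JSON.parse('1234567890');   // 10 digits: handled by StringToDouble
    JSON.parse('-42');          // fast path, negated via the 'negative' flag
    JSON.parse('1.5e3');        // fraction/exponent: StringToDouble path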

9
deps/v8/src/scanner.h

@@ -148,6 +148,12 @@ class JsonScanner : public Scanner {
   // Returns the next token.
   Token::Value Next();

+  // Returns the value of a number token.
+  double number() {
+    return number_;
+  }
+
 protected:
   // Skip past JSON whitespace (only space, tab, newline and carriage-return).
   bool SkipJsonWhiteSpace();
@@ -178,6 +184,9 @@ class JsonScanner : public Scanner {
   // are the only valid JSON identifiers (productions JSONBooleanLiteral,
   // JSONNullLiteral).
   Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
+  // Holds the value of a scanned number token.
+  double number_;
 };

 } }  // namespace v8::internal

3
deps/v8/src/scopes.cc

@@ -383,8 +383,7 @@ void Scope::AddDeclaration(Declaration* declaration) {
 void Scope::SetIllegalRedeclaration(Expression* expression) {
-  // Only set the illegal redeclaration expression the
-  // first time the function is called.
+  // Record only the first illegal redeclaration.
   if (!HasIllegalRedeclaration()) {
     illegal_redecl_ = expression;
   }

33
deps/v8/src/stub-cache.cc

@@ -441,6 +441,12 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(

 MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+  // Using NORMAL as the PropertyType for array element loads is a misuse. The
+  // generated stub always accesses fast elements, not slow-mode fields, but
+  // some property type is required for the stub lookup. Note that overloading
+  // the NORMAL PropertyType is only safe as long as no stubs are generated for
+  // other keyed field loads. This is guaranteed to be the case since all field
+  // keyed loads that are not array elements go through a generic builtin stub.
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
   String* name = Heap::KeyedLoadSpecialized_symbol();
@@ -461,6 +467,33 @@ MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
 }

+MaybeObject* StubCache::ComputeKeyedLoadPixelArray(JSObject* receiver) {
+  // Using NORMAL as the PropertyType for array element loads is a misuse. The
+  // generated stub always accesses fast elements, not slow-mode fields, but
+  // some property type is required for the stub lookup. Note that overloading
+  // the NORMAL PropertyType is only safe as long as no stubs are generated for
+  // other keyed field loads. This is guaranteed to be the case since all field
+  // keyed loads that are not array elements go through a generic builtin stub.
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+  String* name = Heap::KeyedLoadPixelArray_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadPixelArray(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,

4
deps/v8/src/stub-cache.h

@@ -133,6 +133,9 @@ class StubCache : public AllStatic {
   MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
       JSObject* receiver);

+  MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadPixelArray(
+      JSObject* receiver);
+
   // ---
   MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
@@ -607,6 +610,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
   MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileLoadPixelArray(JSObject* receiver);

  private:
   MaybeObject* GetCode(PropertyType type, String* name);

18
deps/v8/src/third_party/strongtalk/README.chromium

@@ -1,18 +0,0 @@
-Name: Strongtalk
-URL: http://www.strongtalk.org/
-
-Code from the Strongtalk assembler is used with modification in the following
-files:
-
-src/assembler.h
-src/assembler.cc
-src/arm/assembler-arm.cc
-src/arm/assembler-arm.h
-src/arm/assembler-arm-inl.h
-src/ia32/assembler-ia32.cc
-src/ia32/assembler-ia32.h
-src/ia32/assembler-ia32-inl.h
-src/mips/assembler-mips.cc
-src/mips/assembler-mips.h
-src/mips/assembler-mips-inl.h
-src/x64/assembler-x64.h

37
deps/v8/src/token.h

@@ -155,38 +155,6 @@ namespace internal {
   K(WHILE, "while", 0) \
   K(WITH, "with", 0) \
   \
-  /* Future reserved words (ECMA-262, section 7.5.3, page 14). */ \
-  F(ABSTRACT, "abstract", 0) \
-  F(BOOLEAN, "boolean", 0) \
-  F(BYTE, "byte", 0) \
-  F(CHAR, "char", 0) \
-  F(CLASS, "class", 0) \
-  K(CONST, "const", 0) \
-  F(DOUBLE, "double", 0) \
-  F(ENUM, "enum", 0) \
-  F(EXPORT, "export", 0) \
-  F(EXTENDS, "extends", 0) \
-  F(FINAL, "final", 0) \
-  F(FLOAT, "float", 0) \
-  F(GOTO, "goto", 0) \
-  F(IMPLEMENTS, "implements", 0) \
-  F(IMPORT, "import", 0) \
-  F(INT, "int", 0) \
-  F(INTERFACE, "interface", 0) \
-  F(LONG, "long", 0) \
-  K(NATIVE, "native", 0) \
-  F(PACKAGE, "package", 0) \
-  F(PRIVATE, "private", 0) \
-  F(PROTECTED, "protected", 0) \
-  F(PUBLIC, "public", 0) \
-  F(SHORT, "short", 0) \
-  F(STATIC, "static", 0) \
-  F(SUPER, "super", 0) \
-  F(SYNCHRONIZED, "synchronized", 0) \
-  F(THROWS, "throws", 0) \
-  F(TRANSIENT, "transient", 0) \
-  F(VOLATILE, "volatile", 0) \
-  \
   /* Literals (ECMA-262, section 7.8, page 16). */ \
   K(NULL_LITERAL, "null", 0) \
   K(TRUE_LITERAL, "true", 0) \
@@ -197,6 +165,11 @@ namespace internal {
   /* Identifiers (not keywords or future reserved words). */ \
   T(IDENTIFIER, NULL, 0) \
   \
+  /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
+  T(FUTURE_RESERVED_WORD, NULL, 0) \
+  K(CONST, "const", 0) \
+  K(NATIVE, "native", 0) \
+  \
   /* Illegal token - not able to scan. */ \
   T(ILLEGAL, "ILLEGAL", 0) \
   \
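The long Java-flavored reserved-word list collapses into a single FUTURE_RESERVED_WORD token, with only const and native kept as real keywords (both V8 extensions to the ES5 list). One observable consequence, sketched in JavaScript under the assumption that the scanner change is the only restriction in play:

    var abstract = 1;   // fine now: 'abstract' is no longer reserved at all
    var goto = 2;       // likewise for the other dropped Java-style words
    const c = 3;        // still a keyword: V8 accepted 'const' before ES6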

2
deps/v8/src/top.h

@@ -32,11 +32,11 @@
 #include "compilation-cache.h"
 #include "frames-inl.h"
 #include "runtime-profiler.h"
-#include "simulator.h"

 namespace v8 {
 namespace internal {

+class Simulator;
+
 #define RETURN_IF_SCHEDULED_EXCEPTION() \
   if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()

6
deps/v8/src/type-info.h

@@ -120,9 +120,9 @@ class TypeInfo {
   }

-  // Integer32 is an integer that can be represented as either a signed
-  // 32-bit integer or as an unsigned 32-bit integer. It has to be
-  // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+  // Integer32 is an integer that can be represented as a signed
+  // 32-bit integer. It has to be
+  // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
   // as it is not an Integer32.
   static inline bool IsInt32Double(double value) {
     const DoubleRepresentation minus_zero(-0.0);
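The old comment overstated the range; the check only admits signed 32-bit values. The corrected bounds, in JavaScript terms:

    (2147483647 | 0) === 2147483647;    // 2^31 - 1: representable
    (-2147483648 | 0) === -2147483648;  // -2^31: representable
    (2147483648 | 0) === -2147483648;   // 2^31 wraps, so it is not an Integer32
    // -0 is also excluded, as the comment notes.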

3
deps/v8/src/uri.js

@@ -205,7 +205,7 @@ function Decode(uri, reserved) {
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
-          k++;
+          if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
           octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
@@ -412,4 +412,3 @@ function SetupURI() {
 }

 SetupURI();
-
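The old loop advanced past the expected '%' without checking it, so a malformed escape whose following characters happened to be hex digits could decode silently to the wrong code point. A sketch of the difference:

    decodeURIComponent('%C3%A9');   // 'é': well-formed two-byte sequence
    decodeURIComponent('%C3A9');    // now throws URIError; previously the
                                    // missing '%' could go undetected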

6
deps/v8/src/v8globals.h

@@ -469,6 +469,12 @@ enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                   ARMv7 = 2,   // ARM
                   SAHF = 0};   // x86

+
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+enum StrictModeFlag {
+  kNonStrictMode,
+  kStrictMode
+};
+
 } }  // namespace v8::internal

 #endif  // V8_V8GLOBALS_H_

146
deps/v8/src/v8natives.js

@@ -491,28 +491,29 @@ PropertyDescriptor.prototype.hasSetter = function() {
 }

-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
-  var desc = new PropertyDescriptor();
-
-  // GetOwnProperty returns an array indexed by the constants
-  // defined in macros.py.
-  // If p is not a property on obj undefined is returned.
-  var props = %GetOwnProperty(ToObject(obj), ToString(p));
-  if (IS_UNDEFINED(props)) return void 0;
-
-  // This is an accessor
-  if (props[IS_ACCESSOR_INDEX]) {
-    desc.setGet(props[GETTER_INDEX]);
-    desc.setSet(props[SETTER_INDEX]);
+// Converts an array returned from Runtime_GetOwnProperty to an actual
+// property descriptor. For a description of the array layout please
+// see the runtime.cc file.
+function ConvertDescriptorArrayToDescriptor(desc_array) {
+  if (desc_array == false) {
+    throw 'Internal error: invalid desc_array';
+  }
+
+  if (IS_UNDEFINED(desc_array)) {
+    return void 0;
+  }
+
+  var desc = new PropertyDescriptor();
+  // This is an accessor.
+  if (desc_array[IS_ACCESSOR_INDEX]) {
+    desc.setGet(desc_array[GETTER_INDEX]);
+    desc.setSet(desc_array[SETTER_INDEX]);
   } else {
-    desc.setValue(props[VALUE_INDEX]);
-    desc.setWritable(props[WRITABLE_INDEX]);
+    desc.setValue(desc_array[VALUE_INDEX]);
+    desc.setWritable(desc_array[WRITABLE_INDEX]);
   }
-  desc.setEnumerable(props[ENUMERABLE_INDEX]);
-  desc.setConfigurable(props[CONFIGURABLE_INDEX]);
+  desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
+  desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);

   return desc;
 }
@@ -535,9 +536,27 @@ function HasProperty(obj, p) {
 }

+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+  // GetOwnProperty returns an array indexed by the constants
+  // defined in macros.py.
+  // If p is not a property on obj undefined is returned.
+  var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+  // A false value here means that access checks failed.
+  if (props == false) return void 0;
+
+  return ConvertDescriptorArrayToDescriptor(props);
+}
+
+
 // ES5 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
-  var current = GetOwnProperty(obj, p);
+  var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
+
+  // A false value here means that access checks failed.
+  if (current_or_access == false) return void 0;
+
+  var current = ConvertDescriptorArrayToDescriptor(current_or_access);
   var extensible = %IsExtensible(ToObject(obj));

   // Error handling according to spec.
@@ -545,10 +564,12 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
   if (IS_UNDEFINED(current) && !extensible)
     throw MakeTypeError("define_disallowed", ["defineProperty"]);

-  if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+  if (!IS_UNDEFINED(current)) {
     // Step 5 and 6
-    if ((!desc.hasEnumerable() ||
-         SameValue(desc.isEnumerable() && current.isEnumerable())) &&
+    if ((IsGenericDescriptor(desc) ||
+         IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
+        (!desc.hasEnumerable() ||
+         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
         (!desc.hasConfigurable() ||
          SameValue(desc.isConfigurable(), current.isConfigurable())) &&
         (!desc.hasWritable() ||
@@ -561,30 +582,36 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
          SameValue(desc.getSet(), current.getSet()))) {
       return true;
     }
-
-    // Step 7
-    if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
-      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    // Step 9
-    if (IsDataDescriptor(current) != IsDataDescriptor(desc))
-      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    // Step 10
-    if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
-      if (!current.isWritable() && desc.isWritable())
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-      if (!current.isWritable() && desc.hasValue() &&
-          !SameValue(desc.getValue(), current.getValue())) {
+    if (!current.isConfigurable()) {
+      // Step 7
+      if (desc.isConfigurable() ||
+          (desc.hasEnumerable() &&
+           desc.isEnumerable() != current.isEnumerable()))
         throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      // Step 8
+      if (!IsGenericDescriptor(desc)) {
+        // Step 9a
+        if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+          throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        // Step 10a
+        if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+          if (!current.isWritable() && desc.isWritable())
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          if (!current.isWritable() && desc.hasValue() &&
+              !SameValue(desc.getValue(), current.getValue())) {
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
+        }
+        // Step 11
+        if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+          if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
+          if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        }
       }
     }
-    // Step 11
-    if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
-      if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-      }
-      if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    }
   }

   // Send flags - enumerable and configurable are common - writable is
@@ -607,7 +634,16 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
   } else
     flag |= DONT_DELETE;

-  if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+  if (IsDataDescriptor(desc) ||
+      (IsGenericDescriptor(desc) &&
+       (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
+    // There are 3 cases that lead here:
+    // Step 4a - defining a new data property.
+    // Steps 9b & 12 - replacing an existing accessor property with a data
+    //                 property.
+    // Step 12 - updating an existing data property with a data or generic
+    //           descriptor.
     if (desc.hasWritable()) {
       flag |= desc.isWritable() ? 0 : READ_ONLY;
     } else if (!IS_UNDEFINED(current)) {
@@ -615,20 +651,30 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
     } else {
       flag |= READ_ONLY;
     }

     var value = void 0;  // Default value is undefined.
     if (desc.hasValue()) {
       value = desc.getValue();
-    } else if (!IS_UNDEFINED(current)) {
+    } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
       value = current.getValue();
     }

     %DefineOrRedefineDataProperty(obj, p, value, flag);
+  } else if (IsGenericDescriptor(desc)) {
+    // Step 12 - updating an existing accessor property with generic
+    //           descriptor. Changing flags only.
+    %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
   } else {
-    if (desc.hasGetter() &&
-        (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
+    // There are 3 cases that lead here:
+    // Step 4b - defining a new accessor property.
+    // Steps 9c & 12 - replacing an existing data property with an accessor
+    //                 property.
+    // Step 12 - updating an existing accessor property with an accessor
+    //           descriptor.
+    if (desc.hasGetter()) {
       %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
     }
-    if (desc.hasSetter() &&
-        (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
+    if (desc.hasSetter()) {
       %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
     }
   }
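The rewritten DefineOwnProperty distinguishes generic descriptors (neither data nor accessor fields present) and follows the spec's step numbering. One case the new branches handle, sketched below: updating only the attributes of an existing accessor property keeps its getter.

    var o = {};
    Object.defineProperty(o, 'x', {
      get: function() { return 42; },
      configurable: true
    });
    // Generic descriptor: flags only, no value/writable/get/set fields.
    // This takes the new IsGenericDescriptor branch, which re-registers
    // the existing getter with the updated attributes.
    Object.defineProperty(o, 'x', { enumerable: true });
    o.x;  // still 42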

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     1
-#define BUILD_NUMBER      1
+#define BUILD_NUMBER      2
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false

27
deps/v8/src/x64/assembler-x64.cc

@@ -916,6 +916,23 @@ void Assembler::call(const Operand& op) {
 }

+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+  positions_recorder()->WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp.
+  emit(0xE8);
+  Address source = pc_ + 4;
+  intptr_t displacement = target - source;
+  ASSERT(is_int32(displacement));
+  emitl(static_cast<int32_t>(displacement));
+}
+
 void Assembler::clc() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -3012,6 +3029,16 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
 }

+void Assembler::movmskpd(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x50);
+  emit_sse_operand(dst, src);
+}
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };

32
deps/v8/src/x64/assembler-x64.h

@ -553,10 +553,12 @@ class Assembler : public Malloced {
// TODO(X64): Rename this, removing the "Real", after changing the above. // TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2; static const int kRealPatchReturnSequenceAddressOffset = 2;
// The x64 JS return sequence is padded with int3 to make it large // Some x64 JS code is padded with int3 to make it large
// enough to hold a call instruction when the debugger patches it. // enough to hold an instruction when the debugger patches it.
static const int kJumpInstructionLength = 13;
static const int kCallInstructionLength = 13; static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13; static const int kJSReturnSequenceLength = 13;
static const int kShortCallInstructionLength = 5;
// The debug break slot must be able to contain a call instruction. // The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength; static const int kDebugBreakSlotLength = kCallInstructionLength;
@ -585,7 +587,7 @@ class Assembler : public Malloced {
// Insert the smallest number of nop instructions // Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple // possible to align the pc offset to a multiple
// of m. m must be a power of 2. // of m, where m must be a power of 2.
void Align(int m); void Align(int m);
// Aligns code to something that's optimal for a jump target for the platform. // Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign(); void CodeTargetAlign();
@ -894,6 +896,10 @@ class Assembler : public Malloced {
arithmetic_op(0x0B, dst, src); arithmetic_op(0x0B, dst, src);
} }
void orl(Register dst, const Operand& src) {
arithmetic_op_32(0x0B, dst, src);
}
void or_(const Operand& dst, Register src) { void or_(const Operand& dst, Register src) {
arithmetic_op(0x09, src, dst); arithmetic_op(0x09, src, dst);
} }
@@ -1057,6 +1063,18 @@ class Assembler : public Malloced {
arithmetic_op_32(0x33, dst, src);
}
void xorl(Register dst, const Operand& src) {
arithmetic_op_32(0x33, dst, src);
}
void xorl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x6, dst, src);
}
void xorl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x6, dst, src);
}
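
All of these 32-bit forms funnel into arithmetic_op_32 (opcode 0x33) or immediate_arithmetic_op_32 (extension /6), the standard x86 XOR encodings. As a hedged illustration of the register-register form (standalone C++, not the assembler's actual emit path):

#include <cstdint>
#include <cstdio>

int main() {
  // xor r32, r/m32 is opcode 0x33 with a ModRM byte; mod=11 selects
  // register-direct operands, reg = destination, r/m = source.
  uint8_t reg = 0;  // eax
  uint8_t rm = 3;   // ebx
  uint8_t modrm = static_cast<uint8_t>(0xC0 | (reg << 3) | rm);
  std::printf("xor eax, ebx => 33 %02X\n", modrm);  // prints "33 C3"
  return 0;
}
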
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1111,6 +1129,12 @@ class Assembler : public Malloced {
void call(Label* L);
void call(Handle<Code> target, RelocInfo::Mode rmode);
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
// same Code object. Should not be used when generating new code (use labels),
// but only when patching existing code.
void call(Address target);
// Call near absolute indirect, address in register
void call(Register adr);
@@ -1254,6 +1278,8 @@ class Assembler : public Malloced {
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
void movmskpd(Register dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);

497
deps/v8/src/x64/code-stubs-x64.cc

@@ -1037,29 +1037,6 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
}
// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
MacroAssembler* masm) {
__ pop(rcx); // Save return address.
// Left and right arguments are already on top of the stack.
// Push this stub's key. Although the operation and the type info are
// encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
__ Push(Smi::FromInt(op_));
__ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
5,
1);
}
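
The stack shuffle performed by this removed helper (pop the return address, push extra arguments, push the return address back) is a common pattern for tail-calling into the runtime with arguments placed under the return address. A hedged, standalone C++ model of the idea (names hypothetical, with std::stack standing in for the machine stack):

#include <cstdint>
#include <initializer_list>
#include <stack>

void PushArgsUnderReturnAddress(std::stack<intptr_t>* stack,
                                std::initializer_list<intptr_t> args) {
  intptr_t return_address = stack->top();  // __ pop(rcx)
  stack->pop();
  for (intptr_t arg : args) {
    stack->push(arg);                      // __ Push(...)
  }
  stack->push(return_address);             // __ push(rcx)
}
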
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
case TRBinaryOpIC::UNINITIALIZED:
@@ -1069,7 +1046,9 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateSmiStub(masm);
break;
case TRBinaryOpIC::INT32:
GenerateInt32Stub(masm);
UNREACHABLE();
// The int32 case is identical to the Smi case. We avoid creating this
// ic state on x64.
break;
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
@@ -1112,54 +1091,337 @@ const char* TypeRecordingBinaryOpStub::GetName() {
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
break;
case Token::MOD:
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR:
GenerateRegisterArgsPush(masm);
break;
default:
UNREACHABLE();
}
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
} else {
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
}
__ bind(&call_runtime);
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
GenerateTypeTransition(masm);
break;
case Token::MOD:
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR:
GenerateTypeTransitionWithSavedArgs(masm);
break;
default:
UNREACHABLE();
}
}

void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations.
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
(op_ == Token::ADD || op_ == Token::SUB ||
op_ == Token::MUL || op_ == Token::DIV);

// Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
Register left = rdx;
Register right = rax;

// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label not_smis;
Label use_fp_on_smis;
Label restore_MOD_registers; // Only used if op_ == Token::MOD.

if (op_ != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}

// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
break;

case Token::SUB:
__ SmiSub(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;

case Token::MUL:
ASSERT(right.is(rax));
__ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
break;

case Token::DIV:
// SmiDiv will not accept left in rdx or right in rax.
left = rcx;
right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
__ SmiDiv(rax, left, right, &use_fp_on_smis);
break;

case Token::MOD:
// SmiMod will not accept left in rdx or right in rax.
left = rcx;
right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
__ SmiMod(rax, left, right, &use_fp_on_smis);
break;

case Token::BIT_OR: {
ASSERT(right.is(rax));
__ movq(rcx, right); // Save the right operand.
__ SmiOr(right, right, left); // BIT_OR is commutative.
__ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
break;
}
case Token::BIT_XOR:
ASSERT(right.is(rax));
__ SmiXor(right, right, left); // BIT_XOR is commutative.
break;

case Token::BIT_AND:
ASSERT(right.is(rax));
__ SmiAnd(right, right, left); // BIT_AND is commutative.
break;

case Token::SHL:
__ SmiShiftLeft(left, left, right);
__ movq(rax, left);
break;

case Token::SAR:
__ SmiShiftArithmeticRight(left, left, right);
__ movq(rax, left);
break;

case Token::SHR:
__ SmiShiftLogicalRight(left, left, right, &not_smis);
__ movq(rax, left);
break;

default:
UNREACHABLE();
}

// 5. Emit return of result in rax. Some operations have registers pushed.
__ ret(0);

// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
if (op_ == Token::DIV || op_ == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
}

if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
}

// 7. Non-smi operands reach the end of the code generated by
// GenerateSmiCode, and fall through to subsequent code,
// with the operands in rdx and rax.
Comment done_comment(masm, "-- Enter non-smi code");
__ bind(&not_smis);
if (op_ == Token::BIT_OR) {
__ movq(right, rcx);
}
}

void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
MacroAssembler* masm,
Label* allocation_failure,
Label* non_numeric_failure) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
GenerateHeapResultAllocation(masm, allocation_failure);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
}
case Token::MOD: {
// For MOD we jump to the allocation_failure label, to call runtime.
__ jmp(allocation_failure);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR: {
Label non_smi_shr_result;
Register heap_number_map = r9;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: {
__ shrl_cl(rax);
// Check if result is negative. This can only happen for a shift
// by zero.
__ testl(rax, rax);
__ j(negative, &non_smi_shr_result);
break;
}
default: UNREACHABLE();
}

STATIC_ASSERT(kSmiValueSize == 32);
// Tag smi result and return.
__ Integer32ToSmi(rax, rax);
__ Ret();

// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
if (op_ == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
// Allocate heap number in new space.
// Not using AllocateHeapNumber macro in order to reuse
// already loaded heap_number_map.
__ AllocateInNewSpace(HeapNumber::kSize,
rax,
rcx,
no_reg,
&allocation_failed,
TAG_OBJECT);
// Set the map.
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ Ret();

__ bind(&allocation_failed);
// We need tagged values in rdx and rax for the following code,
// not int32 in rax and rcx.
__ Integer32ToSmi(rax, rcx);
__ Integer32ToSmi(rdx, rbx);
__ jmp(allocation_failure);
}
break;
}
default: UNREACHABLE(); break;
}
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
"TypeRecordingBinaryStub::GenerateFloatingPointCode.");
}
}

void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
// Registers containing left and right operands respectively.
Register lhs = rdx;
Register rhs = rax;

// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1, string1_smi2;

__ JumpIfNotString(lhs, r8, &not_string1);

// First argument is a string, test second.
__ JumpIfSmi(rhs, &string1_smi2);
__ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string1);

// First and second argument are strings.
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&string_add_stub);

__ bind(&string1_smi2);
// First argument is a string, second is a smi. Try to look up the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
masm, rhs, rbx, rcx, r8, true, &string1);

// Replace second argument on stack and tail-call the string add stub to
// make the result.
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ TailCallStub(&string_add_stub);

// Only first argument is a string.
__ bind(&string1);
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);

// First argument was not a string, test second.
__ bind(&not_string1);
__ JumpIfNotString(rhs, rhs, &not_strings);

// Only second argument is a string.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);

__ bind(&not_strings);
// Neither argument is a string.
// Pop arguments, because CallRuntimeCode wants to push them again.
__ pop(rcx);
__ pop(rax);
__ pop(rdx);
__ push(rcx);
}

void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
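
The control flow of the new GenerateSmiCode is easier to follow outside assembly: attempt the tagged-integer fast path and, only on overflow, redo the operation in floating point and box the result as a heap number. A hedged sketch of that strategy in portable C++ (assuming the GCC/Clang __builtin_add_overflow intrinsic; this is not V8 code):

#include <cstdint>
#include <variant>

// Result is either a 32-bit "smi" payload or a boxed double.
std::variant<int32_t, double> AddWithSmiFastPath(int32_t left, int32_t right) {
  int32_t result;
  if (!__builtin_add_overflow(left, right, &result)) {
    return result;                           // fast path: fits in a smi
  }
  // use_fp_on_smis: redo the operation in double precision.
  return static_cast<double>(left) + static_cast<double>(right);
}
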
@@ -1167,30 +1429,90 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
UNIMPLEMENTED();
}

void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label not_smi;

GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);

__ bind(&not_smi);
GenerateTypeTransition(masm);
}

void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
GenerateTypeTransition(masm);
}

void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
GenerateFloatingPointCode(masm, &gc_required, &not_number);

__ bind(&not_number);
GenerateTypeTransition(masm);

__ bind(&gc_required);
GenerateCallRuntimeCode(masm);
}

void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;

GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);

__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateStringAddCode(masm);
}

__ bind(&call_runtime);
GenerateCallRuntimeCode(masm);
}

void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
Label skip_allocation;
OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rdx, &skip_allocation);
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, alloc_failure);
// Now rdx can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ movq(rdx, rbx);
__ bind(&skip_allocation);
// Use object in rdx as a result holder
__ movq(rax, rdx);
break;
}
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, alloc_failure);
// Now rax can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
}
@@ -1512,6 +1834,7 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
Register heap_number_map) {
@@ -1521,28 +1844,27 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label load_arg2, done;
__ JumpIfNotSmi(rdx, &arg1_is_object);
__ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(r8, rdx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
__ movl(rdx, Immediate(0));
__ movl(r8, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in rcx.
IntegerConvert(masm, rdx, rdx);
// Here rdx has the untagged integer, rax has a Smi or a heap number.
// Get the untagged integer version of the rdx heap number in rcx.
IntegerConvert(masm, r8, rdx);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
__ SmiToInteger32(rax, rax);
__ movl(rcx, rax);
__ SmiToInteger32(rcx, rax);
__ jmp(&done);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
@@ -1558,7 +1880,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
// Get the untagged integer version of the rax heap number in rcx.
IntegerConvert(masm, rcx, rax);
__ bind(&done);
__ movl(rax, rdx);
__ movl(rax, r8);
}
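
LoadAsIntegers ultimately applies the ECMA-262 (section 9.5) ToInt32 truncation to each operand, via IntegerConvert for heap-number inputs. A rough, hedged C++ model of that conversion (hypothetical helper, not the V8 routine):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;  // NaN and +/-Infinity map to 0.
  double m = std::fmod(std::trunc(value), 4294967296.0);  // modulo 2^32
  if (m < 0) m += 4294967296.0;         // normalize into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
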
@@ -1888,11 +2210,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
// Stack frame on entry.
// esp[0]: return address
// esp[8]: last_match_info (expected JSArray)
// esp[16]: previous index
// esp[24]: subject string
// esp[32]: JSRegExp object
// rsp[0]: return address
// rsp[8]: last_match_info (expected JSArray)
// rsp[16]: previous index
// rsp[24]: subject string
// rsp[32]: JSRegExp object
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -2234,7 +2556,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
// Allocate RegExpResult followed by FixedArray with size in ebx.
// Allocate RegExpResult followed by FixedArray with size in rbx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
@@ -2293,7 +2615,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
__ j(less_equal, &done); // Jump if ecx is negative or zero.
__ j(less_equal, &done); // Jump if rcx is negative or zero.
__ subl(rbx, Immediate(1));
__ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
@@ -2656,7 +2978,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// undefined, and are equal.
__ Set(rax, EQUAL);
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(0);
__ bind(&not_both_objects);
@@ -3151,7 +3473,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If current EBP value is the same as js_entry_sp value, it means that
// If current RBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ movq(kScratchRegister, js_entry_sp);
__ cmpq(rbp, Operand(kScratchRegister, 0));
@@ -4414,6 +4736,53 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ jmp(rdi);
}
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements,
Register untagged_key,
Register result,
Label* not_pixel_array,
Label* key_not_smi,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
// elements - is set to the receiver's elements if
// the receiver doesn't have a pixel array or the
// key is not a smi, otherwise it's the elements'
// external pointer.
// untagged_key - is set to the untagged key
// Some callers already have verified that the key is a smi. key_not_smi is
// set to NULL as a sentinel for that case. Otherwise, an explicit check
// that the key is a smi is generated.
if (key_not_smi != NULL) {
__ JumpIfNotSmi(key, key_not_smi);
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key);
}
}
__ SmiToInteger32(untagged_key, key);
// Verify that the receiver has pixel array elements.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
// Check that the smi is in range.
__ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
__ j(above_equal, out_of_range); // unsigned check handles negative keys.
// Load and tag the element as a smi.
__ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
__ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
__ Integer32ToSmi(result, result);
__ ret(0);
}
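
A hedged C++ model of the fast path just generated (hypothetical types, not V8's heap layout): after untagging the key, a single unsigned comparison performs the bounds check, rejecting negative keys for free, and the loaded byte always fits in a smi.

#include <cstdint>
#include <optional>
#include <vector>

std::optional<int32_t> FastPixelArrayLoad(const std::vector<uint8_t>& pixels,
                                          int32_t key) {
  // Unsigned comparison: negative keys wrap to huge values and fail too.
  if (static_cast<uint32_t>(key) >= pixels.size()) {
    return std::nullopt;  // out_of_range: caller falls back to the slow path
  }
  return static_cast<int32_t>(pixels[static_cast<uint32_t>(key)]);
}
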
#undef __
} } // namespace v8::internal

24
deps/v8/src/x64/code-stubs-x64.h

@@ -270,6 +270,11 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateFloatingPointCode(MacroAssembler* masm,
Label* allocation_failure,
Label* non_numeric_failure);
void GenerateStringAddCode(MacroAssembler* masm);
void GenerateCallRuntimeCode(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
@@ -447,6 +452,25 @@ class NumberToStringStub: public CodeStub {
};
// Generate code to load an element from a pixel array. The receiver is
// assumed not to be a smi and to have elements; the caller must guarantee
// this precondition. If the receiver does not have elements that are pixel
// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
// the generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key,
// so that the smi check is not generated. If key is not a valid index within
// the bounds of the pixel array, the generated code jumps to out_of_range.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements,
Register untagged_key,
Register result,
Label* not_pixel_array,
Label* key_not_smi,
Label* out_of_range);
} } // namespace v8::internal

#endif // V8_X64_CODE_STUBS_X64_H_

10
deps/v8/src/x64/codegen-x64.cc

@@ -5402,9 +5402,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
// Push the strict mode flag.
frame_->Push(Smi::FromInt(strict_mode_flag()));
// Resolve the call.
result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
result =
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -5421,8 +5424,11 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
// Push the strict mode flag.
frame_->Push(Smi::FromInt(strict_mode_flag()));
// Resolve the call.
result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.

