mirror of https://github.com/lukechilds/node.git
Ryan Dahl
14 years ago
738 changed files with 125799 additions and 108391 deletions
File diff suppressed because it is too large
@@ -0,0 +1,38 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from os.path import join
Import('context')

def ConfigureObjectFiles():
  env = Environment()
  env.Replace(**context.flags['preparser'])
  context.ApplyEnvOverrides(env)
  return env.Object('preparser-process.cc')

preparser_object = ConfigureObjectFiles()
Return('preparser_object')
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


#ifndef V8_ARM_CODEGEN_ARM_INL_H_
#define V8_ARM_CODEGEN_ARM_INL_H_

#include "virtual-frame-arm.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm_)

// Platform-specific inline functions.

void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }

#undef __

} }  // namespace v8::internal

#endif  // V8_ARM_CODEGEN_ARM_INL_H_
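
The `#define __ ACCESS_MASM(masm_)` line above is a V8-wide convention: inside code generators, `__` makes emission read like an assembly listing, and in ordinary builds `ACCESS_MASM(masm)` essentially expands to plain `masm->`, so `__ jmp(&entry_label_)` is just a method call on the macro assembler. A minimal, self-contained sketch of the idiom (the toy Assembler and Label types here are illustrative, not V8's):

#include <cstdio>

struct Label { const char* name; };

struct Assembler {
  // Stand-in for a macro assembler: "emitting" prints the instruction.
  void jmp(Label* l) { std::printf("  b %s\n", l->name); }
};

// As in the header above: __ hides the assembler pointer so each line of
// the emitter reads like one emitted instruction.
#define __ masm_->

struct DeferredCodeSketch {
  Assembler* masm_;
  Label entry_label_{"deferred_entry"};
  void Jump() { __ jmp(&entry_label_); }  // expands to masm_->jmp(&entry_label_)
};

#undef __

int main() {
  Assembler masm;
  DeferredCodeSketch code{&masm};
  code.Jump();  // prints "  b deferred_entry"
}

The matching `#undef __` at the bottom of each file keeps the shorthand from leaking into other translation units through header inclusion.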
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,174 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// JumpTarget implementation.

#define __ ACCESS_MASM(cgen()->masm())

void JumpTarget::DoJump() {
  ASSERT(cgen()->has_valid_frame());
  // Live non-frame registers are not allowed at unconditional jumps
  // because we have no way of invalidating the corresponding results
  // which are still live in the C++ code.
  ASSERT(cgen()->HasValidEntryRegisters());

  if (entry_frame_set_) {
    if (entry_label_.is_bound()) {
      // If we already bound and generated code at the destination then it
      // is too late to ask for less optimistic type assumptions.
      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
    }
    // There is already a frame expectation at the target.
    cgen()->frame()->MergeTo(&entry_frame_);
    cgen()->DeleteFrame();
  } else {
    // Clone the current frame to use as the expected one at the target.
    set_entry_frame(cgen()->frame());
    // Zap the fall-through frame since the jump was unconditional.
    RegisterFile empty;
    cgen()->SetFrame(NULL, &empty);
  }
  if (entry_label_.is_bound()) {
    // You can't jump backwards to an already bound label unless you admitted
    // up front that this was a bidirectional jump target. Bidirectional jump
    // targets will zap their type info when bound in case some later virtual
    // frame with less precise type info branches to them.
    ASSERT(direction_ != FORWARD_ONLY);
  }
  __ jmp(&entry_label_);
}


void JumpTarget::DoBranch(Condition cond, Hint ignored) {
  ASSERT(cgen()->has_valid_frame());

  if (entry_frame_set_) {
    if (entry_label_.is_bound()) {
      // If we already bound and generated code at the destination then it
      // is too late to ask for less optimistic type assumptions.
      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
    }
    // We have an expected frame to merge to on the backward edge.
    cgen()->frame()->MergeTo(&entry_frame_, cond);
  } else {
    // Clone the current frame to use as the expected one at the target.
    set_entry_frame(cgen()->frame());
  }
  if (entry_label_.is_bound()) {
    // You can't branch backwards to an already bound label unless you admitted
    // up front that this was a bidirectional jump target. Bidirectional jump
    // targets will zap their type info when bound in case some later virtual
    // frame with less precise type info branches to them.
    ASSERT(direction_ != FORWARD_ONLY);
  }
  __ b(cond, &entry_label_);
  if (cond == al) {
    cgen()->DeleteFrame();
  }
}


void JumpTarget::Call() {
  // Call is used to push the address of the catch block on the stack as
  // a return address when compiling try/catch and try/finally. We
  // fully spill the frame before making the call. The expected frame
  // at the label (which should be the only one) is the spilled current
  // frame plus an in-memory return address. The "fall-through" frame
  // at the return site is the spilled current frame.
  ASSERT(cgen()->has_valid_frame());
  // There are no non-frame references across the call.
  ASSERT(cgen()->HasValidEntryRegisters());
  ASSERT(!is_linked());

  // Calls are always 'forward' so we use a copy of the current frame (plus
  // one for a return address) as the expected frame.
  ASSERT(!entry_frame_set_);
  VirtualFrame target_frame = *cgen()->frame();
  target_frame.Adjust(1);
  set_entry_frame(&target_frame);

  __ bl(&entry_label_);
}


void JumpTarget::DoBind() {
  ASSERT(!is_bound());

  // Live non-frame registers are not allowed at the start of a basic
  // block.
  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());

  if (cgen()->has_valid_frame()) {
    if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
    // If there is a current frame we can use it on the fall through.
    if (!entry_frame_set_) {
      entry_frame_ = *cgen()->frame();
      entry_frame_set_ = true;
    } else {
      cgen()->frame()->MergeTo(&entry_frame_);
      // On fall through we may have to merge both ways.
      if (direction_ != FORWARD_ONLY) {
        // This will not need to adjust the virtual frame entries that are
        // register allocated since that was done above and they now match.
        // But it does need to adjust the entry_frame_ of this jump target
        // to make it potentially less optimistic. Later code can branch back
        // to this jump target and we need to assert that that code does not
        // have weaker assumptions about types.
        entry_frame_.MergeTo(cgen()->frame());
      }
    }
  } else {
    // If there is no current frame we must have an entry frame which we can
    // copy.
    ASSERT(entry_frame_set_);
    RegisterFile empty;
    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
  }

  __ bind(&entry_label_);
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
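
What this deleted file implemented is a small protocol: the first jump to a JumpTarget records the virtual frame expected at the label (set_entry_frame), every later jump or branch must merge its current frame to that expectation, and a bidirectional target (a loop header) forgets its type info when bound so that backward branches carrying weaker type assumptions stay compatible. A minimal sketch of the same protocol, with a plain bitmask of known-smi facts standing in for the full virtual frame (all names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>
#include <optional>

// Stand-in for the virtual frame: one bit per stack slot, set when the slot
// is known to hold a smi. A frame can branch to an expectation only if the
// expectation claims nothing the frame does not know, i.e. the expectation's
// bits are a subset of the frame's bits.
struct FrameFacts {
  uint32_t known_smi_bits = 0;
  bool CanBranchTo(const FrameFacts& target) const {
    return (target.known_smi_bits & ~known_smi_bits) == 0;
  }
};

class JumpTargetSketch {
 public:
  explicit JumpTargetSketch(bool bidirectional) : bidirectional_(bidirectional) {}

  // A jump or branch from code whose frame currently looks like `current`.
  void Jump(const FrameFacts& current) {
    if (!entry_frame_) {
      entry_frame_ = current;                      // First jump sets the expectation.
    } else {
      assert(current.CanBranchTo(*entry_frame_));  // Later jumps must merge to it.
    }
  }

  // Binding the label. A bidirectional target zaps its type info so that
  // later backward branches with weaker assumptions remain compatible.
  FrameFacts Bind(const FrameFacts& fall_through) {
    if (!entry_frame_) entry_frame_ = fall_through;
    if (bidirectional_) entry_frame_->known_smi_bits = 0;
    return *entry_frame_;
  }

 private:
  bool bidirectional_;
  std::optional<FrameFacts> entry_frame_;
};

int main() {
  JumpTargetSketch loop_top(/*bidirectional=*/true);
  FrameFacts f{0b11};                        // Two slots known to be smis.
  FrameFacts expected = loop_top.Bind(f);    // Loop header: facts are zapped.
  FrameFacts back_edge{0b01};                // Weaker facts at the back edge.
  loop_top.Jump(back_edge);                  // OK: {} is a subset of {slot 0}.
  (void)expected;
}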
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,843 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
#include "virtual-frame-inl.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm())

void VirtualFrame::PopToR1R0() {
  // Shuffle things around so the top of stack is in r0 and r1.
  MergeTOSTo(R0_R1_TOS);
  // Pop the two registers off the stack so they are detached from the frame.
  LowerHeight(2);
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::PopToR1() {
  // Shuffle things around so the top of stack is only in r1.
  MergeTOSTo(R1_TOS);
  // Pop the register off the stack so it is detached from the frame.
  LowerHeight(1);
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::PopToR0() {
  // Shuffle things around so the top of stack is only in r0.
  MergeTOSTo(R0_TOS);
  // Pop the register off the stack so it is detached from the frame.
  LowerHeight(1);
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
  if (Equals(expected)) return;
  ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
         expected->tos_known_smi_map_);
  ASSERT(expected->IsCompatibleWith(this));
  MergeTOSTo(expected->top_of_stack_state_, cond);
  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}


void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
  if (Equals(expected)) return;
  tos_known_smi_map_ &= expected->tos_known_smi_map_;
  MergeTOSTo(expected->top_of_stack_state_, cond);
  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}


void VirtualFrame::MergeTOSTo(
    VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
  switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
    case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
      __ pop(r0, cond);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
      __ pop(r1, cond);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
      __ push(r0, cond);
      break;
    case CASE_NUMBER(R0_TOS, R0_TOS):
      break;
    case CASE_NUMBER(R0_TOS, R1_TOS):
      __ mov(r1, r0, LeaveCC, cond);
      break;
    case CASE_NUMBER(R0_TOS, R0_R1_TOS):
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(R0_TOS, R1_R0_TOS):
      __ mov(r1, r0, LeaveCC, cond);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
      __ push(r1, cond);
      break;
    case CASE_NUMBER(R1_TOS, R0_TOS):
      __ mov(r0, r1, LeaveCC, cond);
      break;
    case CASE_NUMBER(R1_TOS, R1_TOS):
      break;
    case CASE_NUMBER(R1_TOS, R0_R1_TOS):
      __ mov(r0, r1, LeaveCC, cond);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(R1_TOS, R1_R0_TOS):
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
      __ Push(r1, r0, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R0_TOS):
      __ push(r1, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R1_TOS):
      __ push(r1, cond);
      __ mov(r1, r0, LeaveCC, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
      break;
    case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
      __ Swap(r0, r1, ip, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
      __ Push(r0, r1, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R0_TOS):
      __ push(r0, cond);
      __ mov(r0, r1, LeaveCC, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R1_TOS):
      __ push(r0, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
      __ Swap(r0, r1, ip, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
      break;
    default:
      UNREACHABLE();
#undef CASE_NUMBER
  }
  // A conditional merge will be followed by a conditional branch and the
  // fall-through code will have an unchanged virtual frame state. If the
  // merge is unconditional ('al'ways) then it might be followed by a fall
  // through. We need to update the virtual frame state to match the code we
  // are falling into. The final case is an unconditional merge followed by an
  // unconditional branch, in which case it doesn't matter what we do to the
  // virtual frame state, because the virtual frame will be invalidated.
  if (cond == al) {
    top_of_stack_state_ = expected_top_of_stack_state;
  }
}


void VirtualFrame::Enter() {
  Comment cmnt(masm(), "[ Enter JS frame");

#ifdef DEBUG
  // Verify that r1 contains a JS function. The following code relies
  // on r2 being available for use.
  if (FLAG_debug_code) {
    Label map_check, done;
    __ tst(r1, Operand(kSmiTagMask));
    __ b(ne, &map_check);
    __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
    __ bind(&map_check);
    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
    __ b(eq, &done);
    __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
    __ bind(&done);
  }
#endif  // DEBUG

  // We are about to push four values to the frame.
  Adjust(4);
  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
  // Adjust FP to point to saved FP.
  __ add(fp, sp, Operand(2 * kPointerSize));
}


void VirtualFrame::Exit() {
  Comment cmnt(masm(), "[ Exit JS frame");
  // Record the location of the JS exit code for patching when setting
  // break point.
  __ RecordJSReturn();

  // Drop the execution stack down to the frame pointer and restore the caller
  // frame pointer and return address.
  __ mov(sp, fp);
  __ ldm(ia_w, sp, fp.bit() | lr.bit());
}


void VirtualFrame::AllocateStackSlots() {
  int count = local_count();
  if (count > 0) {
    Comment cmnt(masm(), "[ Allocate space for locals");
    Adjust(count);
    // Initialize stack slots with 'undefined' value.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
    if (count < kLocalVarBound) {
      // For fewer locals the unrolled loop is more compact.
      for (int i = 0; i < count; i++) {
        __ push(ip);
      }
    } else {
      // For more locals a loop in generated code is more compact.
      Label alloc_locals_loop;
      __ mov(r1, Operand(count));
      __ bind(&alloc_locals_loop);
      __ push(ip);
      __ sub(r1, r1, Operand(1), SetCC);
      __ b(ne, &alloc_locals_loop);
    }
  } else {
    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
  }
  // Check the stack for overflow or a break request.
  masm()->cmp(sp, Operand(r2));
  StackCheckStub stub;
  // Call the stub if lower.
  masm()->mov(ip,
              Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                      RelocInfo::CODE_TARGET),
              LeaveCC,
              lo);
  masm()->Call(ip, lo);
}


void VirtualFrame::PushReceiverSlotAddress() {
  UNIMPLEMENTED();
}


void VirtualFrame::PushTryHandler(HandlerType type) {
  // Grow the expression stack by handler size less one (the return
  // address in lr is already counted by a call instruction).
  Adjust(kHandlerSize - 1);
  __ PushTryHandler(IN_JAVASCRIPT, type);
}


void VirtualFrame::CallJSFunction(int arg_count) {
  // InvokeFunction requires function in r1.
  PopToR1();
  SpillAll();

  // +1 for receiver.
  Forget(arg_count + 1);
  ASSERT(cgen()->HasValidEntryRegisters());
  ParameterCount count(arg_count);
  __ InvokeFunction(r1, count, CALL_FUNCTION);
  // Restore the context.
  __ ldr(cp, Context());
}


void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
  SpillAll();
  Forget(arg_count);
  ASSERT(cgen()->HasValidEntryRegisters());
  __ CallRuntime(f, arg_count);
}


void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
  SpillAll();
  Forget(arg_count);
  ASSERT(cgen()->HasValidEntryRegisters());
  __ CallRuntime(id, arg_count);
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void VirtualFrame::DebugBreak() {
  ASSERT(cgen()->HasValidEntryRegisters());
  __ DebugBreak();
}
#endif


void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                 InvokeJSFlags flags,
                                 int arg_count) {
  Forget(arg_count);
  __ InvokeBuiltin(id, flags);
}


void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  PopToR0();
  SpillAll();
  __ mov(r2, Operand(name));
  CallCodeObject(ic, mode, 0);
}


void VirtualFrame::CallStoreIC(Handle<String> name,
                               bool is_contextual,
                               StrictModeFlag strict_mode) {
  Handle<Code> ic(Builtins::builtin(
      (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
                                   : Builtins::StoreIC_Initialize));
  PopToR0();
  RelocInfo::Mode mode;
  if (is_contextual) {
    SpillAll();
    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
    mode = RelocInfo::CODE_TARGET_CONTEXT;
  } else {
    EmitPop(r1);
    SpillAll();
    mode = RelocInfo::CODE_TARGET;
  }
  __ mov(r2, Operand(name));
  CallCodeObject(ic, mode, 0);
}


void VirtualFrame::CallKeyedLoadIC() {
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  PopToR1R0();
  SpillAll();
  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}


void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
  Handle<Code> ic(Builtins::builtin(
      (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
                                   : Builtins::KeyedStoreIC_Initialize));
  PopToR1R0();
  SpillAll();
  EmitPop(r2);
  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}


void VirtualFrame::CallCodeObject(Handle<Code> code,
                                  RelocInfo::Mode rmode,
                                  int dropped_args) {
  switch (code->kind()) {
    case Code::CALL_IC:
    case Code::KEYED_CALL_IC:
    case Code::FUNCTION:
      break;
    case Code::KEYED_LOAD_IC:
    case Code::LOAD_IC:
    case Code::KEYED_STORE_IC:
    case Code::STORE_IC:
      ASSERT(dropped_args == 0);
      break;
    case Code::BUILTIN:
      ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
      break;
    default:
      UNREACHABLE();
      break;
  }
  Forget(dropped_args);
  ASSERT(cgen()->HasValidEntryRegisters());
  __ Call(code, rmode);
}


// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
const bool VirtualFrame::kR0InUse[TOS_STATES] =
    { false, true, false, true, true };
const bool VirtualFrame::kR1InUse[TOS_STATES] =
    { false, false, true, true, true };
const int VirtualFrame::kVirtualElements[TOS_STATES] =
    { 0, 1, 1, 2, 2 };
const Register VirtualFrame::kTopRegister[TOS_STATES] =
    { r0, r0, r1, r1, r0 };
const Register VirtualFrame::kBottomRegister[TOS_STATES] =
    { r0, r0, r1, r0, r1 };
const Register VirtualFrame::kAllocatedRegisters[
    VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
// Popping is done by the transition implied by kStateAfterPop. Of course if
// there were no stack slots allocated to registers then the physical SP must
// be adjusted.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
// Pushing is done by the transition implied by kStateAfterPush. Of course if
// the maximum number of registers was already allocated to the top of stack
// slots then one register must be physically pushed onto the stack.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };


bool VirtualFrame::SpilledScope::is_spilled_ = false;


void VirtualFrame::Drop(int count) {
  ASSERT(count >= 0);
  ASSERT(height() >= count);
  // Discard elements from the virtual frame and free any registers.
  int num_virtual_elements = kVirtualElements[top_of_stack_state_];
  while (num_virtual_elements > 0) {
    Pop();
    num_virtual_elements--;
    count--;
    if (count == 0) return;
  }
  if (count == 0) return;
  __ add(sp, sp, Operand(count * kPointerSize));
  LowerHeight(count);
}


void VirtualFrame::Pop() {
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    __ add(sp, sp, Operand(kPointerSize));
  } else {
    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
  }
  LowerHeight(1);
}


void VirtualFrame::EmitPop(Register reg) {
  ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    __ pop(reg);
  } else {
    __ mov(reg, kTopRegister[top_of_stack_state_]);
    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
  }
  LowerHeight(1);
}


void VirtualFrame::SpillAllButCopyTOSToR0() {
  switch (top_of_stack_state_) {
    case NO_TOS_REGISTERS:
      __ ldr(r0, MemOperand(sp, 0));
      break;
    case R0_TOS:
      __ push(r0);
      break;
    case R1_TOS:
      __ push(r1);
      __ mov(r0, r1);
      break;
    case R0_R1_TOS:
      __ Push(r1, r0);
      break;
    case R1_R0_TOS:
      __ Push(r0, r1);
      __ mov(r0, r1);
      break;
    default:
      UNREACHABLE();
  }
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::SpillAllButCopyTOSToR1() {
  switch (top_of_stack_state_) {
    case NO_TOS_REGISTERS:
      __ ldr(r1, MemOperand(sp, 0));
      break;
    case R0_TOS:
      __ push(r0);
      __ mov(r1, r0);
      break;
    case R1_TOS:
      __ push(r1);
      break;
    case R0_R1_TOS:
      __ Push(r1, r0);
      __ mov(r1, r0);
      break;
    case R1_R0_TOS:
      __ Push(r0, r1);
      break;
    default:
      UNREACHABLE();
  }
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::SpillAllButCopyTOSToR1R0() {
  switch (top_of_stack_state_) {
    case NO_TOS_REGISTERS:
      __ ldr(r1, MemOperand(sp, 0));
      __ ldr(r0, MemOperand(sp, kPointerSize));
      break;
    case R0_TOS:
      __ push(r0);
      __ mov(r1, r0);
      __ ldr(r0, MemOperand(sp, kPointerSize));
      break;
    case R1_TOS:
      __ push(r1);
      __ ldr(r0, MemOperand(sp, kPointerSize));
      break;
    case R0_R1_TOS:
      __ Push(r1, r0);
      __ Swap(r0, r1, ip);
      break;
    case R1_R0_TOS:
      __ Push(r0, r1);
      break;
    default:
      UNREACHABLE();
  }
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


Register VirtualFrame::Peek() {
  AssertIsNotSpilled();
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
    Register answer = kTopRegister[top_of_stack_state_];
    __ pop(answer);
    return answer;
  } else {
    return kTopRegister[top_of_stack_state_];
  }
}


Register VirtualFrame::Peek2() {
  AssertIsNotSpilled();
  switch (top_of_stack_state_) {
    case NO_TOS_REGISTERS:
    case R0_TOS:
    case R0_R1_TOS:
      MergeTOSTo(R0_R1_TOS);
      return r1;
    case R1_TOS:
    case R1_R0_TOS:
      MergeTOSTo(R1_R0_TOS);
      return r0;
    default:
      UNREACHABLE();
      return no_reg;
  }
}


void VirtualFrame::Dup() {
  if (SpilledScope::is_spilled()) {
    __ ldr(ip, MemOperand(sp, 0));
    __ push(ip);
  } else {
    switch (top_of_stack_state_) {
      case NO_TOS_REGISTERS:
        __ ldr(r0, MemOperand(sp, 0));
        top_of_stack_state_ = R0_TOS;
        break;
      case R0_TOS:
        __ mov(r1, r0);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_TOS:
        __ mov(r0, r1);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R0_R1_TOS:
        __ push(r1);
        __ mov(r1, r0);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_R0_TOS:
        __ push(r0);
        __ mov(r0, r1);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      default:
        UNREACHABLE();
    }
  }
  RaiseHeight(1, tos_known_smi_map_ & 1);
}


void VirtualFrame::Dup2() {
  if (SpilledScope::is_spilled()) {
    __ ldr(ip, MemOperand(sp, kPointerSize));
    __ push(ip);
    __ ldr(ip, MemOperand(sp, kPointerSize));
    __ push(ip);
  } else {
    switch (top_of_stack_state_) {
      case NO_TOS_REGISTERS:
        __ ldr(r0, MemOperand(sp, 0));
        __ ldr(r1, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R0_TOS:
        __ push(r0);
        __ ldr(r1, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_TOS:
        __ push(r1);
        __ ldr(r0, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R1_R0_TOS;
        break;
      case R0_R1_TOS:
        __ Push(r1, r0);
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_R0_TOS:
        __ Push(r0, r1);
        top_of_stack_state_ = R1_R0_TOS;
        break;
      default:
        UNREACHABLE();
    }
  }
  RaiseHeight(2, tos_known_smi_map_ & 3);
}


Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
  ASSERT(but_not_to_this_one.is(r0) ||
         but_not_to_this_one.is(r1) ||
         but_not_to_this_one.is(no_reg));
  LowerHeight(1);
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    if (but_not_to_this_one.is(r0)) {
      __ pop(r1);
      return r1;
    } else {
      __ pop(r0);
      return r0;
    }
  } else {
    Register answer = kTopRegister[top_of_stack_state_];
    ASSERT(!answer.is(but_not_to_this_one));
    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
    return answer;
  }
}


void VirtualFrame::EnsureOneFreeTOSRegister() {
  if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
    __ push(kBottomRegister[top_of_stack_state_]);
    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
  }
  ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
}


void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
  RaiseHeight(1, info.IsSmi() ? 1 : 0);
  if (reg.is(cp)) {
    // If we are pushing cp then we are about to make a call and things have to
    // be pushed to the physical stack. There's nothing to be gained by moving
    // to a TOS register and then pushing that, we might as well push to the
    // physical stack immediately.
    MergeTOSTo(NO_TOS_REGISTERS);
    __ push(reg);
    return;
  }
  if (SpilledScope::is_spilled()) {
    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
    __ push(reg);
    return;
  }
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    if (reg.is(r0)) {
      top_of_stack_state_ = R0_TOS;
      return;
    }
    if (reg.is(r1)) {
      top_of_stack_state_ = R1_TOS;
      return;
    }
  }
  EnsureOneFreeTOSRegister();
  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
  Register dest = kTopRegister[top_of_stack_state_];
  __ Move(dest, reg);
}


void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
  if (this_far_down < kTOSKnownSmiMapSize) {
    tos_known_smi_map_ &= ~(1 << this_far_down);
  }
  if (this_far_down == 0) {
    Pop();
    Register dest = GetTOSRegister();
    if (dest.is(reg)) {
      // We already popped one item off the top of the stack. If the only
      // free register is the one we were asked to push then we have been
      // asked to push a register that was already in use, which cannot
      // happen. It therefore follows that there are two free TOS registers:
      ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
      dest = dest.is(r0) ? r1 : r0;
    }
    __ mov(dest, reg);
    EmitPush(dest);
  } else if (this_far_down == 1) {
    int virtual_elements = kVirtualElements[top_of_stack_state_];
    if (virtual_elements < 2) {
      __ str(reg, ElementAt(this_far_down));
    } else {
      ASSERT(virtual_elements == 2);
      ASSERT(!reg.is(r0));
      ASSERT(!reg.is(r1));
      Register dest = kBottomRegister[top_of_stack_state_];
      __ mov(dest, reg);
    }
  } else {
    ASSERT(this_far_down >= 2);
    ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
    __ str(reg, ElementAt(this_far_down));
  }
}


Register VirtualFrame::GetTOSRegister() {
  if (SpilledScope::is_spilled()) return r0;

  EnsureOneFreeTOSRegister();
  return kTopRegister[kStateAfterPush[top_of_stack_state_]];
}


void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
  RaiseHeight(1, info.IsSmi() ? 1 : 0);
  if (SpilledScope::is_spilled()) {
    __ mov(r0, operand);
    __ push(r0);
    return;
  }
  EnsureOneFreeTOSRegister();
  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
  __ mov(kTopRegister[top_of_stack_state_], operand);
}


void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
  RaiseHeight(1, info.IsSmi() ? 1 : 0);
  if (SpilledScope::is_spilled()) {
    __ ldr(r0, operand);
    __ push(r0);
    return;
  }
  EnsureOneFreeTOSRegister();
  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
  __ ldr(kTopRegister[top_of_stack_state_], operand);
}


void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
  RaiseHeight(1, 0);
  if (SpilledScope::is_spilled()) {
    __ LoadRoot(r0, index);
    __ push(r0);
    return;
  }
  EnsureOneFreeTOSRegister();
  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
  __ LoadRoot(kTopRegister[top_of_stack_state_], index);
}


void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
  ASSERT(SpilledScope::is_spilled());
  Adjust(count);
  __ stm(db_w, sp, src_regs);
}


void VirtualFrame::SpillAll() {
  switch (top_of_stack_state_) {
    case R1_R0_TOS:
      masm()->push(r0);
      // Fall through.
    case R1_TOS:
      masm()->push(r1);
      top_of_stack_state_ = NO_TOS_REGISTERS;
      break;
    case R0_R1_TOS:
      masm()->push(r1);
      // Fall through.
    case R0_TOS:
      masm()->push(r0);
      top_of_stack_state_ = NO_TOS_REGISTERS;
      // Fall through.
    case NO_TOS_REGISTERS:
      break;
    default:
      UNREACHABLE();
      break;
  }
  ASSERT(register_allocation_map_ == 0);  // Not yet implemented.
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
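
Two implementation tricks in the file above are worth calling out. MergeTOSTo flattens a two-dimensional (current state, wanted state) dispatch into one switch by encoding each pair as a * TOS_STATES + b, and Pop/EmitPop/Peek step through the kStateAfterPop/kStateAfterPush transition tables instead of recomputing transitions. A compact sketch of both ideas using a reduced two-register state machine (toy instruction strings are printed instead of emitted; this is an illustration, not V8 code):

#include <cstdio>

// Which register currently caches the top of the expression stack
// (a two-register subset of V8's five TOS states).
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, TOS_STATES };

// Table-driven transition, as with kStateAfterPop above: popping a
// register-cached element never needs an instruction, only a state change.
const TopOfStack kStateAfterPop[TOS_STATES] = {
  NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS
};

// Encode the (current, wanted) pair as one integer so a single flat switch
// covers every combination; the case labels stay readable constant
// expressions. This is exactly the CASE_NUMBER trick used by MergeTOSTo.
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
void MergeTOSTo(TopOfStack* state, TopOfStack wanted) {
  switch (CASE_NUMBER(*state, wanted)) {
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS): std::puts("pop r0");     break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS): std::puts("pop r1");     break;
    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS): std::puts("push r0");    break;
    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS): std::puts("push r1");    break;
    case CASE_NUMBER(R0_TOS, R1_TOS):           std::puts("mov r1, r0"); break;
    case CASE_NUMBER(R1_TOS, R0_TOS):           std::puts("mov r0, r1"); break;
    default: break;  // Identical states need no code.
  }
  *state = wanted;
}
#undef CASE_NUMBER

int main() {
  TopOfStack state = NO_TOS_REGISTERS;
  MergeTOSTo(&state, R0_TOS);      // "pop r0": the TOS is now cached in r0.
  MergeTOSTo(&state, R1_TOS);      // "mov r1, r0": shuffle, no memory traffic.
  state = kStateAfterPop[state];   // Virtual pop: free the register silently.
  return state == NO_TOS_REGISTERS ? 0 : 1;
}

The payoff of the real five-state version is that common push/pop/dup sequences compile down to register moves and state changes, touching the physical stack only when both top-of-stack registers are already occupied.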
@ -1,520 +0,0 @@ |
|||
// Copyright 2009 the V8 project authors. All rights reserved.
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following
|
|||
// disclaimer in the documentation and/or other materials provided
|
|||
// with the distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived
|
|||
// from this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_ |
|||
#define V8_ARM_VIRTUAL_FRAME_ARM_H_ |
|||
|
|||
#include "register-allocator.h" |
|||
|
|||
namespace v8 { |
|||
namespace internal { |
|||
|
|||
// This dummy class is only used to create invalid virtual frames.
|
|||
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer; |
|||
|
|||
|
|||
// -------------------------------------------------------------------------
|
|||
// Virtual frames
|
|||
//
|
|||
// The virtual frame is an abstraction of the physical stack frame. It
|
|||
// encapsulates the parameters, frame-allocated locals, and the expression
|
|||
// stack. It supports push/pop operations on the expression stack, as well
|
|||
// as random access to the expression stack elements, locals, and
|
|||
// parameters.
|
|||
|
|||
class VirtualFrame : public ZoneObject { |
|||
public: |
|||
class RegisterAllocationScope; |
|||
// A utility class to introduce a scope where the virtual frame is
|
|||
// expected to remain spilled. The constructor spills the code
|
|||
// generator's current frame, and keeps it spilled.
|
|||
class SpilledScope BASE_EMBEDDED { |
|||
public: |
|||
explicit SpilledScope(VirtualFrame* frame) |
|||
: old_is_spilled_(is_spilled_) { |
|||
if (frame != NULL) { |
|||
if (!is_spilled_) { |
|||
frame->SpillAll(); |
|||
} else { |
|||
frame->AssertIsSpilled(); |
|||
} |
|||
} |
|||
is_spilled_ = true; |
|||
} |
|||
~SpilledScope() { |
|||
is_spilled_ = old_is_spilled_; |
|||
} |
|||
static bool is_spilled() { return is_spilled_; } |
|||
|
|||
private: |
|||
static bool is_spilled_; |
|||
int old_is_spilled_; |
|||
|
|||
SpilledScope() { } |
|||
|
|||
friend class RegisterAllocationScope; |
|||
}; |
|||
|
|||
class RegisterAllocationScope BASE_EMBEDDED { |
|||
public: |
|||
// A utility class to introduce a scope where the virtual frame
|
|||
// is not spilled, ie. where register allocation occurs. Eventually
|
|||
// when RegisterAllocationScope is ubiquitous it can be removed
|
|||
// along with the (by then unused) SpilledScope class.
|
|||
inline explicit RegisterAllocationScope(CodeGenerator* cgen); |
|||
inline ~RegisterAllocationScope(); |
|||
|
|||
private: |
|||
CodeGenerator* cgen_; |
|||
bool old_is_spilled_; |
|||
|
|||
RegisterAllocationScope() { } |
|||
}; |
|||
|
|||
// An illegal index into the virtual frame.
|
|||
static const int kIllegalIndex = -1; |
|||
|
|||
// Construct an initial virtual frame on entry to a JS function.
|
|||
inline VirtualFrame(); |
|||
|
|||
// Construct an invalid virtual frame, used by JumpTargets.
|
|||
inline VirtualFrame(InvalidVirtualFrameInitializer* dummy); |
|||
|
|||
// Construct a virtual frame as a clone of an existing one.
|
|||
explicit inline VirtualFrame(VirtualFrame* original); |
|||
|
|||
inline CodeGenerator* cgen() const; |
|||
inline MacroAssembler* masm(); |
|||
|
|||
// The number of elements on the virtual frame.
|
|||
int element_count() const { return element_count_; } |
|||
|
|||
// The height of the virtual expression stack.
|
|||
inline int height() const; |
|||
|
|||
bool is_used(int num) { |
|||
switch (num) { |
|||
case 0: { // r0.
|
|||
return kR0InUse[top_of_stack_state_]; |
|||
} |
|||
case 1: { // r1.
|
|||
return kR1InUse[top_of_stack_state_]; |
|||
} |
|||
case 2: |
|||
case 3: |
|||
case 4: |
|||
case 5: |
|||
case 6: { // r2 to r6.
|
|||
ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters); |
|||
ASSERT(num >= kFirstAllocatedRegister); |
|||
if ((register_allocation_map_ & |
|||
(1 << (num - kFirstAllocatedRegister))) == 0) { |
|||
return false; |
|||
} else { |
|||
return true; |
|||
} |
|||
} |
|||
default: { |
|||
ASSERT(num < kFirstAllocatedRegister || |
|||
num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters); |
|||
return false; |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Add extra in-memory elements to the top of the frame to match an actual
|
|||
// frame (eg, the frame after an exception handler is pushed). No code is
|
|||
// emitted.
|
|||
void Adjust(int count); |
|||
|
|||
// Forget elements from the top of the frame to match an actual frame (eg,
|
|||
// the frame after a runtime call). No code is emitted except to bring the
|
|||
// frame to a spilled state.
|
|||
void Forget(int count); |
|||
|
|||
// Spill all values from the frame to memory.
|
|||
void SpillAll(); |
|||
|
|||
void AssertIsSpilled() const { |
|||
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); |
|||
ASSERT(register_allocation_map_ == 0); |
|||
} |
|||
|
|||
void AssertIsNotSpilled() { |
|||
ASSERT(!SpilledScope::is_spilled()); |
|||
} |
|||
|
|||
// Spill all occurrences of a specific register from the frame.
|
|||
void Spill(Register reg) { |
|||
UNIMPLEMENTED(); |
|||
} |
|||
|
|||
// Spill all occurrences of an arbitrary register if possible. Return the
|
|||
// register spilled or no_reg if it was not possible to free any register
|
|||
// (ie, they all have frame-external references). Unimplemented.
|
|||
Register SpillAnyRegister(); |
|||
|
|||
// Make this virtual frame have a state identical to an expected virtual
|
|||
// frame. As a side effect, code may be emitted to make this frame match
|
|||
// the expected one.
|
|||
void MergeTo(VirtualFrame* expected, Condition cond = al); |
|||
void MergeTo(const VirtualFrame* expected, Condition cond = al); |
|||
|
|||
// Checks whether this frame can be branched to by the other frame.
|
|||
bool IsCompatibleWith(const VirtualFrame* other) const { |
|||
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0; |
|||
} |
|||
|
|||
inline void ForgetTypeInfo() { |
|||
tos_known_smi_map_ = 0; |
|||
} |
|||
|
|||
// Detach a frame from its code generator, perhaps temporarily. This
|
|||
// tells the register allocator that it is free to use frame-internal
|
|||
// registers. Used when the code generator's frame is switched from this
|
|||
// one to NULL by an unconditional jump.
|
|||
void DetachFromCodeGenerator() { |
|||
} |
|||
|
|||
// (Re)attach a frame to its code generator. This informs the register
|
|||
// allocator that the frame-internal register references are active again.
|
|||
// Used when a code generator's frame is switched from NULL to this one by
|
|||
// binding a label.
|
|||
void AttachToCodeGenerator() { |
|||
} |
|||
|
|||
// Emit code for the physical JS entry and exit frame sequences. After
|
|||
// calling Enter, the virtual frame is ready for use; and after calling
|
|||
// Exit it should not be used. Note that Enter does not allocate space in
|
|||
// the physical frame for storing frame-allocated locals.
|
|||
void Enter(); |
|||
void Exit(); |
|||
|
|||
// Prepare for returning from the frame by elements in the virtual frame. This
|
|||
// avoids generating unnecessary merge code when jumping to the
|
|||
// shared return site. No spill code emitted. Value to return should be in r0.
|
|||
inline void PrepareForReturn(); |
|||
|
|||
// Number of local variables after when we use a loop for allocating.
|
|||
static const int kLocalVarBound = 5; |
|||
|
|||
// Allocate and initialize the frame-allocated locals.
|
|||
void AllocateStackSlots(); |
|||
|
|||
// The current top of the expression stack as an assembly operand.
|
|||
MemOperand Top() { |
|||
AssertIsSpilled(); |
|||
return MemOperand(sp, 0); |
|||
} |
|||
|
|||
// An element of the expression stack as an assembly operand.
|
|||
MemOperand ElementAt(int index) { |
|||
int adjusted_index = index - kVirtualElements[top_of_stack_state_]; |
|||
ASSERT(adjusted_index >= 0); |
|||
return MemOperand(sp, adjusted_index * kPointerSize); |
|||
} |
|||
|
|||
bool KnownSmiAt(int index) { |
|||
if (index >= kTOSKnownSmiMapSize) return false; |
|||
return (tos_known_smi_map_ & (1 << index)) != 0; |
|||
} |
|||
|
|||
// A frame-allocated local as an assembly operand.
|
|||
inline MemOperand LocalAt(int index); |
|||
|
|||
// Push the address of the receiver slot on the frame.
|
|||
void PushReceiverSlotAddress(); |
|||
|
|||
// The function frame slot.
|
|||
MemOperand Function() { return MemOperand(fp, kFunctionOffset); } |
|||
|
|||
// The context frame slot.
|
|||
MemOperand Context() { return MemOperand(fp, kContextOffset); } |
|||
|
|||
// A parameter as an assembly operand.
|
|||
inline MemOperand ParameterAt(int index); |
|||
|
|||
// The receiver frame slot.
|
|||
inline MemOperand Receiver(); |
|||
|
|||
// Push a try-catch or try-finally handler on top of the virtual frame.
|
|||
void PushTryHandler(HandlerType type); |
|||
|
|||
// Call stub given the number of arguments it expects on (and
|
|||
// removes from) the stack.
|
|||
inline void CallStub(CodeStub* stub, int arg_count); |
|||
|
|||
// Call JS function from top of the stack with arguments
|
|||
// taken from the stack.
|
|||
void CallJSFunction(int arg_count); |
|||
|
|||
// Call runtime given the number of arguments expected on (and
|
|||
// removed from) the stack.
|
|||
void CallRuntime(Runtime::Function* f, int arg_count); |
|||
void CallRuntime(Runtime::FunctionId id, int arg_count); |
|||
|
|||
#ifdef ENABLE_DEBUGGER_SUPPORT |
|||
void DebugBreak(); |
|||
#endif |
|||
|
|||
// Invoke builtin given the number of arguments it expects on (and
|
|||
// removes from) the stack.
|
|||
void InvokeBuiltin(Builtins::JavaScript id, |
|||
InvokeJSFlags flag, |
|||
int arg_count); |
|||
|
|||
// Call load IC. Receiver is on the stack and is consumed. Result is returned
|
|||
// in r0.
|
|||
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode); |
|||
|
|||
// Call store IC. If the load is contextual, value is found on top of the
|
|||
// frame. If not, value and receiver are on the frame. Both are consumed.
|
|||
// Result is returned in r0.
|
|||
void CallStoreIC(Handle<String> name, bool is_contextual, |
|||
StrictModeFlag strict_mode); |
|||
|
|||
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
|
|||
// Result is returned in r0.
|
|||
void CallKeyedLoadIC(); |
|||
|
|||
// Call keyed store IC. Value, key and receiver are on the stack. All three
|
|||
// are consumed. Result is returned in r0.
|
|||
void CallKeyedStoreIC(StrictModeFlag strict_mode); |
|||
|
|||
// Call into an IC stub given the number of arguments it removes
|
|||
// from the stack. Register arguments to the IC stub are implicit,
|
|||
// and depend on the type of IC stub.
|
|||
void CallCodeObject(Handle<Code> ic, |
|||
RelocInfo::Mode rmode, |
|||
int dropped_args); |
|||
|
|||
// Drop a number of elements from the top of the expression stack. May
|
|||
// emit code to affect the physical frame. Does not clobber any registers
|
|||
// excepting possibly the stack pointer.
|
|||
void Drop(int count); |
|||
|
|||
// Drop one element.
|
|||
void Drop() { Drop(1); } |
|||
|
|||
// Pop an element from the top of the expression stack. Discards
|
|||
// the result.
|
|||
void Pop(); |
|||
|
|||
// Pop an element from the top of the expression stack. The register
|
|||
// will be one normally used for the top of stack register allocation
|
|||
// so you can't hold on to it if you push on the stack.
|
|||
Register PopToRegister(Register but_not_to_this_one = no_reg); |
|||
|
|||
// Look at the top of the stack. The register returned is aliased and
|
|||
// must be copied to a scratch register before modification.
|
|||
Register Peek(); |
|||
|
|||
// Look at the value beneath the top of the stack. The register returned is
|
|||
// aliased and must be copied to a scratch register before modification.
|
|||
Register Peek2(); |
|||
|
|||
// Duplicate the top of stack.
|
|||
void Dup(); |
|||
|
|||
// Duplicate the two elements on top of stack.
|
|||
void Dup2(); |
|||
|
|||
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
|
|||
void SpillAllButCopyTOSToR0(); |
|||
|
|||
// Flushes all registers, but it puts a copy of the top-of-stack in r1.
|
|||
void SpillAllButCopyTOSToR1(); |
|||
|
|||
// Flushes all registers, but it puts a copy of the top-of-stack in r1
|
|||
// and the next value on the stack in r0.
|
|||
void SpillAllButCopyTOSToR1R0(); |
|||
|
|||
// Pop and save an element from the top of the expression stack and
|
|||
// emit a corresponding pop instruction.
|
|||
void EmitPop(Register reg); |
|||
|
|||
// Takes the top two elements and puts them in r0 (top element) and r1
|
|||
// (second element).
|
|||
void PopToR1R0(); |
|||
|
|||
// Takes the top element and puts it in r1.
|
|||
void PopToR1(); |
|||
|
|||
// Takes the top element and puts it in r0.
|
|||
void PopToR0(); |
|||
|
|||
// Push an element on top of the expression stack and emit a
|
|||
// corresponding push instruction.
|
|||
void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown()); |
|||
void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown()); |
|||
void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown()); |
|||
void EmitPushRoot(Heap::RootListIndex index); |
|||
|
|||
// Overwrite the nth thing on the stack. If the nth position is in a
|
|||
// register then this turns into a mov, otherwise an str. Afterwards
|
|||
// you can still use the register even if it is a register that can be
|
|||
// used for TOS (r0 or r1).
|
|||
void SetElementAt(Register reg, int this_far_down); |
|||
|
|||
// Get a register which is free and which must be immediately used to
|
|||
// push on the top of the stack.
|
|||
Register GetTOSRegister(); |
|||
|
|||
// Push multiple registers on the stack and the virtual frame
|
|||
// Register are selected by setting bit in src_regs and
|
|||
// are pushed in decreasing order: r15 .. r0.
|
|||
void EmitPushMultiple(int count, int src_regs); |
|||
|
|||
static Register scratch0() { return r7; } |
|||
static Register scratch1() { return r9; } |
|||
|
|||
private: |
|||
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; |
|||
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset; |
|||
static const int kContextOffset = StandardFrameConstants::kContextOffset; |
|||
|
|||
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize; |
|||
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
|
|||
|
|||
// 5 states for the top of stack, which can be in memory or in r0 and r1.
|
|||
enum TopOfStack { |
|||
NO_TOS_REGISTERS, |
|||
R0_TOS, |
|||
R1_TOS, |
|||
R1_R0_TOS, |
|||
R0_R1_TOS, |
|||
TOS_STATES |
|||
}; |
|||
|
|||
static const int kMaxTOSRegisters = 2; |
|||
|
|||
static const bool kR0InUse[TOS_STATES]; |
|||
static const bool kR1InUse[TOS_STATES]; |
|||
static const int kVirtualElements[TOS_STATES]; |
|||
static const TopOfStack kStateAfterPop[TOS_STATES]; |
|||
static const TopOfStack kStateAfterPush[TOS_STATES]; |
|||
static const Register kTopRegister[TOS_STATES]; |
|||
static const Register kBottomRegister[TOS_STATES]; |
|||
|
|||
// We allocate up to 5 locals in registers.
|
|||
static const int kNumberOfAllocatedRegisters = 5; |
|||
// r2 to r6 are allocated to locals.
|
|||
static const int kFirstAllocatedRegister = 2; |
|||
|
|||
static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters]; |
|||
|
|||
static Register AllocatedRegister(int r) { |
|||
ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters); |
|||
return kAllocatedRegisters[r]; |
|||
} |
|||
|
|||
// The number of elements on the stack frame.
|
|||
int element_count_; |
|||
TopOfStack top_of_stack_state_:3; |
|||
int register_allocation_map_:kNumberOfAllocatedRegisters; |
|||
static const int kTOSKnownSmiMapSize = 4; |
|||
unsigned tos_known_smi_map_:kTOSKnownSmiMapSize; |
|||
|
|||
// The index of the element that is at the processor's stack pointer
|
|||
// (the sp register). For now since everything is in memory it is given
|
|||
// by the number of elements on the not-very-virtual stack frame.
|
|||
int stack_pointer() { return element_count_ - 1; } |

  // The number of frame-allocated locals and parameters respectively.
  inline int parameter_count() const;
  inline int local_count() const;

  // The index of the element that is at the processor's frame pointer
  // (the fp register). The parameters, receiver, function, and context
  // are below the frame pointer.
  inline int frame_pointer() const;

  // The index of the first parameter. The receiver lies below the first
  // parameter.
  int param0_index() { return 1; }

  // The index of the context slot in the frame. It is immediately
  // below the frame pointer.
  inline int context_index();

  // The index of the function slot in the frame. It is below the frame
  // pointer and context slot.
  inline int function_index();

  // The index of the first local. Between the frame pointer and the
  // locals lies the return address.
  inline int local0_index() const;

  // The index of the base of the expression stack.
  inline int expression_base_index() const;

  // Convert a frame index into a frame pointer relative offset into the
  // actual stack.
  inline int fp_relative(int index);
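  // Illustrative element layout, low index to high, pieced together from the
  // comments above (an assumption; the exact offsets live in the inline
  // accessors defined elsewhere):
  //
  //   0                        receiver
  //   param0_index() ..        parameters
  //   function_index()         function
  //   context_index()          context      (immediately below fp)
  //   frame_pointer()          saved fp
  //   ...                      return address
  //   local0_index() ..        locals
  //   expression_base_index()  expression stack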

  // Spill all elements in registers. Spill the top spilled_args elements
  // on the frame. Sync all other frame elements.
  // Then drop dropped_args elements from the virtual frame, to match
  // the effect of an upcoming call that will drop them from the stack.
  void PrepareForCall(int spilled_args, int dropped_args);

  // If all top-of-stack registers are in use then the lowest one is pushed
  // onto the physical stack and made free.
  void EnsureOneFreeTOSRegister();

  // Emit instructions to get the top of stack state from where we are to where
  // we want to be.
  void MergeTOSTo(TopOfStack expected_state, Condition cond = al);

  inline bool Equals(const VirtualFrame* other);

  inline void LowerHeight(int count) {
    element_count_ -= count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = 0;
    } else {
      tos_known_smi_map_ >>= count;
    }
  }

  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
    ASSERT(count >= 32 || known_smi_map < (1u << count));
    element_count_ += count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = known_smi_map;
    } else {
      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
    }
  }
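  // Worked example (illustrative, not from the original header): with
  // kTOSKnownSmiMapSize == 4 and tos_known_smi_map_ == 0b0101,
  // RaiseHeight(1, 1) yields (0b0101 << 1) | 1 == 0b1011, and a subsequent
  // LowerHeight(1) shifts right, restoring 0b0101.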

  friend class JumpTarget;
};


} }  // namespace v8::internal

#endif  // V8_ARM_VIRTUAL_FRAME_ARM_H_
File diff suppressed because it is too large
@ -0,0 +1,169 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")

namespace v8 {
namespace internal {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("1:\n"
                       "ll %0, %1\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "nop\n"  // delay slot nop
                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
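
// Illustrative sketch, not part of the original file: the usual retry loop
// built on the primitive above. Example_AtomicAdd is a hypothetical helper
// (assuming Atomic32 is in scope via atomicops.h); it atomically adds
// |delta| and returns the value it replaced, without barriers.
inline Atomic32 Example_AtomicAdd(volatile Atomic32* ptr, Atomic32 delta) {
  for (;;) {
    Atomic32 old = *ptr;  // Snapshot; may be stale by the time we CAS.
    if (NoBarrier_CompareAndSwap(ptr, old, old + delta) == old) {
      return old;  // CAS succeeded: nobody changed *ptr in between.
    }
    // CAS failed: another thread won the race; reload and retry.
  }
}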

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__("1:\n"
                       "ll %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__("1:\n"
                       "ll %0, %2\n"  // temp = *ptr
                       "addu %0, %3\n"  // temp = temp + increment
                       "move %1, %0\n"  // temp2 = temp
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}
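
// Illustrative (an assumption, not from the original file): a shared
// reference count is the classic client of the barrier variant:
//   if (Barrier_AtomicIncrement(&ref_count, -1) == 0) { /* safe to free */ }
// The barrier ensures the thread dropping the last reference observes all
// prior writes to the object before tearing it down.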

// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  ATOMICOPS_COMPILER_BARRIER();
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
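
// Illustrative sketch, not from the original file: the publish/consume
// pattern these operations exist to support, with a hypothetical flag and
// payload shared between two threads.
//
//   // Writer thread:               // Reader thread:
//   payload = Compute();            if (Acquire_Load(&flag) == 1) {
//   Release_Store(&flag, 1);          Use(payload);  // guaranteed visible
//                                   }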

} }  // namespace v8::internal

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files changed in this diff