mirror of https://github.com/lukechilds/node.git
Fedor Indutny
11 years ago
1130 changed files with 136550 additions and 54656 deletions
@@ -0,0 +1,4 @@
# Defines the Google C++ style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Google
MaxEmptyLinesToKeep: 2
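For reference, this is roughly the shape clang-format produces under `BasedOnStyle: Google`: two-space indents, 80-column wrapping, and two-space trailing comments. The snippet below is illustrative only and is not part of this commit.

```cpp
#include <climits>

// Illustrative only: Google-style formatting as enforced by the config above.
int AddClamped(int lhs, int rhs) {
  long long sum = static_cast<long long>(lhs) + rhs;  // Widen before checking.
  if (sum > INT_MAX) return INT_MAX;
  if (sum < INT_MIN) return INT_MIN;
  return static_cast<int>(sum);
}
```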
@@ -0,0 +1,41 @@
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# This file is (possibly, depending on python version) imported by
# gyp_v8 when GYP_PARALLEL=1 and it creates sub-processes through the
# multiprocessing library.

# Importing in Python 2.6 (fixed in 2.7) on Windows doesn't search for imports
# that don't end in .py (and aren't directories with an __init__.py). This
# wrapper makes "import gyp_v8" work with those old versions and makes it
# possible to execute gyp_v8.py directly on Windows where the extension is
# useful.

import os

path = os.path.abspath(os.path.split(__file__)[0])
execfile(os.path.join(path, 'gyp_v8'))
@@ -0,0 +1,7 @@
CODE_REVIEW_SERVER: https://codereview.chromium.org
CC_LIST: v8-dev@googlegroups.com
VIEW_VC: https://code.google.com/p/v8/source/detail?r=
STATUS: http://v8-status.appspot.com/status
TRY_ON_UPLOAD: False
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
TRYSERVER_ROOT: v8
@@ -0,0 +1,86 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include "v8.h"

namespace v8 {

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() {}

  virtual void Run() = 0;
};

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  /**
   * This enum is used to indicate whether a task is potentially long running,
   * or causes a long wait. The embedder might want to use this hint to decide
   * whether to execute the task on a dedicated thread.
   */
  enum ExpectedRuntime {
    kShortRunningTask,
    kLongRunningTask
  };

  /**
   * Schedules a task to be invoked on a background thread. |expected_runtime|
   * indicates that the task will run a long time. The Platform implementation
   * takes ownership of |task|. There is no guarantee about order of execution
   * of tasks wrt order of scheduling, nor is there a guarantee about the
   * thread the task will be run on.
   */
  virtual void CallOnBackgroundThread(Task* task,
                                      ExpectedRuntime expected_runtime) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate|. Tasks posted for the same isolate should be executed in order
   * of scheduling. The definition of "foreground" is opaque to V8.
   */
  virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;

 protected:
  virtual ~Platform() {}
};

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_
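To make the contract above concrete, here is a minimal sketch of an embedder-side Platform implementation. It is not part of this commit: running foreground tasks inline and detaching one thread per background task are simplifications that a real embedder would replace with a per-isolate task queue and a thread pool.

```cpp
#include <thread>

#include "v8.h"
#include "v8-platform.h"

// Minimal embedder-provided platform (illustrative sketch only).
class InlinePlatform : public v8::Platform {
 public:
  virtual void CallOnBackgroundThread(v8::Task* task,
                                      ExpectedRuntime expected_runtime) {
    // We own |task| now. A real embedder would route kLongRunningTask work
    // to a dedicated thread, as the ExpectedRuntime hint suggests.
    std::thread([task]() {
      task->Run();
      delete task;
    }).detach();
  }

  virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
    // Tasks for the same isolate must run in posting order; running them
    // inline trivially satisfies that, at the cost of blocking the caller.
    task->Run();
    delete task;
  }
};
```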
File diff suppressed because it is too large
@@ -0,0 +1 @@
rmcilroy@chromium.org
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,469 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_CODE_STUBS_A64_H_
#define V8_A64_CODE_STUBS_A64_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);


class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) { }

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};


class StringHelper : public AllStatic {
 public:
  // TODO(all): These don't seem to be used any more. Delete them.

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash,
                                  Register scratch);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class RecordWriteStub: public PlatformCodeStub {
 public:
  // Stub to record the write of 'value' at 'address' in 'object'.
  // Typically 'address' = 'object' + <some offset>.
  // See MacroAssembler::RecordWriteField() for example.
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static Mode GetMode(Code* stub) {
    // Find the mode depending on the first two instructions.
    Instruction* instr1 =
        reinterpret_cast<Instruction*>(stub->instruction_start());
    Instruction* instr2 = instr1->following();

    if (instr1->IsUncondBranchImm()) {
      ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
      return INCREMENTAL;
    }

    ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));

    if (instr2->IsUncondBranchImm()) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(instr2->IsPCRelAddressing());

    return STORE_BUFFER_ONLY;
  }

  // We patch the first two instructions of the stub back and forth between an
  // adr and branch when we start and stop incremental heap marking.
  // The branch is
  //   b label
  // The adr is
  //   adr xzr label
  // so effectively a nop.
  static void Patch(Code* stub, Mode mode) {
    // We are going to patch the first two instructions of the stub.
    PatchingAssembler patcher(
        reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
    Instruction* instr1 = patcher.InstructionAt(0);
    Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
    // Instructions must be either 'adr' or 'b'.
    ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
    ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
    // Retrieve the offsets to the labels.
    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();

    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
        break;
    }
    ASSERT(GetMode(stub) == mode);
  }

 private:
  // This is a helper class to manage the registers associated with the stub.
  // The 'object' and 'address' registers must be preserved.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch)
        : object_(object),
          address_(address),
          scratch0_(scratch),
          saved_regs_(kCallerSaved) {
      ASSERT(!AreAliased(scratch, object, address));

      // We would like to require more scratch registers for this stub,
      // but the number of registers comes down to the ones used in
      // FullCodeGen::SetVar(), which is architecture independent.
      // We allocate 2 extra scratch registers that we'll save on the stack.
      CPURegList pool_available = GetValidRegistersForAllocation();
      CPURegList used_regs(object, address, scratch);
      pool_available.Remove(used_regs);
      scratch1_ = Register(pool_available.PopLowestIndex());
      scratch2_ = Register(pool_available.PopLowestIndex());

      // The SaveCallerRegisters method needs to save caller-saved registers,
      // but we don't bother saving ip0 and ip1 because they are used as
      // scratch registers by the MacroAssembler.
      saved_regs_.Remove(ip0);
      saved_regs_.Remove(ip1);

      // The scratch registers will be restored by other means so we don't need
      // to save them with the other caller saved registers.
      saved_regs_.Remove(scratch0_);
      saved_regs_.Remove(scratch1_);
      saved_regs_.Remove(scratch2_);
    }

    void Save(MacroAssembler* masm) {
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->Push(scratch1_, scratch2_);
    }

    void Restore(MacroAssembler* masm) {
      masm->Pop(scratch2_, scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      masm->PushCPURegList(saved_regs_);
      if (mode == kSaveFPRegs) {
        masm->PushCPURegList(kCallerSavedFP);
      }
    }

    void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      if (mode == kSaveFPRegs) {
        masm->PopCPURegList(kCallerSavedFP);
      }
      masm->PopCPURegList(saved_regs_);
    }

    Register object() { return object_; }
    Register address() { return address_; }
    Register scratch0() { return scratch0_; }
    Register scratch1() { return scratch1_; }
    Register scratch2() { return scratch2_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    Register scratch2_;
    CPURegList saved_regs_;

    // TODO(all): We should consider moving this somewhere else.
    static CPURegList GetValidRegistersForAllocation() {
      // The list of valid registers for allocation is defined as all the
      // registers without those with a special meaning.
      //
      // The default list excludes registers x26 to x31 because they are
      // reserved for the following purpose:
      //  - x26 root register
      //  - x27 context pointer register
      //  - x28 jssp
      //  - x29 frame pointer
      //  - x30 link register (lr)
      //  - x31 xzr/stack pointer
      CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);

      // We also remove MacroAssembler's scratch registers.
      list.Remove(ip0);
      list.Remove(ip1);
      list.Remove(x8);
      list.Remove(x9);

      return list;
    }

    friend class RecordWriteStub;
  };

  // A list of stub variants which are pregenerated.
  // The variants are stored in the same format as the minor key, so
  // MinorKeyFor() can be used to populate and check this list.
  static const int kAheadOfTime[];

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return MinorKeyFor(object_, value_, address_, remembered_set_action_,
                       save_fp_regs_mode_);
  }

  static int MinorKeyFor(Register object,
                         Register value,
                         Register address,
                         RememberedSetAction action,
                         SaveFPRegsMode fp_mode) {
    ASSERT(object.Is64Bits());
    ASSERT(value.Is64Bits());
    ASSERT(address.Is64Bits());
    return ObjectBits::encode(object.code()) |
        ValueBits::encode(value.code()) |
        AddressBits::encode(address.code()) |
        RememberedSetActionBits::encode(action) |
        SaveFPRegsModeBits::encode(fp_mode);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register scratch1,
                                     Register scratch2);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns result in x0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns the result
  // in x0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* chars_not_equal);
};


struct PlatformCallInterfaceDescriptor {
  explicit PlatformCallInterfaceDescriptor(
      TargetAddressStorageMode storage_mode)
      : storage_mode_(storage_mode) { }

  TargetAddressStorageMode storage_mode() { return storage_mode_; }

 private:
  TargetAddressStorageMode storage_mode_;
};


} }  // namespace v8::internal

#endif  // V8_A64_CODE_STUBS_A64_H_
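A side note on MinorKeyFor() above: the BitField helpers pack several small fields (three 5-bit register codes plus two flags) into one integer key. A standalone, simplified re-implementation of the idea, not V8's actual template, looks like this:

```cpp
#include <cassert>
#include <cstdint>

// Simplified sketch of V8's BitField pattern: each field occupies |size|
// bits starting at |shift| within a packed 32-bit key.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

int main() {
  // Pack three 5-bit register codes the way MinorKeyFor() does.
  typedef BitField<int, 0, 5> ObjectBits;
  typedef BitField<int, 5, 5> ValueBits;
  typedef BitField<int, 10, 5> AddressBits;
  uint32_t key = ObjectBits::encode(2) | ValueBits::encode(0) |
                 AddressBits::encode(3);
  assert(ObjectBits::decode(key) == 2);
  assert(AddressBits::decode(key) == 3);
  return 0;
}
```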
@@ -0,0 +1,616 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-a64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

#if defined(USE_SIMULATOR)
byte* fast_exp_a64_machine_code = NULL;
double fast_exp_simulator(double x) {
  Simulator* simulator = Simulator::current(Isolate::Current());
  Simulator::CallArgument args[] = {
      Simulator::CallArgument(x),
      Simulator::CallArgument::End()
  };
  return simulator->CallDouble(fast_exp_a64_machine_code, args);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;

  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
  // create an AAPCS64-compliant exp() function. This will be faster than the
  // C library's exp() function, but probably less accurate.
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;

  ExternalReference::InitializeMathExpData();
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  masm.SetStackPointer(csp);

  // The argument will be in d0 on entry.
  DoubleRegister input = d0;
  // Use other caller-saved registers for all other values.
  DoubleRegister result = d1;
  DoubleRegister double_temp1 = d2;
  DoubleRegister double_temp2 = d3;
  Register temp1 = x10;
  Register temp2 = x11;
  Register temp3 = x12;

  MathExpGenerator::EmitMathExp(&masm, input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
  // Move the result to the return register.
  masm.Fmov(d0, result);
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_a64_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


UnaryMathFunction CreateSqrtFunction() {
  return &std::sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- x2    : receiver
  //  -- x3    : target map
  // -----------------------------------
  Register receiver = x2;
  Register map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  // ----------- S t a t e -------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- x3    : target map, scratch for subsequent call
  // -----------------------------------
  Register receiver = x2;
  Register target_map = x3;

  Label gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  Register array_size = x6;
  Register array = x7;
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register array is non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Add(x10, array, kHeapObjectTag);
  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
                      x6, kLRHasBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  //  -- x3    : target map, scratch for subsequent call
  //  -- x4    : scratch (elements)
  // -----------------------------------
  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register target_map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether it's needed.
  __ Push(target_map, receiver, key, value);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedArray.
  Register array_size = x6;
  Register array = x7;
  Label gc_required;
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set destination FixedDoubleArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize);
  __ Add(array, array, kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  Register the_hole = x14;
  Register heap_num_map = x15;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);

    // Non-hole double, copy value into a heap number.
    Register heap_num = x5;
    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
    __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


bool Code::IsYoungSequence(byte* sequence) {
  return MacroAssembler::IsYoungSequence(sequence);
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    byte* target = sequence + kCodeAgeStubEntryOffset;
    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
  if (age == kNoAgeCodeAge) {
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
  }
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ Ldrsw(result,
           UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result);
  __ B(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&check_encoding);

  // Handle external strings.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Tst(result, kIsIndirectStringMask);
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
  // can be bound far away in deferred code.
  __ Tst(result, kShortExternalStringMask);
  __ B(ne, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ Bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
  // Two-byte string.
  __ Ldrh(result, MemOperand(string, index, LSL, 1));
  __ B(&done);
  __ Bind(&ascii);
  // Ascii string.
  __ Ldrb(result, MemOperand(string, index));
  __ Bind(&done);
}


static MemOperand ExpConstant(Register base, int index) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_temp1,
                                   DoubleRegister double_temp2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  // TODO(jbramley): There are several instances where fnmsub could be used
  // instead of fmul and fsub. Doing this changes the result, but since this is
  // an estimation anyway, does it matter?

  ASSERT(!AreAliased(input, result,
                     double_temp1, double_temp2,
                     temp1, temp2, temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;
  DoubleRegister double_temp3 = result;
  Register constants = temp3;

  // The algorithm used relies on some magic constants which are initialized in
  // ExternalReference::InitializeMathExpData().

  // Load the address of the start of the array.
  __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));

  // We have to do a four-way split here:
  //  - If input <= about -708.4, the output always rounds to zero.
  //  - If input >= about 709.8, the output always rounds to +infinity.
  //  - If the input is NaN, the output is NaN.
  //  - Otherwise, the result needs to be calculated.
  Label result_is_finite_non_zero;
  // Assert that we can load offset 0 (the small input threshold) and offset 1
  // (the large input threshold) with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
                              ExpConstant(constants, 0).offset()));
  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));

  __ Fcmp(input, double_temp1);
  __ Fccmp(input, double_temp2, NoFlag, hi);
  // At this point, the condition flags can be in one of five states:
  //   NZCV
  //   1000      -708.4 < input < 709.8    result = exp(input)
  //   0110      input == 709.8            result = +infinity
  //   0010      input > 709.8             result = +infinity
  //   0011      input is NaN              result = input
  //   0000      input <= -708.4           result = +0.0

  // Continue the common case first. 'mi' tests N == 1.
  __ B(&result_is_finite_non_zero, mi);

  // TODO(jbramley): Add (and use) a zero D register for A64.
  // TODO(jbramley): Consider adding a +infinity register for A64.
  __ Ldr(double_temp2, ExpConstant(constants, 2));    // Synthesize +infinity.
  __ Fsub(double_temp1, double_temp1, double_temp1);  // Synthesize +0.0.

  // Select between +0.0 and +infinity. 'lo' tests C == 0.
  __ Fcsel(result, double_temp1, double_temp2, lo);
  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
  __ Fcsel(result, result, input, vc);
  __ B(&done);

  // The rest is magic, as described in InitializeMathExpData().
  __ Bind(&result_is_finite_non_zero);

  // Assert that we can load offset 3 and offset 4 with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
                              ExpConstant(constants, 3).offset()));
  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
  __ Fmov(temp2.W(), double_temp1.S());
  __ Fsub(double_temp1, double_temp1, double_temp3);

  // Assert that we can load offset 5 and offset 6 with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
                              ExpConstant(constants, 5).offset()));
  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp1, double_temp1, double_temp2);
  __ Fsub(double_temp1, double_temp1, input);

  __ Fmul(double_temp2, double_temp1, double_temp1);
  __ Fsub(double_temp3, double_temp3, double_temp1);
  __ Fmul(double_temp3, double_temp3, double_temp2);

  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));

  __ Ldr(double_temp2, ExpConstant(constants, 7));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp3, double_temp3, double_temp2);
  __ Fsub(double_temp3, double_temp3, double_temp1);

  // The 8th constant is 1.0, so use an immediate move rather than a load.
  // We can't generate a runtime assertion here as we would need to call Abort
  // in the runtime and we don't have an Isolate when we generate this code.
  __ Fmov(double_temp2, 1.0);
  __ Fadd(double_temp3, double_temp3, double_temp2);

  __ And(temp2, temp2, 0x7ff);
  __ Add(temp1, temp1, 0x3ff);

  // Do the final table lookup.
  __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));

  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  __ Bfi(temp2, temp1, 32, 32);
  __ Fmov(double_temp1, temp2);

  __ Fmul(result, double_temp3, double_temp1);

  __ Bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
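The four-way split that EmitMathExp() implements with Fcmp/Fccmp/Fcsel corresponds to this scalar logic. This is a sketch using the approximate thresholds quoted in the comments above, not the generated code itself:

```cpp
#include <cmath>
#include <limits>

// Scalar sketch of EmitMathExp()'s four-way input split (illustrative only).
double ExpSplit(double input) {
  if (std::isnan(input)) return input;  // NaN in, NaN out.
  if (input <= -708.4) return 0.0;      // Always rounds to +0.0.
  if (input >= 709.8) {
    return std::numeric_limits<double>::infinity();  // Always overflows.
  }
  return std::exp(input);  // Finite, non-zero case: actually compute it.
}
```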
File diff suppressed because it is too large
@ -0,0 +1,199 @@ |
|||||
|
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// CPU specific code for arm independent of OS goes here.
|
||||
|
|
||||
|
#include "v8.h" |
||||
|
|
||||
|
#if V8_TARGET_ARCH_A64 |
||||
|
|
||||
|
#include "a64/cpu-a64.h" |
||||
|
#include "a64/utils-a64.h" |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
#ifdef DEBUG |
||||
|
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;

// Initialise to smallest possible cache size.
unsigned CpuFeatures::dcache_line_size_ = 1;
unsigned CpuFeatures::icache_line_size_ = 1;


void CPU::SetUp() {
  CpuFeatures::Probe();
}


bool CPU::SupportsCrankshaft() {
  return true;
}


void CPU::FlushICache(void* address, size_t length) {
  if (length == 0) {
    return;
  }

#ifdef USE_SIMULATOR
  // TODO(all): consider doing some cache simulation to ensure every address
  // run has been synced.
  USE(address);
  USE(length);
#else
  // The code below assumes user space cache operations are allowed. The goal
  // of this routine is to make sure the code generated is visible to the I
  // side of the CPU.

  uintptr_t start = reinterpret_cast<uintptr_t>(address);
  // Sizes will be used to generate a mask big enough to cover a pointer.
  uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
  uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
  // Cache line sizes are always a power of 2.
  ASSERT(CountSetBits(dsize, 64) == 1);
  ASSERT(CountSetBits(isize, 64) == 1);
  uintptr_t dstart = start & ~(dsize - 1);
  uintptr_t istart = start & ~(isize - 1);
  uintptr_t end = start + length;

  __asm__ __volatile__ (  // NOLINT
    // Clean every line of the D cache containing the target data.
    "0: \n\t"
    // dc : Data Cache maintenance
    //  c : Clean
    // va : by (Virtual) Address
    //  u : to the point of Unification
    // The point of unification for a processor is the point by which the
    // instruction and data caches are guaranteed to see the same copy of a
    // memory location. See ARM DDI 0406B page B2-12 for more information.
    "dc cvau, %[dline] \n\t"
    "add %[dline], %[dline], %[dsize] \n\t"
    "cmp %[dline], %[end] \n\t"
    "b.lt 0b \n\t"
    // Barrier to make sure the effect of the code above is visible to the
    // rest of the world.
    // dsb : Data Synchronisation Barrier
    // ish : Inner SHareable domain
    // The point of unification for an Inner Shareable shareability domain is
    // the point by which the instruction and data caches of all the
    // processors in that Inner Shareable shareability domain are guaranteed
    // to see the same copy of a memory location. See ARM DDI 0406B page
    // B2-12 for more information.
    "dsb ish \n\t"
    // Invalidate every line of the I cache containing the target data.
    "1: \n\t"
    // ic : Instruction cache maintenance
    //  i : Invalidate
    // va : by address
    //  u : to the point of unification
    "ic ivau, %[iline] \n\t"
    "add %[iline], %[iline], %[isize] \n\t"
    "cmp %[iline], %[end] \n\t"
    "b.lt 1b \n\t"
    // Barrier to make sure the effect of the code above is visible to the
    // rest of the world.
    "dsb ish \n\t"
    // Barrier to ensure any prefetching which happened before this code is
    // discarded.
    // isb : Instruction Synchronisation Barrier
    "isb \n\t"
    : [dline] "+r" (dstart),
      [iline] "+r" (istart)
    : [dsize] "r" (dsize),
      [isize] "r" (isize),
      [end] "r" (end)
    // This code does not write to memory but without the dependency gcc might
    // move this code before the code is generated.
    : "cc", "memory"
  );  // NOLINT
#endif
}
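
// Usage sketch (illustrative only): after generated code has been patched in
// place, the modified range must be flushed before it is executed, e.g.
//   memcpy(code_start, new_instructions, size);
//   CPU::FlushICache(code_start, size);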


void CpuFeatures::Probe() {
  // Compute I and D cache line size. The cache type register holds
  // information about the caches.
  uint32_t cache_type_register = GetCacheType();

  static const int kDCacheLineSizeShift = 16;
  static const int kICacheLineSizeShift = 0;
  static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
  static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;

  // The cache type register holds the size of the I and D caches as a power
  // of two.
  uint32_t dcache_line_size_power_of_two =
      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
  uint32_t icache_line_size_power_of_two =
      (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;

  dcache_line_size_ = 1 << dcache_line_size_power_of_two;
  icache_line_size_ = 1 << icache_line_size_power_of_two;
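  // Note (editorial assumption worth flagging): architecturally,
  // CTR_EL0.DminLine and CTR_EL0.IminLine encode log2 of the line size in
  // 4-byte words, not in bytes. Interpreting the field as log2(bytes), as
  // above, under-estimates the true line size; that is safe for FlushICache,
  // which merely performs more cache operations than strictly necessary.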

  // AArch64 has no configuration options, no further probing is required.
  supported_ = 0;

#ifdef DEBUG
  initialized_ = true;
#endif
}


unsigned CpuFeatures::dcache_line_size() {
  ASSERT(initialized_);
  return dcache_line_size_;
}


unsigned CpuFeatures::icache_line_size() {
  ASSERT(initialized_);
  return icache_line_size_;
}


uint32_t CpuFeatures::GetCacheType() {
#ifdef USE_SIMULATOR
  // This will lead to a cache with 1 byte long lines, which is fine since the
  // simulator will not need this information.
  return 0;
#else
  uint32_t cache_type_register;
  // Copy the content of the cache type register to a core register.
  __asm__ __volatile__ ("mrs %[ctr], ctr_el0"  // NOLINT
                        : [ctr] "=r" (cache_type_register));
  return cache_type_register;
#endif
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
@@ -0,0 +1,107 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#ifndef V8_A64_CPU_A64_H_
#define V8_A64_CPU_A64_H_

#include <stdio.h>
#include "serialize.h"
#include "cpu.h"

namespace v8 {
namespace internal {


// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
 public:
  // Detect features of the target CPU. Set safe defaults if the serializer
  // is enabled (snapshots must be portable).
  static void Probe();

  // Check whether a feature is supported by the target CPU.
  static bool IsSupported(CpuFeature f) {
    ASSERT(initialized_);
    // There are no optional features for A64.
    return false;
  }

  static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
    ASSERT(initialized_);
    // There are no optional features for A64.
    return false;
  }

  static bool IsSafeForSnapshot(CpuFeature f) {
    return (IsSupported(f) &&
            (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
  }
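
  // Usage sketch (illustrative; SOME_FEATURE is a placeholder): in V8,
  // feature-dependent code is normally guarded as
  //   if (CpuFeatures::IsSupported(SOME_FEATURE)) {
  //     CpuFeatureScope scope(masm, SOME_FEATURE);
  //     ...
  //   }
  // Since IsSupported() always returns false on A64, no such guard fires.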

  // I and D cache line size in bytes.
  static unsigned dcache_line_size();
  static unsigned icache_line_size();

  static unsigned supported_;

  static bool VerifyCrossCompiling() {
    // There are no optional features for A64.
    ASSERT(cross_compile_ == 0);
    return true;
  }

  static bool VerifyCrossCompiling(CpuFeature f) {
    // There are no optional features for A64.
    USE(f);
    ASSERT(cross_compile_ == 0);
    return true;
  }

 private:
  // Return the content of the cache type register.
  static uint32_t GetCacheType();

  // I and D cache line size in bytes.
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;

#ifdef DEBUG
  static bool initialized_;
#endif

  // This isn't used (and is always 0), but it is required by V8.
  static unsigned found_by_runtime_probing_only_;

  static unsigned cross_compile_;

  friend class PlatformFeatureScope;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

} }  // namespace v8::internal

#endif  // V8_A64_CPU_A64_H_
@@ -0,0 +1,394 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#include "v8.h" |
||||
|
|
||||
|
#if V8_TARGET_ARCH_A64 |
||||
|
|
||||
|
#include "codegen.h" |
||||
|
#include "debug.h" |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
|
||||
|
#define __ ACCESS_MASM(masm) |
||||
|
|
||||
|
|
||||
|
#ifdef ENABLE_DEBUGGER_SUPPORT |
||||
|
bool BreakLocationIterator::IsDebugBreakAtReturn() { |
||||
|
return Debug::IsDebugBreakAtReturn(rinfo()); |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void BreakLocationIterator::SetDebugBreakAtReturn() { |
||||
|
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
|
||||
|
// the return from JS function sequence from
|
||||
|
// mov sp, fp
|
||||
|
// ldp fp, lr, [sp] #16
|
||||
|
// lrd ip0, [pc, #(3 * kInstructionSize)]
|
||||
|
// add sp, sp, ip0
|
||||
|
// ret
|
||||
|
// <number of paramters ...
|
||||
|
// ... plus one (64 bits)>
|
||||
|
// to a call to the debug break return code.
|
||||
|
// ldr ip0, [pc, #(3 * kInstructionSize)]
|
||||
|
// blr ip0
|
||||
|
// hlt kHltBadCode @ code should not return, catch if it does.
|
||||
|
// <debug break return code ...
|
||||
|
// ... entry point address (64 bits)>
|
||||
|
|
||||
|
// The patching code must not overflow the space occupied by the return
|
||||
|
// sequence.
|
||||
|
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5); |
||||
|
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5); |
||||
|
byte* entry = |
||||
|
debug_info_->GetIsolate()->debug()->debug_break_return()->entry(); |
||||
|
|
||||
|
// The first instruction of a patched return sequence must be a load literal
|
||||
|
// loading the address of the debug break return code.
|
||||
|
patcher.LoadLiteral(ip0, 3 * kInstructionSize); |
||||
|
// TODO(all): check the following is correct.
|
||||
|
// The debug break return code will push a frame and call statically compiled
|
||||
|
// code. By using blr, even though control will not return after the branch,
|
||||
|
// this call site will be registered in the frame (lr being saved as the pc
|
||||
|
// of the next instruction to execute for this frame). The debugger can now
|
||||
|
// iterate on the frames to find call to debug break return code.
|
||||
|
patcher.blr(ip0); |
||||
|
patcher.hlt(kHltBadCode); |
||||
|
patcher.dc64(reinterpret_cast<int64_t>(entry)); |
||||
|
} |


void BreakLocationIterator::ClearDebugBreakAtReturn() {
  // Reset the code emitted by EmitReturnSequence to its original state.
  rinfo()->PatchCode(original_rinfo()->pc(),
                     Assembler::kJSRetSequenceInstructions);
}


bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
  return rinfo->IsPatchedReturnSequence();
}


bool BreakLocationIterator::IsDebugBreakAtSlot() {
  ASSERT(IsDebugBreakSlot());
  // Check whether the debug break slot instructions have been patched.
  return rinfo()->IsPatchedDebugBreakSlotSequence();
}


void BreakLocationIterator::SetDebugBreakAtSlot() {
  // Patch the code emitted by Debug::GenerateSlots, changing the debug break
  // slot code from
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  //   mov x0, x0    @ nop DEBUG_BREAK_NOP
  // to a call to the debug slot code.
  //   ldr ip0, [pc, #(2 * kInstructionSize)]
  //   blr ip0
  //   <debug break slot code ...
  //    ... entry point address (64 bits)>

  // TODO(all): consider adding a hlt instruction after the blr as we don't
  // expect control to return here. This implies increasing
  // kDebugBreakSlotInstructions to 5 instructions.

  // The patching code must not overflow the space occupied by the return
  // sequence.
  STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
  PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
  byte* entry =
      debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();

  // The first instruction of a patched debug break slot must be a load
  // literal loading the address of the debug break slot code.
  patcher.LoadLiteral(ip0, 2 * kInstructionSize);
  // TODO(all): check the following is correct.
  // The debug break slot code will push a frame and call statically compiled
  // code. By using blr, even though control will not return after the branch,
  // this call site will be registered in the frame (lr being saved as the pc
  // of the next instruction to execute for this frame). The debugger can now
  // iterate on the frames to find the call to the debug break slot code.
  patcher.blr(ip0);
  patcher.dc64(reinterpret_cast<int64_t>(entry));
}


void BreakLocationIterator::ClearDebugBreakAtSlot() {
  ASSERT(IsDebugBreakSlot());
  rinfo()->PatchCode(original_rinfo()->pc(),
                     Assembler::kDebugBreakSlotInstructions);
}

const bool Debug::FramePaddingLayout::kIsSupported = false;


static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                          RegList object_regs,
                                          RegList non_object_regs,
                                          Register scratch) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Any live values (object_regs and non_object_regs) in caller-saved
    // registers (or lr) need to be stored on the stack so that their values
    // are safely preserved for a call into C code.
    //
    // Also:
    //  * object_regs may be modified during the C code by the garbage
    //    collector. Every object register must be a valid tagged pointer or
    //    SMI.
    //  * non_object_regs will be converted to SMIs so that the garbage
    //    collector doesn't try to interpret them as pointers.
    //
    // TODO(jbramley): Why can't this handle callee-saved registers?
    ASSERT((~kCallerSaved.list() & object_regs) == 0);
    ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
    ASSERT((object_regs & non_object_regs) == 0);
    ASSERT((scratch.Bit() & object_regs) == 0);
    ASSERT((scratch.Bit() & non_object_regs) == 0);
    ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
    ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
    STATIC_ASSERT(kSmiValueSize == 32);

    CPURegList non_object_list =
        CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
    while (!non_object_list.IsEmpty()) {
      // Store each non-object register as two SMIs.
      Register reg = Register(non_object_list.PopLowestIndex());
      __ Push(reg);
      __ Poke(wzr, 0);
      __ Push(reg.W(), wzr);
      // Stack:
      //  jssp[12]: reg[63:32]
      //  jssp[8]: 0x00000000 (SMI tag & padding)
      //  jssp[4]: reg[31:0]
      //  jssp[0]: 0x00000000 (SMI tag & padding)
      STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
    }

    if (object_regs != 0) {
      __ PushXRegList(object_regs);
    }

#ifdef DEBUG
    __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
    __ Mov(x0, 0);  // No arguments.
    __ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));

    CEntryStub stub(1);
    __ CallStub(&stub);

    // Restore the register values from the expression stack.
    if (object_regs != 0) {
      __ PopXRegList(object_regs);
    }

    non_object_list =
        CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
    while (!non_object_list.IsEmpty()) {
      // Load each non-object register from two SMIs.
      // Stack:
      //  jssp[12]: reg[63:32]
      //  jssp[8]: 0x00000000 (SMI tag & padding)
      //  jssp[4]: reg[31:0]
      //  jssp[0]: 0x00000000 (SMI tag & padding)
      Register reg = Register(non_object_list.PopHighestIndex());
      __ Pop(scratch, reg);
      __ Bfxil(reg, scratch, 32, 32);
    }

    // Leave the internal frame.
  }

  // Now that the break point has been handled, resume normal execution by
  // jumping to the target address intended by the caller and that was
  // overwritten by the address of DebugBreakXXX.
  ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
                                       masm->isolate());
  __ Mov(scratch, Operand(after_break_target));
  __ Ldr(scratch, MemOperand(scratch));
  __ Br(scratch);
}
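
// Worked example (illustrative): a non-object register holding
// 0x0123456789abcdef is saved as the two stack words 0x89abcdef00000000 and
// 0x0123456700000000. With kSmiShift == 32 each word is a valid SMI, so the
// GC sees two small integers instead of a possible pointer; Bfxil() then
// reassembles the original 64-bit value when the registers are restored.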


void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC load (from ic-arm.cc).
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  //  -- [sp]  : receiver
  // -----------------------------------
  // Registers x0 and x2 contain objects that need to be pushed on the
  // expression stack of the fake JS frame.
  Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
}


void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC store (from ic-arm.cc).
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------
  // Registers x0, x1, and x2 contain objects that need to be pushed on the
  // expression stack of the fake JS frame.
  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}


void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
}


void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}


void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
  // Register state for CompareNil IC.
  // ----------- S t a t e -------------
  //  -- x0    : value
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}


void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
  // Calling convention for IC call (from ic-arm.cc).
  // ----------- S t a t e -------------
  //  -- x2    : name
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
}


void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
  // In places other than IC call sites it is expected that x0 is TOS which
  // is an object - this is not generally the case so this should be used with
  // care.
  Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}


void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
  // Register state for CallFunctionStub (from code-stubs-a64.cc).
  // ----------- S t a t e -------------
  //  -- x1 : function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}


void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
  // Register state for CallFunctionStub (from code-stubs-a64.cc).
  // ----------- S t a t e -------------
  //  -- x1 : function
  //  -- x2 : feedback array
  //  -- x3 : slot in feedback array
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
}


void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
  // Calling convention for CallConstructStub (from code-stubs-a64.cc).
  // ----------- S t a t e -------------
  //  -- x0 : number of arguments (not smi)
  //  -- x1 : constructor function
  // -----------------------------------
  Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}


void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
  // Calling convention for CallConstructStub (from code-stubs-a64.cc).
  // ----------- S t a t e -------------
  //  -- x0 : number of arguments (not smi)
  //  -- x1 : constructor function
  //  -- x2 : feedback array
  //  -- x3 : feedback slot (smi)
  // -----------------------------------
  Generate_DebugBreakCallHelper(
      masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}


void Debug::GenerateSlot(MacroAssembler* masm) {
  // Generate enough nop's to make space for a call instruction. Avoid
  // emitting the constant pool in the debug break slot code.
  InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);

  __ RecordDebugBreakSlot();
  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
    __ nop(Assembler::DEBUG_BREAK_NOP);
  }
}


void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
  // In the places where a debug break slot is inserted, no registers can
  // contain object pointers.
  Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}


void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}


void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}

const bool Debug::kFrameDropperSupported = false;

#endif  // ENABLE_DEBUGGER_SUPPORT

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
@@ -0,0 +1,111 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#if V8_TARGET_ARCH_A64

#if defined(USE_SIMULATOR)

#include "a64/debugger-a64.h"

namespace v8 {
namespace internal {


void Debugger::VisitException(Instruction* instr) {
  switch (instr->Mask(ExceptionMask)) {
    case HLT: {
      if (instr->ImmException() == kImmExceptionIsDebug) {
        // Read the arguments encoded inline in the instruction stream.
        uint32_t code;
        uint32_t parameters;
        char const * message;

        ASSERT(sizeof(*pc_) == 1);
        memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
        memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
        message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);

        if (message[0] == '\0') {
          fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
        } else {
          fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
        }

        // Other options.
        switch (parameters & kDebuggerTracingDirectivesMask) {
          case TRACE_ENABLE:
            set_log_parameters(log_parameters() | parameters);
            break;
          case TRACE_DISABLE:
            set_log_parameters(log_parameters() & ~parameters);
            break;
          case TRACE_OVERRIDE:
            set_log_parameters(parameters);
            break;
          default:
            // We don't support a one-shot LOG_DISASM.
            ASSERT((parameters & LOG_DISASM) == 0);
            // Don't print information that is already being traced.
            parameters &= ~log_parameters();
            // Print the requested information.
            if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
            if (parameters & LOG_REGS) PrintRegisters(true);
            if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
        }

        // Check if the debugger should break.
        if (parameters & BREAK) OS::DebugBreak();

        // The stop parameters are inlined in the code. Skip them:
        //  - Skip to the end of the message string.
        pc_ += kDebugMessageOffset + strlen(message) + 1;
        //  - Advance to the next aligned location.
        pc_ = AlignUp(pc_, kInstructionSize);
        //  - Verify that the unreachable marker is present.
        ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
        ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
               kImmExceptionIsUnreachable);
        //  - Skip past the unreachable marker.
        pc_ += kInstructionSize;
        pc_modified_ = true;
      } else {
        Simulator::VisitException(instr);
      }
      break;
    }

    default:
      UNIMPLEMENTED();
  }
}
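
// Layout of an inline debug marker, as read above (a sketch derived from the
// offsets used in this function, not an authoritative specification):
//   hlt kImmExceptionIsDebug
//   <code (uint32)> <parameters (uint32)>  at kDebugCodeOffset/kDebugParamsOffset
//   <NUL-terminated message string>        at kDebugMessageOffset
//   <padding up to the next kInstructionSize boundary>
//   hlt kImmExceptionIsUnreachable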


} }  // namespace v8::internal

#endif  // USE_SIMULATOR

#endif  // V8_TARGET_ARCH_A64
@@ -0,0 +1,56 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#ifndef V8_A64_DEBUGGER_A64_H_
#define V8_A64_DEBUGGER_A64_H_

#if defined(USE_SIMULATOR)

#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/simulator-a64.h"

namespace v8 {
namespace internal {


class Debugger : public Simulator {
 public:
  Debugger(Decoder* decoder, FILE* stream = stderr)
      : Simulator(decoder, NULL, stream) {}

  // Overridden Simulator function.
  void VisitException(Instruction* instr);
};
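
// Usage sketch (illustrative, simulator builds only): the debugger wraps the
// simulator and reuses its decoder:
//   Decoder decoder;
//   Debugger debugger(&decoder);  // Logs to stderr by default.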


} }  // namespace v8::internal

#endif  // USE_SIMULATOR

#endif  // V8_A64_DEBUGGER_A64_H_
|||||
|
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
#include "v8.h" |
||||
|
|
||||
|
#if V8_TARGET_ARCH_A64 |
||||
|
|
||||
|
#include "globals.h" |
||||
|
#include "utils.h" |
||||
|
#include "a64/decoder-a64.h" |
||||
|
|
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
// Top-level instruction decode function.
|
||||
|
void Decoder::Decode(Instruction *instr) { |
||||
|
if (instr->Bits(28, 27) == 0) { |
||||
|
VisitUnallocated(instr); |
||||
|
} else { |
||||
|
switch (instr->Bits(27, 24)) { |
||||
|
// 0: PC relative addressing.
|
||||
|
case 0x0: DecodePCRelAddressing(instr); break; |
||||
|
|
||||
|
// 1: Add/sub immediate.
|
||||
|
case 0x1: DecodeAddSubImmediate(instr); break; |
||||
|
|
||||
|
// A: Logical shifted register.
|
||||
|
// Add/sub with carry.
|
||||
|
// Conditional compare register.
|
||||
|
// Conditional compare immediate.
|
||||
|
// Conditional select.
|
||||
|
// Data processing 1 source.
|
||||
|
// Data processing 2 source.
|
||||
|
// B: Add/sub shifted register.
|
||||
|
// Add/sub extended register.
|
||||
|
// Data processing 3 source.
|
||||
|
case 0xA: |
||||
|
case 0xB: DecodeDataProcessing(instr); break; |
||||
|
|
||||
|
// 2: Logical immediate.
|
||||
|
// Move wide immediate.
|
||||
|
case 0x2: DecodeLogical(instr); break; |
||||
|
|
||||
|
// 3: Bitfield.
|
||||
|
// Extract.
|
||||
|
case 0x3: DecodeBitfieldExtract(instr); break; |
||||
|
|
||||
|
// 4: Unconditional branch immediate.
|
||||
|
// Exception generation.
|
||||
|
// Compare and branch immediate.
|
||||
|
// 5: Compare and branch immediate.
|
||||
|
// Conditional branch.
|
||||
|
// System.
|
||||
|
// 6,7: Unconditional branch.
|
||||
|
// Test and branch immediate.
|
||||
|
case 0x4: |
||||
|
case 0x5: |
||||
|
case 0x6: |
||||
|
case 0x7: DecodeBranchSystemException(instr); break; |
||||
|
|
||||
|
// 8,9: Load/store register pair post-index.
|
||||
|
// Load register literal.
|
||||
|
// Load/store register unscaled immediate.
|
||||
|
// Load/store register immediate post-index.
|
||||
|
// Load/store register immediate pre-index.
|
||||
|
// Load/store register offset.
|
||||
|
// C,D: Load/store register pair offset.
|
||||
|
// Load/store register pair pre-index.
|
||||
|
// Load/store register unsigned immediate.
|
||||
|
// Advanced SIMD.
|
||||
|
case 0x8: |
||||
|
case 0x9: |
||||
|
case 0xC: |
||||
|
case 0xD: DecodeLoadStore(instr); break; |
||||
|
|
||||
|
// E: FP fixed point conversion.
|
||||
|
// FP integer conversion.
|
||||
|
// FP data processing 1 source.
|
||||
|
// FP compare.
|
||||
|
// FP immediate.
|
||||
|
// FP data processing 2 source.
|
||||
|
// FP conditional compare.
|
||||
|
// FP conditional select.
|
||||
|
// Advanced SIMD.
|
||||
|
// F: FP data processing 3 source.
|
||||
|
// Advanced SIMD.
|
||||
|
case 0xE: |
||||
|
case 0xF: DecodeFP(instr); break; |
||||
|
} |
||||
|
} |
||||
|
} |
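
// Example (illustrative): 0x91000821 encodes "add x1, x1, #0x2". Its bits
// 28:27 are not both zero and bits 27:24 are 0x1, so Decode() dispatches
// through DecodeAddSubImmediate() and registered visitors receive
// VisitAddSubImmediate().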


void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
  visitors_.remove(new_visitor);
  visitors_.push_back(new_visitor);
}


void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
  visitors_.remove(new_visitor);
  visitors_.push_front(new_visitor);
}


void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
                                  DecoderVisitor* registered_visitor) {
  visitors_.remove(new_visitor);
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  ASSERT(*it == registered_visitor);
  visitors_.insert(it, new_visitor);
}


void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
                                 DecoderVisitor* registered_visitor) {
  visitors_.remove(new_visitor);
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      it++;
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  ASSERT(*it == registered_visitor);
  visitors_.push_back(new_visitor);
}


void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
  visitors_.remove(visitor);
}
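
// Usage sketch (illustrative; MyVisitor stands in for any DecoderVisitor
// subclass, e.g. a disassembler):
//   Decoder decoder;
//   MyVisitor v1, v2;
//   decoder.AppendVisitor(&v1);
//   decoder.InsertVisitorBefore(&v2, &v1);  // v2 now precedes v1.
//   decoder.Decode(instr);                  // Calls v2's visitor, then v1's.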


void Decoder::DecodePCRelAddressing(Instruction* instr) {
  ASSERT(instr->Bits(27, 24) == 0x0);
  // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
  // decode.
  ASSERT(instr->Bit(28) == 0x1);
  VisitPCRelAddressing(instr);
}


void Decoder::DecodeBranchSystemException(Instruction* instr) {
  ASSERT((instr->Bits(27, 24) == 0x4) ||
         (instr->Bits(27, 24) == 0x5) ||
         (instr->Bits(27, 24) == 0x6) ||
         (instr->Bits(27, 24) == 0x7) );

  switch (instr->Bits(31, 29)) {
    case 0:
    case 4: {
      VisitUnconditionalBranch(instr);
      break;
    }
    case 1:
    case 5: {
      if (instr->Bit(25) == 0) {
        VisitCompareBranch(instr);
      } else {
        VisitTestBranch(instr);
      }
      break;
    }
    case 2: {
      if (instr->Bit(25) == 0) {
        if ((instr->Bit(24) == 0x1) ||
            (instr->Mask(0x01000010) == 0x00000010)) {
          VisitUnallocated(instr);
        } else {
          VisitConditionalBranch(instr);
        }
      } else {
        VisitUnallocated(instr);
      }
      break;
    }
    case 6: {
      if (instr->Bit(25) == 0) {
        if (instr->Bit(24) == 0) {
          if ((instr->Bits(4, 2) != 0) ||
              (instr->Mask(0x00E0001D) == 0x00200001) ||
              (instr->Mask(0x00E0001D) == 0x00400001) ||
              (instr->Mask(0x00E0001E) == 0x00200002) ||
              (instr->Mask(0x00E0001E) == 0x00400002) ||
              (instr->Mask(0x00E0001C) == 0x00600000) ||
              (instr->Mask(0x00E0001C) == 0x00800000) ||
              (instr->Mask(0x00E0001F) == 0x00A00000) ||
              (instr->Mask(0x00C0001C) == 0x00C00000)) {
            VisitUnallocated(instr);
          } else {
            VisitException(instr);
          }
        } else {
          if (instr->Bits(23, 22) == 0) {
            const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
            if ((instr->Bits(21, 19) == 0x4) ||
                (masked_003FF0E0 == 0x00033000) ||
                (masked_003FF0E0 == 0x003FF020) ||
                (masked_003FF0E0 == 0x003FF060) ||
                (masked_003FF0E0 == 0x003FF0E0) ||
                (instr->Mask(0x00388000) == 0x00008000) ||
                (instr->Mask(0x0038E000) == 0x00000000) ||
                (instr->Mask(0x0039E000) == 0x00002000) ||
                (instr->Mask(0x003AE000) == 0x00002000) ||
                (instr->Mask(0x003CE000) == 0x00042000) ||
                (instr->Mask(0x003FFFC0) == 0x000320C0) ||
                (instr->Mask(0x003FF100) == 0x00032100) ||
                (instr->Mask(0x003FF200) == 0x00032200) ||
                (instr->Mask(0x003FF400) == 0x00032400) ||
                (instr->Mask(0x003FF800) == 0x00032800) ||
                (instr->Mask(0x0038F000) == 0x00005000) ||
                (instr->Mask(0x0038E000) == 0x00006000)) {
              VisitUnallocated(instr);
            } else {
              VisitSystem(instr);
            }
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        if ((instr->Bit(24) == 0x1) ||
            (instr->Bits(20, 16) != 0x1F) ||
            (instr->Bits(15, 10) != 0) ||
            (instr->Bits(4, 0) != 0) ||
            (instr->Bits(24, 21) == 0x3) ||
            (instr->Bits(24, 22) == 0x3)) {
          VisitUnallocated(instr);
        } else {
          VisitUnconditionalBranchToRegister(instr);
        }
      }
      break;
    }
    case 3:
    case 7: {
      VisitUnallocated(instr);
      break;
    }
  }
}


void Decoder::DecodeLoadStore(Instruction* instr) {
  ASSERT((instr->Bits(27, 24) == 0x8) ||
         (instr->Bits(27, 24) == 0x9) ||
         (instr->Bits(27, 24) == 0xC) ||
         (instr->Bits(27, 24) == 0xD) );

  if (instr->Bit(24) == 0) {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(29) == 0) {
        if (instr->Bit(26) == 0) {
          // TODO(all): VisitLoadStoreExclusive.
          VisitUnimplemented(instr);
        } else {
          DecodeAdvSIMDLoadStore(instr);
        }
      } else {
        if ((instr->Bits(31, 30) == 0x3) ||
            (instr->Mask(0xC4400000) == 0x40000000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(23) == 0) {
            if (instr->Mask(0xC4400000) == 0xC0400000) {
              VisitUnallocated(instr);
            } else {
              VisitLoadStorePairNonTemporal(instr);
            }
          } else {
            VisitLoadStorePairPostIndex(instr);
          }
        }
      }
    } else {
      if (instr->Bit(29) == 0) {
        if (instr->Mask(0xC4000000) == 0xC4000000) {
          VisitUnallocated(instr);
        } else {
          VisitLoadLiteral(instr);
        }
      } else {
        if ((instr->Mask(0x84C00000) == 0x80C00000) ||
            (instr->Mask(0x44800000) == 0x44800000) ||
            (instr->Mask(0x84800000) == 0x84800000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(21) == 0) {
            switch (instr->Bits(11, 10)) {
              case 0: {
                VisitLoadStoreUnscaledOffset(instr);
                break;
              }
              case 1: {
                if (instr->Mask(0xC4C00000) == 0xC0800000) {
                  VisitUnallocated(instr);
                } else {
                  VisitLoadStorePostIndex(instr);
                }
                break;
              }
              case 2: {
                // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
                VisitUnimplemented(instr);
                break;
              }
              case 3: {
                if (instr->Mask(0xC4C00000) == 0xC0800000) {
                  VisitUnallocated(instr);
                } else {
                  VisitLoadStorePreIndex(instr);
                }
                break;
              }
            }
          } else {
            if (instr->Bits(11, 10) == 0x2) {
              if (instr->Bit(14) == 0) {
                VisitUnallocated(instr);
              } else {
                VisitLoadStoreRegisterOffset(instr);
              }
            } else {
              VisitUnallocated(instr);
            }
          }
        }
      }
    }
  } else {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(29) == 0) {
        VisitUnallocated(instr);
      } else {
        if ((instr->Bits(31, 30) == 0x3) ||
            (instr->Mask(0xC4400000) == 0x40000000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(23) == 0) {
            VisitLoadStorePairOffset(instr);
          } else {
            VisitLoadStorePairPreIndex(instr);
          }
        }
      }
    } else {
      if (instr->Bit(29) == 0) {
        VisitUnallocated(instr);
      } else {
        if ((instr->Mask(0x84C00000) == 0x80C00000) ||
            (instr->Mask(0x44800000) == 0x44800000) ||
            (instr->Mask(0x84800000) == 0x84800000)) {
          VisitUnallocated(instr);
        } else {
          VisitLoadStoreUnsignedOffset(instr);
        }
      }
    }
  }
}


void Decoder::DecodeLogical(Instruction* instr) {
  ASSERT(instr->Bits(27, 24) == 0x2);

  if (instr->Mask(0x80400000) == 0x00400000) {
    VisitUnallocated(instr);
  } else {
    if (instr->Bit(23) == 0) {
      VisitLogicalImmediate(instr);
    } else {
      if (instr->Bits(30, 29) == 0x1) {
        VisitUnallocated(instr);
      } else {
        VisitMoveWideImmediate(instr);
      }
    }
  }
}


void Decoder::DecodeBitfieldExtract(Instruction* instr) {
  ASSERT(instr->Bits(27, 24) == 0x3);

  if ((instr->Mask(0x80400000) == 0x80000000) ||
      (instr->Mask(0x80400000) == 0x00400000) ||
      (instr->Mask(0x80008000) == 0x00008000)) {
    VisitUnallocated(instr);
  } else if (instr->Bit(23) == 0) {
    if ((instr->Mask(0x80200000) == 0x00200000) ||
        (instr->Mask(0x60000000) == 0x60000000)) {
      VisitUnallocated(instr);
    } else {
      VisitBitfield(instr);
    }
  } else {
    if ((instr->Mask(0x60200000) == 0x00200000) ||
        (instr->Mask(0x60000000) != 0x00000000)) {
      VisitUnallocated(instr);
    } else {
      VisitExtract(instr);
    }
  }
}


void Decoder::DecodeAddSubImmediate(Instruction* instr) {
  ASSERT(instr->Bits(27, 24) == 0x1);
  if (instr->Bit(23) == 1) {
    VisitUnallocated(instr);
  } else {
    VisitAddSubImmediate(instr);
  }
}


void Decoder::DecodeDataProcessing(Instruction* instr) {
  ASSERT((instr->Bits(27, 24) == 0xA) ||
         (instr->Bits(27, 24) == 0xB) );

  if (instr->Bit(24) == 0) {
    if (instr->Bit(28) == 0) {
      if (instr->Mask(0x80008000) == 0x00008000) {
        VisitUnallocated(instr);
      } else {
        VisitLogicalShifted(instr);
      }
    } else {
      switch (instr->Bits(23, 21)) {
        case 0: {
          if (instr->Mask(0x0000FC00) != 0) {
            VisitUnallocated(instr);
          } else {
            VisitAddSubWithCarry(instr);
          }
          break;
        }
        case 2: {
          if ((instr->Bit(29) == 0) ||
              (instr->Mask(0x00000410) != 0)) {
            VisitUnallocated(instr);
          } else {
            if (instr->Bit(11) == 0) {
              VisitConditionalCompareRegister(instr);
            } else {
              VisitConditionalCompareImmediate(instr);
            }
          }
          break;
        }
        case 4: {
          if (instr->Mask(0x20000800) != 0x00000000) {
            VisitUnallocated(instr);
          } else {
            VisitConditionalSelect(instr);
          }
          break;
        }
        case 6: {
          if (instr->Bit(29) == 0x1) {
            VisitUnallocated(instr);
          } else {
            if (instr->Bit(30) == 0) {
              if ((instr->Bit(15) == 0x1) ||
                  (instr->Bits(15, 11) == 0) ||
                  (instr->Bits(15, 12) == 0x1) ||
                  (instr->Bits(15, 12) == 0x3) ||
                  (instr->Bits(15, 13) == 0x3) ||
                  (instr->Mask(0x8000EC00) == 0x00004C00) ||
                  (instr->Mask(0x8000E800) == 0x80004000) ||
                  (instr->Mask(0x8000E400) == 0x80004000)) {
                VisitUnallocated(instr);
              } else {
                VisitDataProcessing2Source(instr);
              }
            } else {
              if ((instr->Bit(13) == 1) ||
                  (instr->Bits(20, 16) != 0) ||
                  (instr->Bits(15, 14) != 0) ||
                  (instr->Mask(0xA01FFC00) == 0x00000C00) ||
                  (instr->Mask(0x201FF800) == 0x00001800)) {
                VisitUnallocated(instr);
              } else {
                VisitDataProcessing1Source(instr);
              }
            }
          }
          break;
        }
        case 1:
        case 3:
        case 5:
        case 7: VisitUnallocated(instr); break;
      }
    }
  } else {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(21) == 0) {
        if ((instr->Bits(23, 22) == 0x3) ||
            (instr->Mask(0x80008000) == 0x00008000)) {
          VisitUnallocated(instr);
        } else {
          VisitAddSubShifted(instr);
        }
      } else {
        if ((instr->Mask(0x00C00000) != 0x00000000) ||
            (instr->Mask(0x00001400) == 0x00001400) ||
            (instr->Mask(0x00001800) == 0x00001800)) {
          VisitUnallocated(instr);
        } else {
          VisitAddSubExtended(instr);
        }
      }
    } else {
      if ((instr->Bit(30) == 0x1) ||
          (instr->Bits(30, 29) == 0x1) ||
          (instr->Mask(0xE0600000) == 0x00200000) ||
          (instr->Mask(0xE0608000) == 0x00400000) ||
          (instr->Mask(0x60608000) == 0x00408000) ||
          (instr->Mask(0x60E00000) == 0x00E00000) ||
          (instr->Mask(0x60E00000) == 0x00800000) ||
          (instr->Mask(0x60E00000) == 0x00600000)) {
        VisitUnallocated(instr);
      } else {
        VisitDataProcessing3Source(instr);
      }
    }
  }
}


void Decoder::DecodeFP(Instruction* instr) {
  ASSERT((instr->Bits(27, 24) == 0xE) ||
         (instr->Bits(27, 24) == 0xF) );

  if (instr->Bit(28) == 0) {
    DecodeAdvSIMDDataProcessing(instr);
  } else {
    if (instr->Bit(29) == 1) {
      VisitUnallocated(instr);
    } else {
      if (instr->Bits(31, 30) == 0x3) {
        VisitUnallocated(instr);
      } else if (instr->Bits(31, 30) == 0x1) {
        DecodeAdvSIMDDataProcessing(instr);
      } else {
        if (instr->Bit(24) == 0) {
          if (instr->Bit(21) == 0) {
            if ((instr->Bit(23) == 1) ||
                (instr->Bit(18) == 1) ||
                (instr->Mask(0x80008000) == 0x00000000) ||
                (instr->Mask(0x000E0000) == 0x00000000) ||
                (instr->Mask(0x000E0000) == 0x000A0000) ||
                (instr->Mask(0x00160000) == 0x00000000) ||
                (instr->Mask(0x00160000) == 0x00120000)) {
              VisitUnallocated(instr);
            } else {
              VisitFPFixedPointConvert(instr);
            }
          } else {
            if (instr->Bits(15, 10) == 32) {
              VisitUnallocated(instr);
            } else if (instr->Bits(15, 10) == 0) {
              if ((instr->Bits(23, 22) == 0x3) ||
                  (instr->Mask(0x000E0000) == 0x000A0000) ||
                  (instr->Mask(0x000E0000) == 0x000C0000) ||
                  (instr->Mask(0x00160000) == 0x00120000) ||
                  (instr->Mask(0x00160000) == 0x00140000) ||
                  (instr->Mask(0x20C40000) == 0x00800000) ||
                  (instr->Mask(0x20C60000) == 0x00840000) ||
                  (instr->Mask(0xA0C60000) == 0x80060000) ||
                  (instr->Mask(0xA0C60000) == 0x00860000) ||
                  (instr->Mask(0xA0C60000) == 0x00460000) ||
                  (instr->Mask(0xA0CE0000) == 0x80860000) ||
                  (instr->Mask(0xA0CE0000) == 0x804E0000) ||
                  (instr->Mask(0xA0CE0000) == 0x000E0000) ||
                  (instr->Mask(0xA0D60000) == 0x00160000) ||
                  (instr->Mask(0xA0D60000) == 0x80560000) ||
                  (instr->Mask(0xA0D60000) == 0x80960000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPIntegerConvert(instr);
              }
            } else if (instr->Bits(14, 10) == 16) {
              const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
              if ((instr->Mask(0x80180000) != 0) ||
                  (masked_A0DF8000 == 0x00020000) ||
                  (masked_A0DF8000 == 0x00030000) ||
                  (masked_A0DF8000 == 0x00068000) ||
                  (masked_A0DF8000 == 0x00428000) ||
                  (masked_A0DF8000 == 0x00430000) ||
                  (masked_A0DF8000 == 0x00468000) ||
                  (instr->Mask(0xA0D80000) == 0x00800000) ||
                  (instr->Mask(0xA0DE0000) == 0x00C00000) ||
                  (instr->Mask(0xA0DF0000) == 0x00C30000) ||
                  (instr->Mask(0xA0DC0000) == 0x00C40000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPDataProcessing1Source(instr);
              }
            } else if (instr->Bits(13, 10) == 8) {
              if ((instr->Bits(15, 14) != 0) ||
                  (instr->Bits(2, 0) != 0) ||
                  (instr->Mask(0x80800000) != 0x00000000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPCompare(instr);
              }
            } else if (instr->Bits(12, 10) == 4) {
              if ((instr->Bits(9, 5) != 0) ||
                  (instr->Mask(0x80800000) != 0x00000000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPImmediate(instr);
              }
            } else {
              if (instr->Mask(0x80800000) != 0x00000000) {
                VisitUnallocated(instr);
              } else {
                switch (instr->Bits(11, 10)) {
                  case 1: {
                    VisitFPConditionalCompare(instr);
                    break;
                  }
                  case 2: {
                    if ((instr->Bits(15, 14) == 0x3) ||
                        (instr->Mask(0x00009000) == 0x00009000) ||
                        (instr->Mask(0x0000A000) == 0x0000A000)) {
                      VisitUnallocated(instr);
                    } else {
                      VisitFPDataProcessing2Source(instr);
                    }
                    break;
                  }
                  case 3: {
                    VisitFPConditionalSelect(instr);
                    break;
                  }
                  default: UNREACHABLE();
                }
              }
            }
          }
        } else {
          // Bit 30 == 1 has been handled earlier.
          ASSERT(instr->Bit(30) == 0);
          if (instr->Mask(0xA0800000) != 0) {
            VisitUnallocated(instr);
          } else {
            VisitFPDataProcessing3Source(instr);
          }
        }
      }
    }
  }
}


void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
  // TODO(all): Implement Advanced SIMD load/store instruction decode.
  ASSERT(instr->Bits(29, 25) == 0x6);
  VisitUnimplemented(instr);
}


void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
  // TODO(all): Implement Advanced SIMD data processing instruction decode.
  ASSERT(instr->Bits(27, 25) == 0x7);
  VisitUnimplemented(instr);
}


#define DEFINE_VISITOR_CALLERS(A)                                \
  void Decoder::Visit##A(Instruction *instr) {                   \
    ASSERT(instr->Mask(A##FMask) == A##Fixed);                   \
    std::list<DecoderVisitor*>::iterator it;                     \
    for (it = visitors_.begin(); it != visitors_.end(); it++) {  \
      (*it)->Visit##A(instr);                                    \
    }                                                            \
  }
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
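
// For reference (illustrative), the macro above expands, for each entry in
// VISITOR_LIST, to a forwarding method such as:
//   void Decoder::VisitAddSubImmediate(Instruction *instr) {
//     ASSERT(instr->Mask(AddSubImmediateFMask) == AddSubImmediateFixed);
//     std::list<DecoderVisitor*>::iterator it;
//     for (it = visitors_.begin(); it != visitors_.end(); it++) {
//       (*it)->VisitAddSubImmediate(instr);
//     }
//   }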


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
|||||
|
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
#ifndef V8_A64_DECODER_A64_H_
#define V8_A64_DECODER_A64_H_

#include <list>

#include "globals.h"
#include "a64/instructions-a64.h"

namespace v8 {
namespace internal {


// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V)            \
  V(PCRelAddressing)               \
  V(AddSubImmediate)               \
  V(LogicalImmediate)              \
  V(MoveWideImmediate)             \
  V(Bitfield)                      \
  V(Extract)                       \
  V(UnconditionalBranch)           \
  V(UnconditionalBranchToRegister) \
  V(CompareBranch)                 \
  V(TestBranch)                    \
  V(ConditionalBranch)             \
  V(System)                        \
  V(Exception)                     \
  V(LoadStorePairPostIndex)        \
  V(LoadStorePairOffset)           \
  V(LoadStorePairPreIndex)         \
  V(LoadStorePairNonTemporal)      \
  V(LoadLiteral)                   \
  V(LoadStoreUnscaledOffset)       \
  V(LoadStorePostIndex)            \
  V(LoadStorePreIndex)             \
  V(LoadStoreRegisterOffset)       \
  V(LoadStoreUnsignedOffset)       \
  V(LogicalShifted)                \
  V(AddSubShifted)                 \
  V(AddSubExtended)                \
  V(AddSubWithCarry)               \
  V(ConditionalCompareRegister)    \
  V(ConditionalCompareImmediate)   \
  V(ConditionalSelect)             \
  V(DataProcessing1Source)         \
  V(DataProcessing2Source)         \
  V(DataProcessing3Source)         \
  V(FPCompare)                     \
  V(FPConditionalCompare)          \
  V(FPConditionalSelect)           \
  V(FPImmediate)                   \
  V(FPDataProcessing1Source)       \
  V(FPDataProcessing2Source)       \
  V(FPDataProcessing3Source)       \
  V(FPIntegerConvert)              \
  V(FPFixedPointConvert)           \
  V(Unallocated)                   \
  V(Unimplemented)


// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
 public:
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
#undef DECLARE

  virtual ~DecoderVisitor() {}

 private:
  // Visitors are registered in a list.
  std::list<DecoderVisitor*> visitors_;

  friend class Decoder;
};


class Decoder: public DecoderVisitor {
 public:
  explicit Decoder() {}

  // Top-level instruction decoder function. Decodes an instruction and calls
  // the visitor functions registered with the Decoder class.
  void Decode(Instruction *instr);

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in list order.
  // A visitor can only be registered once.
  // Registering an already registered visitor will update its position.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);            // Move V2 to the start of the list.
  //   d.InsertVisitorBefore(V3, V2);
  //   d.AppendVisitor(V4);
  //   d.AppendVisitor(V4);             // No effect.
  //
  //   d.Decode(i);
  //
  // will call the visitor methods in V3, V2, V1 and V4, in that order.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
  void InsertVisitorBefore(DecoderVisitor* new_visitor,
                           DecoderVisitor* registered_visitor);
  void InsertVisitorAfter(DecoderVisitor* new_visitor,
                          DecoderVisitor* registered_visitor);

  // Remove a previously registered visitor class from the list of visitors
  // stored by the decoder.
  void RemoveVisitor(DecoderVisitor* visitor);

#define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

 private:
  // Decode the PC relative addressing instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x0.
  void DecodePCRelAddressing(Instruction* instr);

  // Decode the add/subtract immediate instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x1.
  void DecodeAddSubImmediate(Instruction* instr);

  // Decode the branch, system command, and exception generation parts of
  // the instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
  void DecodeBranchSystemException(Instruction* instr);

  // Decode the load and store parts of the instruction tree, and call
  // the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
  void DecodeLoadStore(Instruction* instr);

  // Decode the logical immediate and move wide immediate parts of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x2.
  void DecodeLogical(Instruction* instr);

  // Decode the bitfield and extraction parts of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x3.
  void DecodeBitfieldExtract(Instruction* instr);

  // Decode the data processing parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
  void DecodeDataProcessing(Instruction* instr);

  // Decode the floating point parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0xE, 0xF}.
  void DecodeFP(Instruction* instr);

  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 29:25 = 0x6.
  void DecodeAdvSIMDLoadStore(Instruction* instr);

  // Decode the Advanced SIMD (NEON) data processing part of the instruction
  // tree, and call the corresponding visitors.
  // On entry, instruction bits 27:25 = 0x7.
  void DecodeAdvSIMDDataProcessing(Instruction* instr);
};


} }  // namespace v8::internal

#endif  // V8_A64_DECODER_A64_H_
@@ -0,0 +1,376 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#include "v8.h" |
||||
|
|
||||
|
#include "codegen.h" |
||||
|
#include "deoptimizer.h" |
||||
|
#include "full-codegen.h" |
||||
|
#include "safepoint-table.h" |
||||
|
|
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
|
||||
|
int Deoptimizer::patch_size() {
  // Size of the code used to patch lazy bailout points.
  // Patching is done by Deoptimizer::DeoptimizeFunction.
  return 4 * kInstructionSize;
}


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  // Invalidate the relocation information, as it will be invalidated by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  Address code_start_address = code->instruction_start();
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif

  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;

    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);

    PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
    patcher.LoadLiteral(ip0, 2 * kInstructionSize);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

    ASSERT((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}
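
// A sketch of the patched sequence at each lazy bailout point (illustrative
// only; exact encodings depend on the assembler, but the layout accounts for
// the four instruction slots returned by patch_size()):
//
//   ldr   ip0, <literal below>   ; load the lazy deoptimization entry address
//   blr   ip0                    ; call it; lr is the patch return address
//   .quad <deopt_entry>          ; 64-bit literal, occupying two slots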


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.
  for (int i = 0; i < Register::NumRegisters(); i++) {
    input_->SetRegister(i, 0);
  }

  // TODO(all): Do we also need to set a value to csp?
  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));

  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
  }
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on A64 in the input frame.
  return false;
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(x0.code(), params);
  output_frame->SetRegister(x1.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}


Code* Deoptimizer::NotifyStubFailureBuiltin() {
  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}


#define __ masm()->

void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
                                0, FPRegister::NumAllocatableRegisters() - 1);
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSizeInBytes) +
      (saved_fp_registers.Count() * kDRegSizeInBytes);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;

  // Get the bailout id from the stack.
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // Get the address of the location in the code object. This is the return
  // address for lazy deoptimization.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, and correct one word for bailout id.
  __ Add(fp_to_sp, masm()->StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);

  // Allocate a new deoptimizer object.
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register x0.
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    int dst_offset = FrameDescription::double_registers_offset() +
        (i * kDoubleSize);
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
  ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  int src_offset = FrameDescription::double_registers_offset();
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    __ Ldr(reg, MemOperand(x1, src_offset));
    src_offset += kDoubleSize;
  }

  // Push state from the last output frame.
  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
  __ Push(x6);

  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
  // stack, then pops it all into registers. Here, we try to load it directly
  // into the relevant registers. Is this correct? If so, we should improve the
  // ARM code.

  // TODO(all): This code needs to be revisited. We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of last output frame while
  // reloading the other registers.
  ASSERT(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
  __ InitializeRootRegister();
  __ Br(continuation);
}


// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  {
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
    // a movz instruction to load the entry id.
    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      __ movz(masm()->Tmp0(), i);
      __ b(&done);
      ASSERT(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  // TODO(all): We need to add some kind of assertion to verify that Tmp0()
  // is not clobbered by Push.
  __ Push(masm()->Tmp0());
}
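
// Each table entry is therefore two instructions (table_entry_size_), of the
// form (a sketch; Tmp0() is the macro assembler's scratch register):
//
//   movz  <Tmp0>, #<entry id>
//   b     done
//
// so every entry falls through to the common tail, which pushes the entry id.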


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


#undef __

} }  // namespace v8::internal
File diff suppressed because it is too large
@@ -0,0 +1,115 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#ifndef V8_A64_DISASM_A64_H
#define V8_A64_DISASM_A64_H

#include "v8.h"

#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"


namespace v8 {
namespace internal {


class Disassembler: public DecoderVisitor {
 public:
  Disassembler();
  Disassembler(char* text_buffer, int buffer_size);
  virtual ~Disassembler();
  char* GetOutput();

  // Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

 protected:
  virtual void ProcessOutput(Instruction* instr);

  void Format(Instruction* instr, const char* mnemonic, const char* format);
  void Substitute(Instruction* instr, const char* string);
  int SubstituteField(Instruction* instr, const char* format);
  int SubstituteRegisterField(Instruction* instr, const char* format);
  int SubstituteImmediateField(Instruction* instr, const char* format);
  int SubstituteLiteralField(Instruction* instr, const char* format);
  int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
  int SubstituteShiftField(Instruction* instr, const char* format);
  int SubstituteExtendField(Instruction* instr, const char* format);
  int SubstituteConditionField(Instruction* instr, const char* format);
  int SubstitutePCRelAddressField(Instruction* instr, const char* format);
  int SubstituteBranchTargetField(Instruction* instr, const char* format);
  int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
  int SubstitutePrefetchField(Instruction* instr, const char* format);
  int SubstituteBarrierField(Instruction* instr, const char* format);

  bool RdIsZROrSP(Instruction* instr) const {
    return (instr->Rd() == kZeroRegCode);
  }

  bool RnIsZROrSP(Instruction* instr) const {
    return (instr->Rn() == kZeroRegCode);
  }

  bool RmIsZROrSP(Instruction* instr) const {
    return (instr->Rm() == kZeroRegCode);
  }

  bool RaIsZROrSP(Instruction* instr) const {
    return (instr->Ra() == kZeroRegCode);
  }

  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

  void ResetOutput();
  void AppendToOutput(const char* string, ...);

  char* buffer_;
  uint32_t buffer_pos_;
  uint32_t buffer_size_;
  bool own_buffer_;
};


class PrintDisassembler: public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
  ~PrintDisassembler() { }

  virtual void ProcessOutput(Instruction* instr);

 private:
  FILE *stream_;
};
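
// A minimal usage sketch (assuming a Decoder set up as in decoder-a64.h):
//
//   Decoder decoder;
//   PrintDisassembler disasm(stdout);
//   decoder.AppendVisitor(&disasm);
//   decoder.Decode(instr);  // Disassembles instr and prints it to stdout.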


} }  // namespace v8::internal

#endif  // V8_A64_DISASM_A64_H
@@ -0,0 +1,131 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#include "a64/constants-a64.h" |
||||
|
#include "a64/assembler-a64.h" |
||||
|
|
||||
|
#ifndef V8_A64_FRAMES_A64_H_ |
||||
|
#define V8_A64_FRAMES_A64_H_ |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];

// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;

// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
  CPURegList::GetSafepointSavedRegisters().Count();

class EntryFrameConstants : public AllStatic {
 public:
  static const int kCallerFPOffset =
      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};


class ExitFrameConstants : public AllStatic {
 public:
  static const int kFrameSize = 2 * kPointerSize;

  static const int kCallerSPDisplacement = 2 * kPointerSize;
  static const int kCallerPCOffset = 1 * kPointerSize;
  static const int kCallerFPOffset = 0 * kPointerSize;   // <- fp
  static const int kSPOffset = -1 * kPointerSize;
  static const int kCodeOffset = -2 * kPointerSize;
  static const int kLastExitFrameField = kCodeOffset;
};
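
// A sketch of the resulting exit frame layout (assuming kPointerSize == 8 on
// A64; offsets are relative to fp and derived from the constants above):
//
//   fp + 16 : caller's sp (kCallerSPDisplacement)
//   fp +  8 : caller's pc
//   fp +  0 : caller's fp   <- fp
//   fp -  8 : sp
//   fp - 16 : code object (kLastExitFrameField)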


class JavaScriptFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;

  // There are two words on the stack (saved fp and saved lr) between fp and
  // the arguments.
  static const int kLastParameterOffset = 2 * kPointerSize;

  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
};


class ArgumentsAdaptorFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;

  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + kPointerSize;
};


class ConstructFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
  static const int kLengthOffset = -4 * kPointerSize;
  static const int kConstructorOffset = -5 * kPointerSize;
  static const int kImplicitReceiverOffset = -6 * kPointerSize;

  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};


class InternalFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};


inline Object* JavaScriptFrame::function_slot_object() const {
  const int offset = JavaScriptFrameConstants::kFunctionOffset;
  return Memory::Object_at(fp() + offset);
}
|
|
||||
|
|
||||
|
inline void StackHandler::SetFp(Address slot, Address fp) { |
||||
|
Memory::Address_at(slot) = fp; |
||||
|
} |
||||
|
|
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#endif // V8_A64_FRAMES_A64_H_
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,334 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#include "v8.h" |
||||
|
|
||||
|
#if V8_TARGET_ARCH_A64 |
||||
|
|
||||
|
#define A64_DEFINE_FP_STATICS |
||||
|
|
||||
|
#include "a64/instructions-a64.h" |
||||
|
#include "a64/assembler-a64-inl.h" |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
|
||||
|
bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
         (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((1UL << width) - 1UL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //   N   imms    immr    size        S             R
  //   1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //   0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //   0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //   0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //   0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //   0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //   (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1UL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}
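
// A worked example (illustrative, not from the original source): for a W
// register with N=0, imm_s=0b000011 and imm_r=0b000010, the width loop above
// picks width=32 (bit 5 of imm_s is clear), builds bits=(1UL << 4) - 1 = 0xf,
// rotates it right by 2 to get 0xc0000003, and repeats it across 32 bits, so
// ImmLogical() returns 0xc0000003.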


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}
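
// A worked example (illustrative): ImmFP() == 0b01110000 gives bit7 = 0,
// bit6 = 1 and bit5_to_0 = 0b110000, so result = (31 << 25) | (0x30 << 19)
// = 0x3f800000, i.e. 1.0f.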


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


ptrdiff_t Instruction::ImmPCOffset() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    ASSERT(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


Instruction* Instruction::ImmPCOffsetTarget() {
  return this + ImmPCOffset();
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  int offset = target - this;
  return IsValidImmPCOffset(BranchType(), offset);
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm = Assembler::ImmPCRelAddress(target - this);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  ASSERT(((source - this) & 3) == 0);
  int offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  ASSERT(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}
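
// For example (a sketch, assuming a 16-bit payload such as 0x1234), the
// emitted marker would be
//   movz xzr, #0x1234
// which executes as a no-op (writes to xzr are discarded) but lets
// InlineData() recover the payload from the instruction's immediate field.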


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
@@ -0,0 +1,516 @@
// Copyright 2013 the V8 project authors. All rights reserved.

#ifndef V8_A64_INSTRUCTIONS_A64_H_
#define V8_A64_INSTRUCTIONS_A64_H_

#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/utils-a64.h"

namespace v8 {
namespace internal {


// ISA constants. -------------------------------------------------------------

typedef uint32_t Instr;

// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(A64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif  // defined(A64_DEFINE_FP_STATICS)

DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);

// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);

#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE


enum LSDataSize {
  LSByte       = 0,
  LSHalfword   = 1,
  LSWord       = 2,
  LSDoubleWord = 3
};

LSDataSize CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType    = 1,
  UncondBranchType  = 2,
  CompareBranchType = 3,
  TestBranchType    = 4
};

enum AddrMode {
  Offset,
  PreIndex,
  PostIndex
};

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway
};

enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr InstructionBits() const {
    Instr bits;
    memcpy(&bits, this, sizeof(bits));
    return bits;
  }

  void SetInstructionBits(Instr new_instr) {
    memcpy(this, &new_instr, sizeof(new_instr));
  }

  int Bit(int pos) const {
    return (InstructionBits() >> pos) & 1;
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const {
    return InstructionBits() & mask;
  }

  Instruction* following(int count = 1) {
    return this + count * kInstructionSize;
  }

  Instruction* preceding(int count = 1) {
    return this - count * kInstructionSize;
  }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func)  \
  int64_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }

  uint64_t ImmLogical();
  float ImmFP32();
  double ImmFP64();

  LSDataSize SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsLdrLiteral() const {
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsLdrLiteralX() const {
    return Mask(LoadLiteralMask) == LDR_x_lit;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  // Match any loads or stores, including pairs.
  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  // Match any loads, including pairs.
  bool IsLoad() const;
  // Match any stores, including pairs.
  bool IsStore() const;

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use csp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into csp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use csp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
    switch (branch_type) {
      case UncondBranchType:
        return ImmUncondBranch_width;
      case CondBranchType:
        return ImmCondBranch_width;
      case CompareBranchType:
        return ImmCmpBranch_width;
      case TestBranchType:
        return ImmTestBranch_width;
      default:
        UNREACHABLE();
        return 0;
    }
  }

  // The range of the branch instruction, expressed as 'instr +- range'.
  static int32_t ImmBranchRange(ImmBranchType branch_type) {
    return
      (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
      kInstructionSize;
  }
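
  // Worked example (assuming the usual A64 immediate widths): a conditional
  // branch has a 19-bit immediate (ImmCondBranch_width == 19), scaled by the
  // 4-byte instruction size (kInstructionSizeLog2 == 2), so:
  //   ImmBranchRange(CondBranchType) == (1 << (19 + 2)) / 2 - 4
  //                                  == 1048572 bytes (roughly +/- 1MB).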

  int ImmBranch() const {
    switch (BranchType()) {
      case CondBranchType: return ImmCondBranch();
      case UncondBranchType: return ImmUncondBranch();
      case CompareBranchType: return ImmCmpBranch();
      case TestBranchType: return ImmTestBranch();
      default: UNREACHABLE();
    }
    return 0;
  }

  bool IsBranchAndLinkToRegister() const {
    return Mask(UnconditionalBranchToRegisterMask) == BLR;
  }

  bool IsMovz() const {
    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
           (Mask(MoveWideImmediateMask) == MOVZ_w);
  }

  bool IsMovk() const {
    return (Mask(MoveWideImmediateMask) == MOVK_x) ||
           (Mask(MoveWideImmediateMask) == MOVK_w);
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  bool IsNop(int n) {
    // A marking nop is an instruction
    //   mov r<n>, r<n>
    // which is encoded as
    //   orr r<n>, xzr, r<n>
    return (Mask(LogicalShiftedMask) == ORR_x) &&
           (Rd() == Rm()) &&
           (Rd() == n);
  }
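
  // For instance (illustrative), a marking nop with n == 3 is
  //   orr x3, xzr, x3
  // i.e. the encoding of 'mov x3, x3'; IsNop(3) matches it, while the
  // architectural 'nop' instruction does not.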

  // Find the PC offset encoded in this instruction. 'this' may be a branch or
  // a PC-relative addressing instruction.
  // The offset returned is unscaled.
  ptrdiff_t ImmPCOffset();

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  Instruction* ImmPCOffsetTarget();

  static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
  bool IsTargetInImmPCOffsetRange(Instruction* target);
  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(Instruction* source);

  uint8_t* LiteralAddress() {
    int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
    return reinterpret_cast<uint8_t*>(this) + offset;
  }

  uint32_t Literal32() {
    uint32_t literal;
    memcpy(&literal, LiteralAddress(), sizeof(literal));

    return literal;
  }

  uint64_t Literal64() {
    uint64_t literal;
    memcpy(&literal, LiteralAddress(), sizeof(literal));

    return literal;
  }

  float LiteralFP32() {
    return rawbits_to_float(Literal32());
  }

  double LiteralFP64() {
    return rawbits_to_double(Literal64());
  }

  Instruction* NextInstruction() {
    return this + kInstructionSize;
  }

  Instruction* InstructionAtOffset(int64_t offset) {
    ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
                     kInstructionSize));
    return this + offset;
  }

  template<typename T> static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }


  void SetPCRelImmTarget(Instruction* target);
  void SetBranchImmTarget(Instruction* target);
};


// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instruction sequences generated by the
// MacroAssembler.
class InstructionSequence : public Instruction {
 public:
  static InstructionSequence* At(Address address) {
    return reinterpret_cast<InstructionSequence*>(address);
  }

  // Sequences generated by MacroAssembler::InlineData().
  bool IsInlineData() const;
  uint64_t InlineData() const;
};


// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.

// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;

// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;

// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in A64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
//   x0: The format string, then either of:
//     x1-x7: Optional arguments.
//     d0-d7: Optional arguments.
//
// Floating-point and integer arguments are passed in separate sets of
// registers in AAPCS64 (even for varargs functions), so it is not possible to
// determine the type or location of each argument without some information
// about the values that were passed in. This information could be retrieved
// from the printf format string, but the format string is not trivial to
// parse so we encode the relevant information with the HLT instruction.
// - Type
//    Either kRegister or kFPRegister, but stored as a uint32_t because there's
//    no way to guarantee the size of the CPURegister::RegisterType enum.
const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
const unsigned kPrintfLength = 2 * kInstructionSize;
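
// A minimal sketch of the resulting code layout (illustrative only; the exact
// emission helpers are an assumption, but the offsets follow the constants
// above):
//   hlt #kImmExceptionIsPrintf   // offset 0 * kInstructionSize
//   dc32 type                    // offset kPrintfTypeOffset: kRegister or
//                                // kFPRegister, stored as a uint32_t.
// Execution resumes kPrintfLength (2 * kInstructionSize) bytes after the HLT.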

// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
//   kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
//   string data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
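
// A minimal sketch of the layout (illustrative only; offsets follow the
// constants above):
//   hlt #kImmExceptionIsDebug            // offset 0
//   dc32 code                            // offset kDebugCodeOffset
//   dc32 parameters                      // offset kDebugParamsOffset
//   NULL-terminated, padded message      // offset kDebugMessageOffset
//   hlt #kImmExceptionIsUnreachable      // guards against running the string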

// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise, TRACE_ENABLE and TRACE_DISABLE will enable or disable
// tracing before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
  NO_PARAM     = 0,
  BREAK        = 1 << 0,
  LOG_DISASM   = 1 << 1,  // Use only with TRACE. Disassemble the code.
  LOG_REGS     = 1 << 2,  // Log general purpose registers.
  LOG_FP_REGS  = 1 << 3,  // Log floating-point registers.
  LOG_SYS_REGS = 1 << 4,  // Log the status flags.
  LOG_WRITE    = 1 << 5,  // Log any memory write.

  LOG_STATE    = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
  LOG_ALL      = LOG_DISASM | LOG_STATE | LOG_WRITE,

  // Trace control.
  TRACE_ENABLE   = 1 << 6,
  TRACE_DISABLE  = 2 << 6,
  TRACE_OVERRIDE = 3 << 6
};


} }  // namespace v8::internal


#endif  // V8_A64_INSTRUCTIONS_A64_H_
@ -0,0 +1,618 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/instrument-a64.h"

namespace v8 {
namespace internal {

Counter::Counter(const char* name, CounterType type)
    : count_(0), enabled_(false), type_(type) {
  ASSERT(name != NULL);
  strncpy(name_, name, kCounterNameMaxLength);
}


void Counter::Enable() {
  enabled_ = true;
}


void Counter::Disable() {
  enabled_ = false;
}


bool Counter::IsEnabled() {
  return enabled_;
}


void Counter::Increment() {
  if (enabled_) {
    count_++;
  }
}


uint64_t Counter::count() {
  uint64_t result = count_;
  if (type_ == Gauge) {
    // If the counter is a Gauge, reset the count after reading.
    count_ = 0;
  }
  return result;
}


const char* Counter::name() {
  return name_;
}


CounterType Counter::type() {
  return type_;
}


typedef struct {
  const char* name;
  CounterType type;
} CounterDescriptor;


static const CounterDescriptor kCounterList[] = {
  {"Instruction", Cumulative},

  {"Move Immediate", Gauge},
  {"Add/Sub DP", Gauge},
  {"Logical DP", Gauge},
  {"Other Int DP", Gauge},
  {"FP DP", Gauge},

  {"Conditional Select", Gauge},
  {"Conditional Compare", Gauge},

  {"Unconditional Branch", Gauge},
  {"Compare and Branch", Gauge},
  {"Test and Branch", Gauge},
  {"Conditional Branch", Gauge},

  {"Load Integer", Gauge},
  {"Load FP", Gauge},
  {"Load Pair", Gauge},
  {"Load Literal", Gauge},

  {"Store Integer", Gauge},
  {"Store FP", Gauge},
  {"Store Pair", Gauge},

  {"PC Addressing", Gauge},
  {"Other", Gauge},
  {"SP Adjust", Gauge},
};


Instrument::Instrument(const char* datafile, uint64_t sample_period)
    : output_stream_(stderr), sample_period_(sample_period) {

  // Set up the output stream. If datafile is non-NULL, use that file. If it
  // can't be opened, or datafile is NULL, use stderr.
  if (datafile != NULL) {
    output_stream_ = fopen(datafile, "w");
    if (output_stream_ == NULL) {
      fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
      output_stream_ = stderr;
    }
  }

  static const int num_counters =
      sizeof(kCounterList) / sizeof(CounterDescriptor);

  // Dump an instrumentation description comment at the top of the file.
  fprintf(output_stream_, "# counters=%d\n", num_counters);
  fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);

  // Construct Counter objects from counter description array.
  for (int i = 0; i < num_counters; i++) {
    Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
    counters_.push_back(counter);
  }

  DumpCounterNames();
}
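
// The resulting file is a simple comma-separated log (an illustrative sketch
// of the first lines, given the 22-entry counter list and the default sample
// period above):
//   # counters=22
//   # sample_period=4194304
//   Instruction,Move Immediate,Add/Sub DP,...,SP Adjust,
//   4194304,1024,2048,...,16,
// One line of counts is appended per DumpCounters() call.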

Instrument::~Instrument() {
  // Dump any remaining instruction data to the output file.
  DumpCounters();

  // Free all the counter objects.
  std::list<Counter*>::iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    delete *it;
  }

  if (output_stream_ != stderr) {
    fclose(output_stream_);
  }
}


void Instrument::Update() {
  // Increment the instruction counter, and dump all counters if a sample
  // period has elapsed.
  static Counter* counter = GetCounter("Instruction");
  ASSERT(counter->type() == Cumulative);
  counter->Increment();

  if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
    DumpCounters();
  }
}
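
// Typical wiring (a sketch; it assumes the visitor-registration API declared
// in a64/decoder-a64.h, and "counters.csv" is a hypothetical output path):
//   Decoder decoder;
//   Instrument instrument("counters.csv");
//   decoder.AppendVisitor(&instrument);
//   decoder.Decode(first_instr);  // Each decoded instruction updates counters.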

void Instrument::DumpCounters() {
  // Iterate through the counter objects, dumping their values to the output
  // stream.
  std::list<Counter*>::const_iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
  }
  fprintf(output_stream_, "\n");
  fflush(output_stream_);
}


void Instrument::DumpCounterNames() {
  // Iterate through the counter objects, dumping the counter names to the
  // output stream.
  std::list<Counter*>::const_iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    fprintf(output_stream_, "%s,", (*it)->name());
  }
  fprintf(output_stream_, "\n");
  fflush(output_stream_);
}


void Instrument::HandleInstrumentationEvent(unsigned event) {
  switch (event) {
    case InstrumentStateEnable: Enable(); break;
    case InstrumentStateDisable: Disable(); break;
    default: DumpEventMarker(event);
  }
}


void Instrument::DumpEventMarker(unsigned marker) {
  // Dump an event marker to the output stream as a specially formatted comment
  // line.
  static Counter* counter = GetCounter("Instruction");

  fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
          (marker >> 8) & 0xff, counter->count());
}


Counter* Instrument::GetCounter(const char* name) {
  // Get a Counter object by name from the counter list.
  std::list<Counter*>::const_iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    if (strcmp((*it)->name(), name) == 0) {
      return *it;
    }
  }

  // A Counter by that name does not exist: print an error message to stderr
  // and the output file, and exit.
  static const char* error_message =
      "# Error: Unknown counter \"%s\". Exiting.\n";
  fprintf(stderr, error_message, name);
  fprintf(output_stream_, error_message, name);
  exit(1);
}


void Instrument::Enable() {
  std::list<Counter*>::iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    (*it)->Enable();
  }
}


void Instrument::Disable() {
  std::list<Counter*>::iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    (*it)->Disable();
  }
}


void Instrument::VisitPCRelAddressing(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("PC Addressing");
  counter->Increment();
}


void Instrument::VisitAddSubImmediate(Instruction* instr) {
  Update();
  static Counter* sp_counter = GetCounter("SP Adjust");
  static Counter* add_sub_counter = GetCounter("Add/Sub DP");
  if (((instr->Mask(AddSubOpMask) == SUB) ||
       (instr->Mask(AddSubOpMask) == ADD)) &&
      (instr->Rd() == 31) && (instr->Rn() == 31)) {
    // Count adjustments to the C stack pointer caused by V8 needing two SPs.
    sp_counter->Increment();
  } else {
    add_sub_counter->Increment();
  }
}


void Instrument::VisitLogicalImmediate(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Logical DP");
  counter->Increment();
}


void Instrument::VisitMoveWideImmediate(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Move Immediate");

  if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
    unsigned imm = instr->ImmMoveWide();
    HandleInstrumentationEvent(imm);
  } else {
    counter->Increment();
  }
}
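
// A MOVN writing to the zero register is repurposed as an in-stream
// instrumentation event rather than counted as a real move. For example
// (illustrative), emitting 'movn xzr, #0' in generated code carries the
// immediate InstrumentStateDisable to HandleInstrumentationEvent(), pausing
// the counters; any other unrecognised immediate is dumped as a two-character
// event marker.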

void Instrument::VisitBitfield(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitExtract(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitUnconditionalBranch(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Unconditional Branch");
  counter->Increment();
}


void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Unconditional Branch");
  counter->Increment();
}


void Instrument::VisitCompareBranch(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Compare and Branch");
  counter->Increment();
}


void Instrument::VisitTestBranch(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Test and Branch");
  counter->Increment();
}


void Instrument::VisitConditionalBranch(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Branch");
  counter->Increment();
}


void Instrument::VisitSystem(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitException(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::InstrumentLoadStorePair(Instruction* instr) {
  static Counter* load_pair_counter = GetCounter("Load Pair");
  static Counter* store_pair_counter = GetCounter("Store Pair");
  if (instr->Mask(LoadStorePairLBit) != 0) {
    load_pair_counter->Increment();
  } else {
    store_pair_counter->Increment();
  }
}


void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadLiteral(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Load Literal");
  counter->Increment();
}


void Instrument::InstrumentLoadStore(Instruction* instr) {
  static Counter* load_int_counter = GetCounter("Load Integer");
  static Counter* store_int_counter = GetCounter("Store Integer");
  static Counter* load_fp_counter = GetCounter("Load FP");
  static Counter* store_fp_counter = GetCounter("Store FP");

  switch (instr->Mask(LoadStoreOpMask)) {
    case STRB_w:   // Fall through.
    case STRH_w:   // Fall through.
    case STR_w:    // Fall through.
    case STR_x:    store_int_counter->Increment(); break;
    case STR_s:    // Fall through.
    case STR_d:    store_fp_counter->Increment(); break;
    case LDRB_w:   // Fall through.
    case LDRH_w:   // Fall through.
    case LDR_w:    // Fall through.
    case LDR_x:    // Fall through.
    case LDRSB_x:  // Fall through.
    case LDRSH_x:  // Fall through.
    case LDRSW_x:  // Fall through.
    case LDRSB_w:  // Fall through.
    case LDRSH_w:  load_int_counter->Increment(); break;
    case LDR_s:    // Fall through.
    case LDR_d:    load_fp_counter->Increment(); break;
    default: UNREACHABLE();
  }
}


void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLogicalShifted(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Logical DP");
  counter->Increment();
}


void Instrument::VisitAddSubShifted(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitAddSubExtended(Instruction* instr) {
  Update();
  static Counter* sp_counter = GetCounter("SP Adjust");
  static Counter* add_sub_counter = GetCounter("Add/Sub DP");
  if (((instr->Mask(AddSubOpMask) == SUB) ||
       (instr->Mask(AddSubOpMask) == ADD)) &&
      (instr->Rd() == 31) && (instr->Rn() == 31)) {
    // Count adjustments to the C stack pointer caused by V8 needing two SPs.
    sp_counter->Increment();
  } else {
    add_sub_counter->Increment();
  }
}


void Instrument::VisitAddSubWithCarry(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitConditionalSelect(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Select");
  counter->Increment();
}


void Instrument::VisitDataProcessing1Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitDataProcessing2Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitDataProcessing3Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitFPCompare(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPConditionalCompare(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitFPConditionalSelect(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Conditional Select");
  counter->Increment();
}


void Instrument::VisitFPImmediate(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPIntegerConvert(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitUnallocated(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitUnimplemented(Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


} }  // namespace v8::internal
@ -0,0 +1,108 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_INSTRUMENT_A64_H_
#define V8_A64_INSTRUMENT_A64_H_

#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "a64/constants-a64.h"
#include "a64/instrument-a64.h"

namespace v8 {
namespace internal {

const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;


enum InstrumentState {
  InstrumentStateDisable = 0,
  InstrumentStateEnable = 1
};


enum CounterType {
  Gauge = 0,      // Gauge counters reset themselves after reading.
  Cumulative = 1  // Cumulative counters keep their value after reading.
};


class Counter {
 public:
  Counter(const char* name, CounterType type = Gauge);

  void Increment();
  void Enable();
  void Disable();
  bool IsEnabled();
  uint64_t count();
  const char* name();
  CounterType type();

 private:
  char name_[kCounterNameMaxLength];
  uint64_t count_;
  bool enabled_;
  CounterType type_;
};


class Instrument: public DecoderVisitor {
 public:
  explicit Instrument(const char* datafile = NULL,
      uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
  ~Instrument();

  // Declare all Visitor functions.
  #define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
  #undef DECLARE

 private:
  void Update();
  void Enable();
  void Disable();
  void DumpCounters();
  void DumpCounterNames();
  void DumpEventMarker(unsigned marker);
  void HandleInstrumentationEvent(unsigned event);
  Counter* GetCounter(const char* name);

  void InstrumentLoadStore(Instruction* instr);
  void InstrumentLoadStorePair(Instruction* instr);

  std::list<Counter*> counters_;

  FILE *output_stream_;
  uint64_t sample_period_;
};

} }  // namespace v8::internal


#endif  // V8_A64_INSTRUMENT_A64_H_
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,473 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
#define V8_A64_LITHIUM_CODEGEN_A64_H_

#include "a64/lithium-a64.h"

#include "a64/lithium-gap-resolver-a64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);

  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall-through.
  // You shouldn't use it directly; instead, consider one of the helpers, such
  // as LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  Deoptimizer::BailoutType DeoptimizeHeader(
      LEnvironment* environment,
      Deoptimizer::BailoutType* override_bailout_type);
  void Deoptimize(LEnvironment* environment);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType bailout_type);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void ApplyCheckIf(Condition cc, LBoundsCheck* check);

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  void CalcKeyedArrayBaseRegister(Register base,
                                  Register elements,
                                  Register key,
                                  bool key_is_tagged,
                                  ElementsKind elements_kind);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  int old_position_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PushSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PushSafepointRegisters();
          codegen_->masm_->PushSafepointFPRegisters();
          break;
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PopSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PopSafepointFPRegisters();
          codegen_->masm_->PopSafepointRegisters();
          break;
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition is verified.
//
// For actual examples, see the concrete implementations in
// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
      : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
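
// A minimal concrete generator might look like the following (a sketch only;
// the real implementations, e.g. BranchOnCondition, live in
// lithium-codegen-a64.cc and also handle special cases such as 'al'):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const { masm()->B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       masm()->B(InvertCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };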


} }  // namespace v8::internal

#endif  // V8_A64_LITHIUM_CODEGEN_A64_H_
@ -0,0 +1,326 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "a64/lithium-gap-resolver-a64.h"
#include "a64/lithium-codegen-a64.h"

namespace v8 {
namespace internal {

// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list, and
// using the root register has two advantages:
//  - It is not in the crankshaft allocatable registers list, so it can't
//    interfere with any of the moves we are resolving.
//  - We don't need to push it on the stack, as we can reload it with its value
//    once we have resolved a cycle.
#define kSavedValue root
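
// Example of the kind of cycle this handles (a sketch): the parallel move
// { x0 -> x1, x1 -> x0 } has no safe ordering, so the resolver copies one
// source into kSavedValue (the root register), performs the other move, uses
// the saved value to complete the cycle, and finally re-runs
// InitializeRootRegister() (see need_to_restore_root_ below) to restore root.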

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL), need_to_restore_root_(false) { }


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    if (!move.IsEliminated()) {
      ASSERT(move.source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  if (need_to_restore_root_) {
    ASSERT(kSavedValue.Is(root));
    __ InitializeRootRegister();
    need_to_restore_root_ = false;
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.
  LMoveOperands& current_move = moves_[index];

  ASSERT(!current_move.IsPending());
  ASSERT(!current_move.IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Multiple moves can
  // be pending because this function is recursive.
  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
  LOperand* destination = current_move.destination();
  current_move.set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  current_move.set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
|
LMoveOperands other_move = moves_[root_index_]; |
||||
|
if (other_move.Blocks(destination)) { |
||||
|
ASSERT(other_move.IsPending()); |
||||
|
BreakCycle(index); |
||||
|
return; |
||||
|
} |
||||
|
|
||||
|
// This move is no longer blocked.
|
||||
|
EmitMove(index); |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void LGapResolver::Verify() { |
||||
|
#ifdef ENABLE_SLOW_ASSERTS |
||||
|
// No operand should be the destination for more than one move.
|
||||
|
for (int i = 0; i < moves_.length(); ++i) { |
||||
|
LOperand* destination = moves_[i].destination(); |
||||
|
for (int j = i + 1; j < moves_.length(); ++j) { |
||||
|
SLOW_ASSERT(!destination->Equals(moves_[j].destination())); |
||||
|
} |
||||
|
} |
||||
|
#endif |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void LGapResolver::BreakCycle(int index) { |
||||
|
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); |
||||
|
ASSERT(!in_cycle_); |
||||
|
|
||||
|
// We use a register which is not allocatable by crankshaft to break the cycle
|
||||
|
// to be sure it doesn't interfere with the moves we are resolving.
|
||||
|
ASSERT(!kSavedValue.IsAllocatable()); |
||||
|
need_to_restore_root_ = true; |
||||
|
|
||||
|
// We save in a register the source of that move and we remember its
|
||||
|
// destination. Then we mark this move as resolved so the cycle is
|
||||
|
// broken and we can perform the other moves.
|
||||
|
in_cycle_ = true; |
||||
|
LOperand* source = moves_[index].source(); |
||||
|
saved_destination_ = moves_[index].destination(); |
||||
|
|
||||
|
if (source->IsRegister()) { |
||||
|
__ Mov(kSavedValue, cgen_->ToRegister(source)); |
||||
|
} else if (source->IsStackSlot()) { |
||||
|
__ Ldr(kSavedValue, cgen_->ToMemOperand(source)); |
||||
|
} else if (source->IsDoubleRegister()) { |
||||
|
// TODO(all): We should use a double register to store the value to avoid
|
||||
|
// the penalty of the mov across register banks. We are going to reserve
|
||||
|
// d31 to hold 0.0 value. We could clobber this register while breaking the
|
||||
|
// cycle and restore it after like we do with the root register.
|
||||
|
// LGapResolver::RestoreValue() will need to be updated as well when we'll
|
||||
|
// do that.
|
||||
|
__ Fmov(kSavedValue, cgen_->ToDoubleRegister(source)); |
||||
|
} else if (source->IsDoubleStackSlot()) { |
||||
|
__ Ldr(kSavedValue, cgen_->ToMemOperand(source)); |
||||
|
} else { |
||||
|
UNREACHABLE(); |
||||
|
} |
||||
|
|
||||
|
// Mark this move as resolved.
|
||||
|
// This move will be actually performed by moving the saved value to this
|
||||
|
// move's destination in LGapResolver::RestoreValue().
|
||||
|
moves_[index].Eliminate(); |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void LGapResolver::RestoreValue() { |
||||
|
ASSERT(in_cycle_); |
||||
|
ASSERT(saved_destination_ != NULL); |
||||
|
|
||||
|
if (saved_destination_->IsRegister()) { |
||||
|
__ Mov(cgen_->ToRegister(saved_destination_), kSavedValue); |
||||
|
} else if (saved_destination_->IsStackSlot()) { |
||||
|
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_)); |
||||
|
} else if (saved_destination_->IsDoubleRegister()) { |
||||
|
__ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue); |
||||
|
} else if (saved_destination_->IsDoubleStackSlot()) { |
||||
|
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_)); |
||||
|
} else { |
||||
|
UNREACHABLE(); |
||||
|
} |
||||
|
|
||||
|
in_cycle_ = false; |
||||
|
saved_destination_ = NULL; |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void LGapResolver::EmitMove(int index) { |
||||
|
LOperand* source = moves_[index].source(); |
||||
|
LOperand* destination = moves_[index].destination(); |
||||
|
|
||||
|
// Dispatch on the source and destination operand kinds. Not all
|
||||
|
// combinations are possible.
|
||||
|
|
||||
|
if (source->IsRegister()) { |
||||
|
Register source_register = cgen_->ToRegister(source); |
||||
|
if (destination->IsRegister()) { |
||||
|
__ Mov(cgen_->ToRegister(destination), source_register); |
||||
|
} else { |
||||
|
ASSERT(destination->IsStackSlot()); |
||||
|
__ Str(source_register, cgen_->ToMemOperand(destination)); |
||||
|
} |
||||
|
|
||||
|
} else if (source->IsStackSlot()) { |
||||
|
MemOperand source_operand = cgen_->ToMemOperand(source); |
||||
|
if (destination->IsRegister()) { |
||||
|
__ Ldr(cgen_->ToRegister(destination), source_operand); |
||||
|
} else { |
||||
|
ASSERT(destination->IsStackSlot()); |
||||
|
EmitStackSlotMove(index); |
||||
|
} |
||||
|
|
||||
|
} else if (source->IsConstantOperand()) { |
||||
|
LConstantOperand* constant_source = LConstantOperand::cast(source); |
||||
|
if (destination->IsRegister()) { |
||||
|
Register dst = cgen_->ToRegister(destination); |
||||
|
if (cgen_->IsSmi(constant_source)) { |
||||
|
__ Mov(dst, Operand(cgen_->ToSmi(constant_source))); |
||||
|
} else if (cgen_->IsInteger32Constant(constant_source)) { |
||||
|
__ Mov(dst, cgen_->ToInteger32(constant_source)); |
||||
|
} else { |
||||
|
__ LoadObject(dst, cgen_->ToHandle(constant_source)); |
||||
|
} |
||||
|
} else if (destination->IsDoubleRegister()) { |
||||
|
DoubleRegister result = cgen_->ToDoubleRegister(destination); |
||||
|
__ Fmov(result, cgen_->ToDouble(constant_source)); |
||||
|
} else { |
||||
|
ASSERT(destination->IsStackSlot()); |
||||
|
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
|
||||
|
need_to_restore_root_ = true; |
||||
|
if (cgen_->IsSmi(constant_source)) { |
||||
|
__ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source))); |
||||
|
} else if (cgen_->IsInteger32Constant(constant_source)) { |
||||
|
__ Mov(kSavedValue, cgen_->ToInteger32(constant_source)); |
||||
|
} else { |
||||
|
__ LoadObject(kSavedValue, cgen_->ToHandle(constant_source)); |
||||
|
} |
||||
|
__ Str(kSavedValue, cgen_->ToMemOperand(destination)); |
||||
|
} |
||||
|
|
||||
|
} else if (source->IsDoubleRegister()) { |
||||
|
DoubleRegister src = cgen_->ToDoubleRegister(source); |
||||
|
if (destination->IsDoubleRegister()) { |
||||
|
__ Fmov(cgen_->ToDoubleRegister(destination), src); |
||||
|
} else { |
||||
|
ASSERT(destination->IsDoubleStackSlot()); |
||||
|
__ Str(src, cgen_->ToMemOperand(destination)); |
||||
|
} |
||||
|
|
||||
|
} else if (source->IsDoubleStackSlot()) { |
||||
|
MemOperand src = cgen_->ToMemOperand(source); |
||||
|
if (destination->IsDoubleRegister()) { |
||||
|
__ Ldr(cgen_->ToDoubleRegister(destination), src); |
||||
|
} else { |
||||
|
ASSERT(destination->IsDoubleStackSlot()); |
||||
|
EmitStackSlotMove(index); |
||||
|
} |
||||
|
|
||||
|
} else { |
||||
|
UNREACHABLE(); |
||||
|
} |
||||
|
|
||||
|
// The move has been emitted, we can eliminate it.
|
||||
|
moves_[index].Eliminate(); |
||||
|
} |
||||
|
|
||||
|
|
||||
|
void LGapResolver::EmitStackSlotMove(int index) { |
||||
|
// We need a temp register to perform a stack slot to stack slot move, and
|
||||
|
// the register must not be involved in breaking cycles.
|
||||
|
|
||||
|
// Use the Crankshaft double scratch register as the temporary.
|
||||
|
DoubleRegister temp = crankshaft_fp_scratch; |
||||
|
|
||||
|
LOperand* src = moves_[index].source(); |
||||
|
LOperand* dst = moves_[index].destination(); |
||||
|
|
||||
|
ASSERT(src->IsStackSlot()); |
||||
|
ASSERT(dst->IsStackSlot()); |
||||
|
__ Ldr(temp, cgen_->ToMemOperand(src)); |
||||
|
__ Str(temp, cgen_->ToMemOperand(dst)); |
||||
|
} |
||||
|
|
||||
|
} } // namespace v8::internal
|
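
The resolver above is the standard parallel-move algorithm: emit any move whose destination no pending move still reads, recurse depth-first on blockers, and when only a cycle remains, break it through one reserved scratch location (the root register here; using a double register for FP values is a noted TODO). Below is a minimal, self-contained sketch of the same idea over plain integer locations; Sequentialize, EmitMove and kScratch are illustrative names for this sketch, not V8 API.

#include <cstdio>
#include <vector>

const int kScratch = -1;        // Stands in for the root register.

struct Move { int src, dst; };  // dst <- src over abstract locations.

static void PrintLoc(int loc) {
  if (loc == kScratch) std::printf("scratch");
  else std::printf("r%d", loc);
}

static void EmitMove(const Move& m) {
  std::printf("  ");
  PrintLoc(m.dst);
  std::printf(" <- ");
  PrintLoc(m.src);
  std::printf("\n");
}

// Sequentialize a parallel move: emit any move whose destination is not read
// by another pending move; when everything left is blocked, the remainder is
// one or more cycles, so spill one source to the scratch (BreakCycle) and let
// its readers pick the value up from there (RestoreValue).
void Sequentialize(std::vector<Move> moves) {
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      bool blocked = false;
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j != i && moves[j].src == moves[i].dst) blocked = true;
      }
      if (!blocked) {
        EmitMove(moves[i]);
        moves.erase(moves.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Every remaining move is blocked: break a cycle through the scratch.
      int spilled = moves[0].src;
      EmitMove(Move{spilled, kScratch});  // scratch <- r[spilled]
      for (size_t j = 0; j < moves.size(); ++j) {
        if (moves[j].src == spilled) moves[j].src = kScratch;
      }
    }
  }
}

int main() {
  std::printf("chain:\n");
  Sequentialize({{1, 0}, {2, 1}});  // r0 <- r1 then r1 <- r2; no scratch.
  std::printf("swap:\n");
  Sequentialize({{1, 0}, {0, 1}});  // Needs the scratch to break the cycle.
  return 0;
}

For the swap this prints the spill, the surviving move, then the restore from the scratch; for the acyclic chain the scratch is never touched, matching the fast path through Resolve(). One scratch suffices because each blocked cycle is drained completely before the next one is broken.
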
@@ -0,0 +1,90 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_

#include "v8.h"

#include "lithium.h"

namespace v8 {
namespace internal {

class LCodeGen;
class LGapResolver;

class LGapResolver BASE_EMBEDDED {
 public:
  explicit LGapResolver(LCodeGen* owner);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(LParallelMove* parallel_move);

 private:
  // Build the initial list of moves.
  void BuildInitialMoveList(LParallelMove* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // If a cycle is found in the series of moves, save the blocking value to
  // a scratch register. The cycle must be found by hitting the root of the
  // depth-first search.
  void BreakCycle(int index);

  // After a cycle has been resolved, restore the value from the scratch
  // register to its proper destination.
  void RestoreValue();

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Emit a move from one stack slot to another.
  void EmitStackSlotMove(int index);

  // Verify the move list before performing moves.
  void Verify();

  LCodeGen* cgen_;

  // List of moves not yet resolved.
  ZoneList<LMoveOperands> moves_;

  int root_index_;
  bool in_cycle_;
  LOperand* saved_destination_;

  // We use the root register as a scratch in a few places. When that happens,
  // this flag is set to indicate that it needs to be restored.
  bool need_to_restore_root_;
};

} }  // namespace v8::internal

#endif  // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,315 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_

#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {


#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
 public:
  RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
  virtual ~RegExpMacroAssemblerA64();
  virtual int stack_limit_slack();
  virtual void AdvanceCurrentPosition(int by);
  virtual void AdvanceRegister(int reg, int by);
  virtual void Backtrack();
  virtual void Bind(Label* label);
  virtual void CheckAtStart(Label* on_at_start);
  virtual void CheckCharacter(unsigned c, Label* on_equal);
  virtual void CheckCharacterAfterAnd(unsigned c,
                                      unsigned mask,
                                      Label* on_equal);
  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
  virtual void CheckCharacters(Vector<const uc16> str,
                               int cp_offset,
                               Label* on_failure,
                               bool check_end_of_string);
  // A "greedy loop" is a loop that is both greedy and has a simple
  // body. It has a particularly simple implementation.
  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
  virtual void CheckNotAtStart(Label* on_not_at_start);
  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                               Label* on_no_match);
  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
  virtual void CheckNotCharacterAfterAnd(unsigned c,
                                         unsigned mask,
                                         Label* on_not_equal);
  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                              uc16 minus,
                                              uc16 mask,
                                              Label* on_not_equal);
  virtual void CheckCharacterInRange(uc16 from,
                                     uc16 to,
                                     Label* on_in_range);
  virtual void CheckCharacterNotInRange(uc16 from,
                                        uc16 to,
                                        Label* on_not_in_range);
  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);

  // Checks whether the given offset from the current position is before
  // the end of the string.
  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
  virtual bool CheckSpecialCharacterClass(uc16 type,
                                          Label* on_no_match);
  virtual void Fail();
  virtual Handle<HeapObject> GetCode(Handle<String> source);
  virtual void GoTo(Label* label);
  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
  virtual void IfRegisterEqPos(int reg, Label* if_eq);
  virtual IrregexpImplementation Implementation();
  virtual void LoadCurrentCharacter(int cp_offset,
                                    Label* on_end_of_input,
                                    bool check_bounds = true,
                                    int characters = 1);
  virtual void PopCurrentPosition();
  virtual void PopRegister(int register_index);
  virtual void PushBacktrack(Label* label);
  virtual void PushCurrentPosition();
  virtual void PushRegister(int register_index,
                            StackCheckFlag check_stack_limit);
  virtual void ReadCurrentPositionFromRegister(int reg);
  virtual void ReadStackPointerFromRegister(int reg);
  virtual void SetCurrentPositionFromEnd(int by);
  virtual void SetRegister(int register_index, int to);
  virtual bool Succeed();
  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
  virtual void ClearRegisters(int reg_from, int reg_to);
  virtual void WriteStackPointerToRegister(int reg);
  virtual bool CanReadUnaligned();

  // Called from RegExp if the stack-guard is triggered.
  // If the code object is relocated, the return address is fixed before
  // returning.
  static int CheckStackGuardState(Address* return_address,
                                  Code* re_code,
                                  Address re_frame,
                                  int start_offset,
                                  const byte** input_start,
                                  const byte** input_end);

 private:
  // Above the frame pointer - stored registers and stack-passed parameters.
  // Callee-saved registers x19-x29, where x29 is the old frame pointer.
  static const int kCalleeSavedRegisters = 0;
  // Return address.
  // It is placed above the 11 callee-saved registers.
  static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
  // Stack parameter placed by caller.
  static const int kIsolate = kSecondaryReturnAddress + kPointerSize;

  // Below the frame pointer.
  // Register parameters stored by setup code.
  static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
  static const int kStackBase = kDirectCall - kPointerSize;
  static const int kOutputSize = kStackBase - kPointerSize;
  static const int kInput = kOutputSize - kPointerSize;
  // When adding local variables remember to push space for them in
  // the frame in GetCode.
  static const int kSuccessCounter = kInput - kPointerSize;
  // First position register address on the stack. Following positions are
  // below it. A position is a 32 bit value.
  static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
  // A capture is a 64 bit value holding two positions.
  static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;

  // Initial size of code buffer.
  static const size_t kRegExpCodeSize = 1024;

  // When initializing registers to a non-position value we can unroll
  // the loop. Set the limit of registers to unroll.
  static const int kNumRegistersToUnroll = 16;

  // We are using x0 to x7 as a register cache. Each hardware register must
  // contain one capture, that is two 32 bit registers. We can cache at most
  // 16 registers.
  static const int kNumCachedRegisters = 16;

  // Load a number of characters at the given offset from the
  // current position, into the current-character register.
  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);

  // Check whether preemption has been requested.
  void CheckPreemption();

  // Check whether we are exceeding the stack limit on the backtrack stack.
  void CheckStackLimit();

  // Generate a call to CheckStackGuardState.
  void CallCheckStackGuardState(Register scratch);

  // Location of a 32 bit position register.
  MemOperand register_location(int register_index);

  // Location of a 64 bit capture, combining two position registers.
  MemOperand capture_location(int register_index, Register scratch);

  // Register holding the current input position as negative offset from
  // the end of the string.
  Register current_input_offset() { return w21; }

  // The register containing the current character after LoadCurrentCharacter.
  Register current_character() { return w22; }

  // Register holding address of the end of the input string.
  Register input_end() { return x25; }

  // Register holding address of the start of the input string.
  Register input_start() { return x26; }

  // Register holding the offset from the start of the string where we should
  // start matching.
  Register start_offset() { return w27; }

  // Pointer to the output array's first element.
  Register output_array() { return x28; }

  // Register holding the frame address. Local variables, parameters and
  // regexp registers are addressed relative to this.
  Register frame_pointer() { return fp; }

  // The register containing the backtrack stack top. Provides a meaningful
  // name to the register.
  Register backtrack_stackpointer() { return x23; }

  // Register holding pointer to the current code object.
  Register code_pointer() { return x20; }

  // Register holding the value used for clearing capture registers.
  Register non_position_value() { return w24; }
  // The top 32 bits of this register are used to store this value
  // twice. This is used for clearing more than one register at a time.
  Register twice_non_position_value() { return x24; }

  // Byte size of chars in the string to match (decided by the Mode argument).
  int char_size() { return static_cast<int>(mode_); }

  // Equivalent to a conditional branch to the label, unless the label
  // is NULL, in which case it is a conditional Backtrack.
  void BranchOrBacktrack(Condition condition, Label* to);

  // Compares reg against immediate before calling BranchOrBacktrack.
  // It makes use of the Cbz and Cbnz instructions.
  void CompareAndBranchOrBacktrack(Register reg,
                                   int immediate,
                                   Condition condition,
                                   Label* to);

  inline void CallIf(Label* to, Condition condition);

  // Save and restore the link register on the stack in a way that
  // is GC-safe.
  inline void SaveLinkRegister();
  inline void RestoreLinkRegister();

  // Pushes the value of a register on the backtrack stack. Decrements the
  // stack pointer by a word size and stores the register's value there.
  inline void Push(Register source);

  // Pops a value from the backtrack stack. Reads the word at the stack pointer
  // and increments it by a word size.
  inline void Pop(Register target);

  // This state indicates where the register actually is.
  enum RegisterState {
    STACKED,     // Resides in memory.
    CACHED_LSW,  // Least Significant Word of a 64 bit hardware register.
    CACHED_MSW   // Most Significant Word of a 64 bit hardware register.
  };

  RegisterState GetRegisterState(int register_index) {
    ASSERT(register_index >= 0);
    if (register_index >= kNumCachedRegisters) {
      return STACKED;
    } else {
      if ((register_index % 2) == 0) {
        return CACHED_LSW;
      } else {
        return CACHED_MSW;
      }
    }
  }

  // Store helper that takes the state of the register into account.
  inline void StoreRegister(int register_index, Register source);

  // Returns a hardware W register that holds the value of the capture
  // register.
  //
  // This function will try to use an existing cache register (w0-w7) for the
  // result. Otherwise, it will load the value into maybe_result.
  //
  // If the returned register is anything other than maybe_result, calling code
  // must not write to it.
  inline Register GetRegister(int register_index, Register maybe_result);

  // Returns the hardware register (x0-x7) holding the value of the capture
  // register.
  // This assumes that the state of the register is not STACKED.
  inline Register GetCachedRegister(int register_index);

  Isolate* isolate() const { return masm_->isolate(); }

  MacroAssembler* masm_;

  // Which mode to generate code for (ASCII or UC16).
  Mode mode_;

  // One greater than maximal register index actually used.
  int num_registers_;

  // Number of registers to output at the end (the saved registers
  // are always 0..num_saved_registers_-1).
  int num_saved_registers_;

  // Labels used internally.
  Label entry_label_;
  Label start_label_;
  Label success_label_;
  Label backtrack_label_;
  Label exit_label_;
  Label check_preempt_label_;
  Label stack_overflow_label_;
};

#endif  // V8_INTERPRETED_REGEXP

} }  // namespace v8::internal

#endif  // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
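
RegExpMacroAssemblerA64 keeps the first kNumCachedRegisters (16) regexp registers live in x0-x7, packing two 32-bit positions into each 64-bit hardware register; GetRegisterState above is the whole index-to-slot mapping. Here is a small standalone sketch of that arithmetic; GetRegisterState is copied in spirit from the class, while ReadCached and the packing in main are hypothetical helpers for this sketch only.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Register index i < 16 lives in hardware register x(i / 2): even indices in
// the low 32 bits (LSW), odd indices in the high 32 bits (MSW); the rest are
// spilled to the stack.
enum RegisterState { STACKED, CACHED_LSW, CACHED_MSW };

RegisterState GetRegisterState(int register_index, int num_cached = 16) {
  assert(register_index >= 0);
  if (register_index >= num_cached) return STACKED;
  return (register_index % 2) == 0 ? CACHED_LSW : CACHED_MSW;
}

// Read one 32-bit regexp register out of its (simulated) 64-bit cache slot.
int32_t ReadCached(uint64_t x_reg, int register_index) {
  assert(GetRegisterState(register_index) != STACKED);
  if (GetRegisterState(register_index) == CACHED_MSW) {
    return static_cast<int32_t>(x_reg >> 32);
  }
  return static_cast<int32_t>(x_reg & 0xffffffffu);
}

int main() {
  // One capture packed the way the class packs it: register 0 holding the
  // non-position value (-1), register 1 holding position 7.
  uint64_t x0 = (uint64_t{7} << 32) | uint64_t{0xffffffff};
  std::printf("reg0=%d reg1=%d reg17 stacked=%d\n",
              ReadCached(x0, 0), ReadCached(x0, 1),
              GetRegisterState(17) == STACKED ? 1 : 0);
  return 0;
}

This layout is also why twice_non_position_value() exists: clearing a whole capture is a single 64-bit store of the value duplicated in both halves.
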
File diff suppressed because it is too large
@@ -0,0 +1,868 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_SIMULATOR_A64_H_
#define V8_A64_SIMULATOR_A64_H_

#include <stdarg.h>
#include <vector>

#include "v8.h"

#include "globals.h"
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
#include "a64/assembler-a64.h"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
#include "a64/instrument-a64.h"

#define REGISTER_CODE_LIST(R) \
R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)  \
R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

namespace v8 {
namespace internal {

#if !defined(USE_SIMULATOR)

// Running without a simulator on a native A64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  (entry(p0, p1, p2, p3, p4))

typedef int (*a64_regexp_matcher)(String* input,
                                  int64_t start_offset,
                                  const byte* input_start,
                                  const byte* input_end,
                                  int* output,
                                  int64_t output_size,
                                  Address stack_base,
                                  int64_t direct_call,
                                  void* return_address,
                                  Isolate* isolate);

// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type a64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
  (FUNCTION_CAST<a64_regexp_matcher>(entry)( \
      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))

#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
  reinterpret_cast<TryCatch*>(try_catch_address)

// When running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
                                     uintptr_t c_limit) {
    USE(isolate);
    return c_limit;
  }

  static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
    return try_catch_address;
  }

  static void UnregisterCTryCatch() { }
};

#else  // !defined(USE_SIMULATOR)

enum ReverseByteMode {
  Reverse16 = 0,
  Reverse32 = 1,
  Reverse64 = 2
};


// The proper way to initialize a simulated system register (such as NZCV) is
// as follows:
//   SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
class SimSystemRegister {
 public:
  // The default constructor represents a register which has no writable bits.
  // It is not possible to set its value to anything other than 0.
  SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }

  uint32_t RawValue() const {
    return value_;
  }

  void SetRawValue(uint32_t new_value) {
    value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, value_);
  }

  int32_t SignedBits(int msb, int lsb) const {
    return signed_bitextract_32(msb, lsb, value_);
  }

  void SetBits(int msb, int lsb, uint32_t bits);

  // Default system register values.
  static SimSystemRegister DefaultValueFor(SystemRegister id);

#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                  \
  uint32_t Name() const { return Func(HighBit, LowBit); }           \
  void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
#define DEFINE_WRITE_IGNORE_MASK(Name, Mask)                        \
  static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);

  SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)

#undef DEFINE_WRITE_IGNORE_MASK
#undef DEFINE_GETTER

 protected:
  // Most system registers only implement a few of the bits in the word. Other
  // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
  // describes the bits which are not modifiable.
  SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
      : value_(value), write_ignore_mask_(write_ignore_mask) { }

  uint32_t value_;
  uint32_t write_ignore_mask_;
};
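
SetRawValue above implements the architectural read-as-zero/write-ignored rule: bits covered by the mask keep their previous value, all other bits take the new one. A compilable sketch of just that masking follows; the NZCV mask value (flags in bits 31:28, so ignore-mask 0x0fffffff) is an assumption for the example, since the real masks come from SYSTEM_REGISTER_FIELDS_LIST.

#include <cassert>
#include <cstdint>

// Masked write: bits set in write_ignore_mask keep their old value; the
// remaining (writable) bits take the new value.
uint32_t MaskedWrite(uint32_t old_value, uint32_t new_value,
                     uint32_t write_ignore_mask) {
  return (old_value & write_ignore_mask) | (new_value & ~write_ignore_mask);
}

int main() {
  const uint32_t kNZCVWriteIgnoreMask = 0x0fffffff;  // Assumed: flags 31:28.
  uint32_t nzcv = 0;
  // Setting all four condition flags sticks.
  nzcv = MaskedWrite(nzcv, 0xf0000000, kNZCVWriteIgnoreMask);
  assert(nzcv == 0xf0000000);
  // A write whose flag field is zero clears the flags; the bits aimed at the
  // reserved field are simply dropped.
  nzcv = MaskedWrite(nzcv, 0x0000ffff, kNZCVWriteIgnoreMask);
  assert(nzcv == 0);
  return 0;
}
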


// Represent a register (r0-r31, v0-v31).
template<int kSizeInBytes>
class SimRegisterBase {
 public:
  template<typename T>
  void Set(T new_value, unsigned size = sizeof(T)) {
    ASSERT(size <= kSizeInBytes);
    ASSERT(size <= sizeof(new_value));
    // All AArch64 registers are zero-extending; writing a W register clears
    // the top bits of the corresponding X register.
    memset(value_, 0, kSizeInBytes);
    memcpy(value_, &new_value, size);
  }

  // Copy 'size' bytes of the register to the result, and zero-extend to fill
  // the result.
  template<typename T>
  T Get(unsigned size = sizeof(T)) const {
    ASSERT(size <= kSizeInBytes);
    T result;
    memset(&result, 0, sizeof(result));
    memcpy(&result, value_, size);
    return result;
  }

 protected:
  uint8_t value_[kSizeInBytes];
};
typedef SimRegisterBase<kXRegSizeInBytes> SimRegister;      // r0-r31
typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister;    // v0-v31
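
SimRegisterBase models the AArch64 rule that a narrow write zero-extends into the full register: Set() clears the whole byte buffer before copying the new value in. A self-contained sketch of just that mechanism, with illustrative names; like the memcpy layout in the class above, it assumes a little-endian host.

#include <cassert>
#include <cstdint>
#include <cstring>

// A register is a bag of bytes; every write first zeroes the whole bag, so a
// 32-bit (W) write leaves the upper 32 bits of the 64-bit (X) view clear.
template<unsigned kSizeInBytes>
class ByteRegister {
 public:
  template<typename T>
  void Set(T new_value) {
    static_assert(sizeof(T) <= kSizeInBytes, "value too wide");
    memset(value_, 0, kSizeInBytes);       // The zero-extension step.
    memcpy(value_, &new_value, sizeof(T));
  }

  template<typename T>
  T Get() const {
    static_assert(sizeof(T) <= kSizeInBytes, "read too wide");
    T result;
    memcpy(&result, value_, sizeof(T));    // Little-endian: low bytes first.
    return result;
  }

 private:
  uint8_t value_[kSizeInBytes];
};

int main() {
  ByteRegister<8> x0;                          // One simulated X register.
  x0.Set(UINT64_C(0xdeadbeefcafef00d));
  x0.Set(static_cast<uint32_t>(0x12345678));   // W write...
  assert(x0.Get<uint64_t>() == UINT64_C(0x12345678));  // ...cleared the top.
  return 0;
}
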
|
||||
|
|
||||
|
class Simulator : public DecoderVisitor { |
||||
|
public: |
||||
|
explicit Simulator(Decoder* decoder, |
||||
|
Isolate* isolate = NULL, |
||||
|
FILE* stream = stderr); |
||||
|
~Simulator(); |
||||
|
|
||||
|
// System functions.
|
||||
|
|
||||
|
static void Initialize(Isolate* isolate); |
||||
|
|
||||
|
static Simulator* current(v8::internal::Isolate* isolate); |
||||
|
|
||||
|
class CallArgument; |
||||
|
|
||||
|
// Call an arbitrary function taking an arbitrary number of arguments. The
|
||||
|
// varargs list must be a set of arguments with type CallArgument, and
|
||||
|
// terminated by CallArgument::End().
|
||||
|
void CallVoid(byte* entry, CallArgument* args); |
||||
|
|
||||
|
// Like CallVoid, but expect a return value.
|
||||
|
int64_t CallInt64(byte* entry, CallArgument* args); |
||||
|
double CallDouble(byte* entry, CallArgument* args); |
||||
|
|
||||
|
// V8 calls into generated JS code with 5 parameters and into
|
||||
|
// generated RegExp code with 10 parameters. These are convenience functions,
|
||||
|
// which set up the simulator state and grab the result on return.
|
||||
|
int64_t CallJS(byte* entry, |
||||
|
byte* function_entry, |
||||
|
JSFunction* func, |
||||
|
Object* revc, |
||||
|
int64_t argc, |
||||
|
Object*** argv); |
||||
|
int64_t CallRegExp(byte* entry, |
||||
|
String* input, |
||||
|
int64_t start_offset, |
||||
|
const byte* input_start, |
||||
|
const byte* input_end, |
||||
|
int* output, |
||||
|
int64_t output_size, |
||||
|
Address stack_base, |
||||
|
int64_t direct_call, |
||||
|
void* return_address, |
||||
|
Isolate* isolate); |
||||
|
|
||||
|
// A wrapper class that stores an argument for one of the above Call
|
||||
|
// functions.
|
||||
|
//
|
||||
|
// Only arguments up to 64 bits in size are supported.
|
||||
|
class CallArgument { |
||||
|
public: |
||||
|
template<typename T> |
||||
|
explicit CallArgument(T argument) { |
||||
|
ASSERT(sizeof(argument) <= sizeof(bits_)); |
||||
|
memcpy(&bits_, &argument, sizeof(argument)); |
||||
|
type_ = X_ARG; |
||||
|
} |
||||
|
|
||||
|
explicit CallArgument(double argument) { |
||||
|
ASSERT(sizeof(argument) == sizeof(bits_)); |
||||
|
memcpy(&bits_, &argument, sizeof(argument)); |
||||
|
type_ = D_ARG; |
||||
|
} |
||||
|
|
||||
|
explicit CallArgument(float argument) { |
||||
|
// TODO(all): CallArgument(float) is untested, remove this check once
|
||||
|
// tested.
|
||||
|
UNIMPLEMENTED(); |
||||
|
// Make the D register a NaN to try to trap errors if the callee expects a
|
||||
|
// double. If it expects a float, the callee should ignore the top word.
|
||||
|
ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_)); |
||||
|
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN)); |
||||
|
// Write the float payload to the S register.
|
||||
|
ASSERT(sizeof(argument) <= sizeof(bits_)); |
||||
|
memcpy(&bits_, &argument, sizeof(argument)); |
||||
|
type_ = D_ARG; |
||||
|
} |
||||
|
|
||||
|
// This indicates the end of the arguments list, so that CallArgument
|
||||
|
// objects can be passed into varargs functions.
|
||||
|
static CallArgument End() { return CallArgument(); } |
||||
|
|
||||
|
int64_t bits() const { return bits_; } |
||||
|
bool IsEnd() const { return type_ == NO_ARG; } |
||||
|
bool IsX() const { return type_ == X_ARG; } |
||||
|
bool IsD() const { return type_ == D_ARG; } |
||||
|
|
||||
|
private: |
||||
|
enum CallArgumentType { X_ARG, D_ARG, NO_ARG }; |
||||
|
|
||||
|
// All arguments are aligned to at least 64 bits and we don't support
|
||||
|
// passing bigger arguments, so the payload size can be fixed at 64 bits.
|
||||
|
int64_t bits_; |
||||
|
CallArgumentType type_; |
||||
|
|
||||
|
CallArgument() { type_ = NO_ARG; } |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
// Start the debugging command line.
|
||||
|
void Debug(); |
||||
|
|
||||
|
bool GetValue(const char* desc, int64_t* value); |
||||
|
|
||||
|
bool PrintValue(const char* desc); |
||||
|
|
||||
|
// Push an address onto the JS stack.
|
||||
|
uintptr_t PushAddress(uintptr_t address); |
||||
|
|
||||
|
// Pop an address from the JS stack.
|
||||
|
uintptr_t PopAddress(); |
||||
|
|
||||
|
// Accessor to the internal simulator stack area.
|
||||
|
uintptr_t StackLimit() const; |
||||
|
|
||||
|
void ResetState(); |
||||
|
|
||||
|
// Runtime call support.
|
||||
|
static void* RedirectExternalReference(void* external_function, |
||||
|
ExternalReference::Type type); |
||||
|
|
||||
|
// Run the simulator.
|
||||
|
static const Instruction* kEndOfSimAddress; |
||||
|
void DecodeInstruction(); |
||||
|
void Run(); |
||||
|
void RunFrom(Instruction* start); |
||||
|
|
||||
|
// Simulation helpers.
|
||||
|
template <typename T> |
||||
|
void set_pc(T new_pc) { |
||||
|
ASSERT(sizeof(T) == sizeof(pc_)); |
||||
|
memcpy(&pc_, &new_pc, sizeof(T)); |
||||
|
pc_modified_ = true; |
||||
|
} |
||||
|
Instruction* pc() { return pc_; } |
||||
|
|
||||
|
void increment_pc() { |
||||
|
if (!pc_modified_) { |
||||
|
pc_ = pc_->NextInstruction(); |
||||
|
} |
||||
|
|
||||
|
pc_modified_ = false; |
||||
|
} |
||||
|
|
||||
|
void ExecuteInstruction() { |
||||
|
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize)); |
||||
|
CheckBreakNext(); |
||||
|
decoder_->Decode(pc_); |
||||
|
LogProcessorState(); |
||||
|
increment_pc(); |
||||
|
CheckBreakpoints(); |
||||
|
} |
||||
|
|
||||
|
// Declare all Visitor functions.
|
||||
|
#define DECLARE(A) void Visit##A(Instruction* instr); |
||||
|
VISITOR_LIST(DECLARE) |
||||
|
#undef DECLARE |
||||
|
|
||||
|
// Register accessors.
|
||||
|
|
||||
|
// Return 'size' bits of the value of an integer register, as the specified
|
||||
|
// type. The value is zero-extended to fill the result.
|
||||
|
//
|
||||
|
// The only supported values of 'size' are kXRegSize and kWRegSize.
|
||||
|
template<typename T> |
||||
|
T reg(unsigned size, unsigned code, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) const { |
||||
|
unsigned size_in_bytes = size / 8; |
||||
|
ASSERT(size_in_bytes <= sizeof(T)); |
||||
|
ASSERT((size == kXRegSize) || (size == kWRegSize)); |
||||
|
ASSERT(code < kNumberOfRegisters); |
||||
|
|
||||
|
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { |
||||
|
T result; |
||||
|
memset(&result, 0, sizeof(result)); |
||||
|
return result; |
||||
|
} |
||||
|
return registers_[code].Get<T>(size_in_bytes); |
||||
|
} |
||||
|
|
||||
|
// Like reg(), but infer the access size from the template type.
|
||||
|
template<typename T> |
||||
|
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { |
||||
|
return reg<T>(sizeof(T) * 8, code, r31mode); |
||||
|
} |
||||
|
|
||||
|
// Common specialized accessors for the reg() template.
|
||||
|
int32_t wreg(unsigned code, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) const { |
||||
|
return reg<int32_t>(code, r31mode); |
||||
|
} |
||||
|
|
||||
|
int64_t xreg(unsigned code, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) const { |
||||
|
return reg<int64_t>(code, r31mode); |
||||
|
} |
||||
|
|
||||
|
int64_t reg(unsigned size, unsigned code, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) const { |
||||
|
return reg<int64_t>(size, code, r31mode); |
||||
|
} |
||||
|
|
||||
|
// Write 'size' bits of 'value' into an integer register. The value is
|
||||
|
// zero-extended. This behaviour matches AArch64 register writes.
|
||||
|
//
|
||||
|
// The only supported values of 'size' are kXRegSize and kWRegSize.
|
||||
|
template<typename T> |
||||
|
void set_reg(unsigned size, unsigned code, T value, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) { |
||||
|
unsigned size_in_bytes = size / 8; |
||||
|
ASSERT(size_in_bytes <= sizeof(T)); |
||||
|
ASSERT((size == kXRegSize) || (size == kWRegSize)); |
||||
|
ASSERT(code < kNumberOfRegisters); |
||||
|
|
||||
|
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { |
||||
|
return; |
||||
|
} |
||||
|
return registers_[code].Set(value, size_in_bytes); |
||||
|
} |
||||
|
|
||||
|
// Like set_reg(), but infer the access size from the template type.
|
||||
|
template<typename T> |
||||
|
void set_reg(unsigned code, T value, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) { |
||||
|
set_reg(sizeof(value) * 8, code, value, r31mode); |
||||
|
} |
||||
|
|
||||
|
// Common specialized accessors for the set_reg() template.
|
||||
|
void set_wreg(unsigned code, int32_t value, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) { |
||||
|
set_reg(kWRegSize, code, value, r31mode); |
||||
|
} |
||||
|
|
||||
|
void set_xreg(unsigned code, int64_t value, |
||||
|
Reg31Mode r31mode = Reg31IsZeroRegister) { |
||||
|
set_reg(kXRegSize, code, value, r31mode); |
||||
|
} |
||||
|
|
||||
|
// Commonly-used special cases.
|
||||
|
template<typename T> |
||||
|
void set_lr(T value) { |
||||
|
ASSERT(sizeof(T) == kPointerSize); |
||||
|
set_reg(kLinkRegCode, value); |
||||
|
} |
||||
|
|
||||
|
template<typename T> |
||||
|
void set_sp(T value) { |
||||
|
ASSERT(sizeof(T) == kPointerSize); |
||||
|
set_reg(31, value, Reg31IsStackPointer); |
||||
|
} |
||||
|
|
||||
|
int64_t sp() { return xreg(31, Reg31IsStackPointer); } |
||||
|
int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); } |
||||
|
int64_t fp() { |
||||
|
return xreg(kFramePointerRegCode, Reg31IsStackPointer); |
||||
|
} |
||||
|
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); } |
||||
|
|
||||
|
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); } |
||||
|
|
||||
|
// Return 'size' bits of the value of a floating-point register, as the
|
||||
|
// specified type. The value is zero-extended to fill the result.
|
||||
|
//
|
||||
|
// The only supported values of 'size' are kDRegSize and kSRegSize.
|
||||
|
template<typename T> |
||||
|
T fpreg(unsigned size, unsigned code) const { |
||||
|
unsigned size_in_bytes = size / 8; |
||||
|
ASSERT(size_in_bytes <= sizeof(T)); |
||||
|
ASSERT((size == kDRegSize) || (size == kSRegSize)); |
||||
|
ASSERT(code < kNumberOfFPRegisters); |
||||
|
return fpregisters_[code].Get<T>(size_in_bytes); |
||||
|
} |
||||
|
|
||||
|
// Like fpreg(), but infer the access size from the template type.
|
||||
|
template<typename T> |
||||
|
T fpreg(unsigned code) const { |
||||
|
return fpreg<T>(sizeof(T) * 8, code); |
||||
|
} |
||||
|
|
||||
|
// Common specialized accessors for the fpreg() template.
|
||||
|
float sreg(unsigned code) const { |
||||
|
return fpreg<float>(code); |
||||
|
} |
||||
|
|
||||
|
uint32_t sreg_bits(unsigned code) const { |
||||
|
return fpreg<uint32_t>(code); |
||||
|
} |
||||
|
|
||||
|
double dreg(unsigned code) const { |
||||
|
return fpreg<double>(code); |
||||
|
} |
||||
|
|
||||
|
uint64_t dreg_bits(unsigned code) const { |
||||
|
return fpreg<uint64_t>(code); |
||||
|
} |
||||
|
|
||||
|
double fpreg(unsigned size, unsigned code) const { |
||||
|
switch (size) { |
||||
|
case kSRegSize: return sreg(code); |
||||
|
case kDRegSize: return dreg(code); |
||||
|
default: |
||||
|
UNREACHABLE(); |
||||
|
return 0.0; |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Write 'value' into a floating-point register. The value is zero-extended.
|
||||
|
// This behaviour matches AArch64 register writes.
|
||||
|
template<typename T> |
||||
|
void set_fpreg(unsigned code, T value) { |
||||
|
ASSERT((sizeof(value) == kDRegSizeInBytes) || |
||||
|
(sizeof(value) == kSRegSizeInBytes)); |
||||
|
ASSERT(code < kNumberOfFPRegisters); |
||||
|
fpregisters_[code].Set(value, sizeof(value)); |
||||
|
} |
||||
|
|
||||
|
// Common specialized accessors for the set_fpreg() template.
|
||||
|
void set_sreg(unsigned code, float value) { |
||||
|
set_fpreg(code, value); |
||||
|
} |
||||
|
|
||||
|
void set_sreg_bits(unsigned code, uint32_t value) { |
||||
|
set_fpreg(code, value); |
||||
|
} |
||||
|
|
||||
|
void set_dreg(unsigned code, double value) { |
||||
|
set_fpreg(code, value); |
||||
|
} |
||||
|
|
||||
|
void set_dreg_bits(unsigned code, uint64_t value) { |
||||
|
set_fpreg(code, value); |
||||
|
} |
||||
|
|
||||
|
bool N() { return nzcv_.N() != 0; } |
||||
|
bool Z() { return nzcv_.Z() != 0; } |
||||
|
bool C() { return nzcv_.C() != 0; } |
||||
|
bool V() { return nzcv_.V() != 0; } |
||||
|
SimSystemRegister& nzcv() { return nzcv_; } |
||||
|
|
||||
|
// TODO(jbramley): Find a way to make the fpcr_ members return the proper
|
||||
|
// types, so this accessor is not necessary.
|
||||
|
FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); } |
||||
|
SimSystemRegister& fpcr() { return fpcr_; } |
||||
|
|
||||
|
// Debug helpers
|
||||
|
|
||||
|
// Simulator breakpoints.
|
||||
|
struct Breakpoint { |
||||
|
Instruction* location; |
||||
|
bool enabled; |
||||
|
}; |
||||
|
std::vector<Breakpoint> breakpoints_; |
||||
|
void SetBreakpoint(Instruction* breakpoint); |
||||
|
void ListBreakpoints(); |
||||
|
void CheckBreakpoints(); |
||||
|
|
||||
|
// Helpers for the 'next' command.
|
||||
|
// When this is set, the Simulator will insert a breakpoint after the next BL
|
||||
|
// instruction it meets.
|
||||
|
bool break_on_next_; |
||||
|
// Check if the Simulator should insert a break after the current instruction
|
||||
|
// for the 'next' command.
|
||||
|
void CheckBreakNext(); |
||||
|
|
||||
|
// Disassemble instruction at the given address.
|
||||
|
void PrintInstructionsAt(Instruction* pc, uint64_t count); |
||||
|
|
||||
|
void PrintSystemRegisters(bool print_all = false); |
||||
|
void PrintRegisters(bool print_all_regs = false); |
||||
|
void PrintFPRegisters(bool print_all_regs = false); |
||||
|
void PrintProcessorState(); |
||||
|
void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes); |
||||
|
void LogSystemRegisters() { |
||||
|
if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters(); |
||||
|
} |
||||
|
void LogRegisters() { |
||||
|
if (log_parameters_ & LOG_REGS) PrintRegisters(); |
||||
|
} |
||||
|
void LogFPRegisters() { |
||||
|
if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters(); |
||||
|
} |
||||
|
void LogProcessorState() { |
||||
|
LogSystemRegisters(); |
||||
|
LogRegisters(); |
||||
|
LogFPRegisters(); |
||||
|
} |
||||
|
void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) { |
||||
|
if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes); |
||||
|
} |
||||
|
|
||||
|
int log_parameters() { return log_parameters_; } |
||||
|
void set_log_parameters(int new_parameters) { |
||||
|
if (new_parameters & LOG_DISASM) { |
||||
|
decoder_->InsertVisitorBefore(print_disasm_, this); |
||||
|
} else { |
||||
|
decoder_->RemoveVisitor(print_disasm_); |
||||
|
} |
||||
|
log_parameters_ = new_parameters; |
||||
|
} |
||||
|
|
||||
|
static inline const char* WRegNameForCode(unsigned code, |
||||
|
Reg31Mode mode = Reg31IsZeroRegister); |
||||
|
static inline const char* XRegNameForCode(unsigned code, |
||||
|
Reg31Mode mode = Reg31IsZeroRegister); |
||||
|
static inline const char* SRegNameForCode(unsigned code); |
||||
|
static inline const char* DRegNameForCode(unsigned code); |
||||
|
static inline const char* VRegNameForCode(unsigned code); |
||||
|
static inline int CodeFromName(const char* name); |
||||
|
|
||||
|
protected: |
||||
|
// Simulation helpers ------------------------------------
|
||||
|
bool ConditionPassed(Condition cond) { |
||||
|
switch (cond) { |
||||
|
case eq: |
||||
|
return Z(); |
||||
|
case ne: |
||||
|
return !Z(); |
||||
|
case hs: |
||||
|
return C(); |
||||
|
case lo: |
||||
|
return !C(); |
||||
|
case mi: |
||||
|
return N(); |
||||
|
case pl: |
||||
|
return !N(); |
||||
|
case vs: |
||||
|
return V(); |
||||
|
case vc: |
||||
|
return !V(); |
||||
|
case hi: |
||||
|
return C() && !Z(); |
||||
|
case ls: |
||||
|
return !(C() && !Z()); |
||||
|
case ge: |
||||
|
return N() == V(); |
||||
|
case lt: |
||||
|
return N() != V(); |
||||
|
case gt: |
||||
|
return !Z() && (N() == V()); |
||||
|
case le: |
||||
|
return !(!Z() && (N() == V())); |
||||
|
case nv: // Fall through.
|
||||
|
case al: |
||||
|
return true; |
||||
|
default: |
||||
|
UNREACHABLE(); |
||||
|
return false; |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
bool ConditionFailed(Condition cond) { |
||||
|
return !ConditionPassed(cond); |
||||
|
} |
||||
|
|
||||
|
void AddSubHelper(Instruction* instr, int64_t op2); |
||||
|
int64_t AddWithCarry(unsigned reg_size, |
||||
|
bool set_flags, |
||||
|
int64_t src1, |
||||
|
int64_t src2, |
||||
|
int64_t carry_in = 0); |
||||
|
void LogicalHelper(Instruction* instr, int64_t op2); |
||||
|
void ConditionalCompareHelper(Instruction* instr, int64_t op2); |
||||
|
void LoadStoreHelper(Instruction* instr, |
||||
|
int64_t offset, |
||||
|
AddrMode addrmode); |
||||
|
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode); |
||||
|
uint8_t* LoadStoreAddress(unsigned addr_reg, |
||||
|
int64_t offset, |
||||
|
AddrMode addrmode); |
||||
|
void LoadStoreWriteBack(unsigned addr_reg, |
||||
|
int64_t offset, |
||||
|
AddrMode addrmode); |
||||
|
void CheckMemoryAccess(uint8_t* address, uint8_t* stack); |
||||
|
|
||||
|
uint64_t MemoryRead(uint8_t* address, unsigned num_bytes); |
||||
|
uint8_t MemoryRead8(uint8_t* address); |
||||
|
uint16_t MemoryRead16(uint8_t* address); |
||||
|
uint32_t MemoryRead32(uint8_t* address); |
||||
|
float MemoryReadFP32(uint8_t* address); |
||||
|
uint64_t MemoryRead64(uint8_t* address); |
||||
|
double MemoryReadFP64(uint8_t* address); |
||||
|
|
||||
|
void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes); |
||||
|
void MemoryWrite32(uint8_t* address, uint32_t value); |
||||
|
void MemoryWriteFP32(uint8_t* address, float value); |
||||
|
void MemoryWrite64(uint8_t* address, uint64_t value); |
||||
|
void MemoryWriteFP64(uint8_t* address, double value); |
||||
|
|
||||
|
int64_t ShiftOperand(unsigned reg_size, |
||||
|
int64_t value, |
||||
|
Shift shift_type, |
||||
|
unsigned amount); |
||||
|
int64_t Rotate(unsigned reg_width, |
||||
|
int64_t value, |
||||
|
Shift shift_type, |
||||
|
unsigned amount); |
||||
|
int64_t ExtendValue(unsigned reg_width, |
||||
|
int64_t value, |
||||
|
Extend extend_type, |
||||
|
unsigned left_shift = 0); |

  uint64_t ReverseBits(uint64_t value, unsigned num_bits);
  uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);

  void FPCompare(double val0, double val1);
  double FPRoundInt(double value, FPRounding round_mode);
  double FPToDouble(float value);
  float FPToFloat(double value, FPRounding round_mode);
  double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
  double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
  float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
  float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
  int32_t FPToInt32(double value, FPRounding rmode);
  int64_t FPToInt64(double value, FPRounding rmode);
  uint32_t FPToUInt32(double value, FPRounding rmode);
  uint64_t FPToUInt64(double value, FPRounding rmode);

  template <typename T>
  T FPMax(T a, T b);

  template <typename T>
  T FPMin(T a, T b);

  template <typename T>
  T FPMaxNM(T a, T b);

  template <typename T>
  T FPMinNM(T a, T b);

  void CheckStackAlignment();

  inline void CheckPCSComplianceAndRun();

#ifdef DEBUG
  // Corruption values should have their least significant byte cleared to
  // allow the code of the register being corrupted to be inserted.
  static const uint64_t kCallerSavedRegisterCorruptionValue =
      0xca11edc0de000000UL;
  // This value is a NaN in both 32-bit and 64-bit FP.
  static const uint64_t kCallerSavedFPRegisterCorruptionValue =
      0x7ff000007f801000UL;
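  // (Why both widths: the double view has an all-ones exponent with a
  // non-zero mantissa, and the low word 0x7f801000 likewise has an all-ones
  // float exponent with a non-zero mantissa, so either view is a NaN.)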
  // This value is a mix of 32/64-bits NaN and "verbose" immediate.
  static const uint64_t kDefaultCPURegisterCorruptionValue =
      0x7ffbad007f8bad00UL;

  void CorruptRegisters(CPURegList* list,
                        uint64_t value = kDefaultCPURegisterCorruptionValue);
  void CorruptAllCallerSavedCPURegisters();
#endif

  // Processor state ---------------------------------------

  // Output stream.
  FILE* stream_;
  PrintDisassembler* print_disasm_;

  // Instrumentation.
  Instrument* instrument_;

  // General purpose registers. Register 31 is the stack pointer.
  SimRegister registers_[kNumberOfRegisters];

  // Floating point registers.
  SimFPRegister fpregisters_[kNumberOfFPRegisters];

  // Processor state:
  // bits[31, 28]: Condition flags N, Z, C, and V.
  //               (Negative, Zero, Carry, Overflow)
  SimSystemRegister nzcv_;

  // Floating-Point Control Register.
  SimSystemRegister fpcr_;

  // Only a subset of FPCR features are supported by the simulator. This helper
  // checks that the FPCR settings are supported.
  //
  // This is checked when floating-point instructions are executed, not when
  // FPCR is set. This allows generated code to modify FPCR for external
  // functions, or to save and restore it when entering and leaving generated
  // code.
  void AssertSupportedFPCR() {
    ASSERT(fpcr().DN() == 0);             // No default-NaN support.
    ASSERT(fpcr().FZ() == 0);             // No flush-to-zero support.
    ASSERT(fpcr().RMode() == FPTieEven);  // Ties-to-even rounding only.

    // The simulator does not support half-precision operations so fpcr().AHP()
    // is irrelevant, and is not checked here.
  }

  static int CalcNFlag(uint64_t result, unsigned reg_size) {
    return (result >> (reg_size - 1)) & 1;
  }

  static int CalcZFlag(uint64_t result) {
    return result == 0;
  }

  static const uint32_t kConditionFlagsMask = 0xf0000000;

  // Stack.
  byte* stack_;
  static const intptr_t stack_protection_size_ = KB;
  intptr_t stack_size_;
  byte* stack_limit_;
  // TODO(aleram): protect the stack.

  Decoder* decoder_;
  Decoder* disassembler_decoder_;

  // Indicates if the pc has been modified by the instruction and should not be
  // automatically incremented.
  bool pc_modified_;
  Instruction* pc_;

  static const char* xreg_names[];
  static const char* wreg_names[];
  static const char* sreg_names[];
  static const char* dreg_names[];
  static const char* vreg_names[];

  // Debugger input.
  void set_last_debugger_input(char* input) {
    DeleteArray(last_debugger_input_);
    last_debugger_input_ = input;
  }
  char* last_debugger_input() { return last_debugger_input_; }
  char* last_debugger_input_;

 private:
  int log_parameters_;
  Isolate* isolate_;
};


// When running with the simulator, transition into simulated execution at
// this point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
      FUNCTION_ADDR(entry), \
      p0, p1, p2, p3, p4))

#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
  Simulator::current(Isolate::Current())->CallRegExp( \
      entry, \
      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)

#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
  try_catch_address == NULL ? \
      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))


// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
// See also 'class SimulatorStack' in arm/simulator-arm.h.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
                                     uintptr_t c_limit) {
    return Simulator::current(isolate)->StackLimit();
  }

  static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
    Simulator* sim = Simulator::current(Isolate::Current());
    return sim->PushAddress(try_catch_address);
  }

  static void UnregisterCTryCatch() {
    Simulator::current(Isolate::Current())->PopAddress();
  }
};

#endif  // !defined(USE_SIMULATOR)

} }  // namespace v8::internal

#endif  // V8_A64_SIMULATOR_A64_H_
@ -0,0 +1,112 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if V8_TARGET_ARCH_A64

#include "a64/utils-a64.h"


namespace v8 {
namespace internal {

#define __ assm->


int CountLeadingZeros(uint64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  int count = 0;
  uint64_t bit_test = 1UL << (width - 1);
  while ((count < width) && ((bit_test & value) == 0)) {
    count++;
    bit_test >>= 1;
  }
  return count;
}
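

// A possible host-side optimization (sketch only, in the spirit of the TODO
// above; assumes a GCC/Clang-style compiler and that 'value' fits in 'width'
// bits): replace the bit-test loop with the count-leading-zeros builtin.
static int CountLeadingZerosFast(uint64_t value, int width) {
  if (value == 0) return width;  // __builtin_clzll(0) is undefined.
  return __builtin_clzll(value) - (64 - width);
}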


int CountLeadingSignBits(int64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  if (value >= 0) {
    return CountLeadingZeros(value, width) - 1;
  } else {
    return CountLeadingZeros(~value, width) - 1;
  }
}


int CountTrailingZeros(uint64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  int count = 0;
  while ((count < width) && (((value >> count) & 1) == 0)) {
    count++;
  }
  return count;
}


int CountSetBits(uint64_t value, int width) {
  // TODO(jbramley): Would it be useful to allow other widths? The
  // implementation already supports them.
  ASSERT((width == 32) || (width == 64));

  // Mask out unused bits to ensure that they are not counted.
  value &= (0xffffffffffffffffUL >> (64 - width));

  // Add up the set bits.
  // The algorithm works by adding pairs of bit fields together iteratively,
  // where the size of each bit field doubles each time.
  // An example for an 8-bit value:
  // Bits:   h  g  f  e  d  c  b  a
  //          \ |   \ |   \ |   \ |
  // value = h+g   f+e   d+c   b+a
  //            \    |      \    |
  // value =   h+g+f+e     d+c+b+a
  //                  \          |
  // value =   h+g+f+e+d+c+b+a
  value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
  value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
  value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
  value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
  value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);

  return value;
}
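

// Minimal self-check sketch for CountSetBits (illustrative, not part of the
// original file): each folding step above halves the number of bit fields.
static bool CountSetBitsSelfTest() {
  return (CountSetBits(0, 64) == 0) &&
         (CountSetBits(0xf0, 64) == 4) &&
         (CountSetBits(0xffffffffffffffffUL, 64) == 64);
}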


int MaskToBit(uint64_t mask) {
  ASSERT(CountSetBits(mask, 64) == 1);
  return CountTrailingZeros(mask, 64);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
@ -0,0 +1,109 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_UTILS_A64_H_
#define V8_A64_UTILS_A64_H_

#include <cmath>
#include "v8.h"
#include "a64/constants-a64.h"


#define REGISTER_CODE_LIST(R)                     \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)  \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)


namespace v8 {
namespace internal {


// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
  uint32_t bits = 0;
  memcpy(&bits, &value, 4);
  return bits;
}


static inline uint64_t double_to_rawbits(double value) {
  uint64_t bits = 0;
  memcpy(&bits, &value, 8);
  return bits;
}


static inline float rawbits_to_float(uint32_t bits) {
  float value = 0.0;
  memcpy(&value, &bits, 4);
  return value;
}


static inline double rawbits_to_double(uint64_t bits) {
  double value = 0.0;
  memcpy(&value, &bits, 8);
  return value;
}
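

// Usage sketch (illustrative, not part of the original file): because the
// conversions above use memcpy, a round trip is bit-exact and preserves NaN
// payloads, which the NaN tests below rely on.
static inline bool RawBitsRoundTrip(uint64_t bits) {
  return double_to_rawbits(rawbits_to_double(bits)) == bits;
}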


// Bits counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int MaskToBit(uint64_t mask);
|
|
||||
|
|
||||
|
// NaN tests.
|
||||
|
inline bool IsSignallingNaN(double num) { |
||||
|
const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL; |
||||
|
uint64_t raw = double_to_rawbits(num); |
||||
|
if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) { |
||||
|
return true; |
||||
|
} |
||||
|
return false; |
||||
|
} |
||||
|
|
||||
|
|
||||
|
inline bool IsSignallingNaN(float num) { |
||||
|
const uint64_t kFP32QuietNaNMask = 0x00400000UL; |
||||
|
uint32_t raw = float_to_rawbits(num); |
||||
|
if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) { |
||||
|
return true; |
||||
|
} |
||||
|
return false; |
||||
|
} |
||||
|
|
||||
|
|
||||
|
template <typename T> |
||||
|
inline bool IsQuietNaN(T num) { |
||||
|
return std::isnan(num) && !IsSignallingNaN(num); |
||||
|
} |
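

// Example (illustrative, not part of the original file): build NaNs from raw
// bits and classify them. Bit 51 is the quiet bit for doubles, matching
// kFP64QuietNaNMask above.
inline bool NaNClassificationExample() {
  double snan = rawbits_to_double(0x7ff0000000000001UL);  // Quiet bit clear.
  double qnan = rawbits_to_double(0x7ff8000000000000UL);  // Quiet bit set.
  return IsSignallingNaN(snan) && IsQuietNaN(qnan);
}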

} }  // namespace v8::internal

#endif  // V8_A64_UTILS_A64_H_
@ -0,0 +1 @@
rmcilroy@chromium.org