AMDGPUInstrInfo.h revision 285181
//===-- AMDGPUInstrInfo.h - AMDGPU Instruction Information ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Contains the definition of a TargetInstrInfo class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
#define LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H

#include "AMDGPURegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <map>

// Pull in the TableGen-generated instruction info: the AMDGPUGenInstrInfo
// base class, the instruction opcode enum, and the named-operand enum.
#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
#define GET_INSTRINFO_OPERAND_ENUM
#include "AMDGPUGenInstrInfo.inc"

// Convenience aliases for the R600 predicate-set opcodes used when lowering
// conditional branches on integer/float zero tests.
#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
#define OPCODE_IS_NOT_ZERO_INT AMDGPU::PRED_SETNE_INT
#define OPCODE_IS_ZERO AMDGPU::PRED_SETE
#define OPCODE_IS_NOT_ZERO AMDGPU::PRED_SETNE

namespace llvm {

class AMDGPUSubtarget;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;

/// \brief Target-independent (across AMD GPU generations) instruction
/// information. R600 and SI subclasses derive from this and implement the
/// pure virtual hooks declared at the bottom of the class.
class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
private:
  const AMDGPURegisterInfo RI;
  // Pin the vtable to one translation unit.
  virtual void anchor();
protected:
  const AMDGPUSubtarget &ST;
public:
  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);

  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;
  bool hasLoadFromStackSlot(const MachineInstr *MI,
                            const MachineMemOperand *&MMO,
                            int &FrameIndex) const override;
  // NOTE(review): unlike the load-slot trio above, these three are not marked
  // `override` — the TargetInstrInfo hooks are named isStoreToStackSlot /
  // isStoreToStackSlotPostFE / hasStoreToStackSlot, so these declarations
  // appear to shadow rather than override the base-class API. Confirm against
  // the callers before relying on them being invoked polymorphically.
  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                      int &FrameIndex) const;
  bool hasStoreFromStackSlot(const MachineInstr *MI,
                             const MachineMemOperand *&MMO,
                             int &FrameIndex) const;

  MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                        MachineBasicBlock::iterator &MBBI,
                        LiveVariables *LV) const override;

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;
  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

protected:
  // Fold a frame-index (spill slot) memory operand into \p MI.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex) const override;
  // Fold the memory operand of \p LoadMI into \p MI.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      MachineInstr *LoadMI) const override;

public:
  /// \returns the smallest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexBegin(const MachineFunction &MF) const;

  /// \returns the largest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexEnd(const MachineFunction &MF) const;

  bool canFoldMemoryOperand(const MachineInstr *MI,
                            ArrayRef<unsigned> Ops) const override;
  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                           SmallVectorImpl<MachineInstr *> &NewMIs) const override;
  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode *> &NewNodes) const override;
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = nullptr) const override;

  bool enableClusterLoads() const override;

  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;
  bool isPredicated(const MachineInstr *MI) const override;
  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;
  bool DefinesPredicate(MachineInstr *MI,
                        std::vector<MachineOperand> &Pred) const override;
  bool isPredicable(MachineInstr *MI) const override;
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  // Helper functions that check the opcode for status information
  bool isRegisterStore(const MachineInstr &MI) const;
  bool isRegisterLoad(const MachineInstr &MI) const;

  /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
  /// Return -1 if the target-specific opcode for the pseudo instruction does
  /// not exist. If Opcode is not a pseudo instruction, this is identity.
  int pseudoToMCOpcode(int Opcode) const;

  /// \brief Return the descriptor of the target-specific machine instruction
  /// that corresponds to the specified pseudo or native opcode.
  ///
  /// NOTE(review): pseudoToMCOpcode is documented above to return -1 when no
  /// target-specific opcode exists; that -1 is passed straight to get(),
  /// indexing out of range. Callers presumably only use opcodes known to map
  /// successfully — confirm before using with arbitrary opcodes.
  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
    return get(pseudoToMCOpcode(Opcode));
  }

//===---------------------------------------------------------------------===//
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//

  /// \returns true if \p opcode is a move instruction on this subtarget.
  virtual bool isMov(unsigned opcode) const = 0;

  /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
  /// \p Channel
  ///
  /// We model indirect addressing using a virtual address space that can be
  /// accessed with loads and stores. The "Indirect Address" is the memory
  /// address in this virtual address space that maps to the given \p RegIndex
  /// and \p Channel.
  virtual unsigned calculateIndirectAddress(unsigned RegIndex,
                                            unsigned Channel) const = 0;

  /// \returns The register class to be used for loading and storing values
  /// from an "Indirect Address" .
  virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;

  /// \brief Build instruction(s) for an indirect register write.
  ///
  /// \returns The instruction that performs the indirect register write
  virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
                                                 MachineBasicBlock::iterator I,
                                                 unsigned ValueReg, unsigned Address,
                                                 unsigned OffsetReg) const = 0;

  /// \brief Build instruction(s) for an indirect register read.
  ///
  /// \returns The instruction that performs the indirect register read
  virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
                                                MachineBasicBlock::iterator I,
                                                unsigned ValueReg, unsigned Address,
                                                unsigned OffsetReg) const = 0;

  /// \brief Build a MOV instruction.
  virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator I,
                                      unsigned DstReg, unsigned SrcReg) const = 0;

  /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
  /// equivalent opcode that writes \p Channels Channels.
  int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;

};

namespace AMDGPU {
  // TableGen-generated: index of the operand named \p NamedIndex within the
  // instruction \p Opcode, or -1 if the instruction has no such operand.
  int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
} // End namespace AMDGPU

} // End llvm namespace

// TSFlags bits marking register-load / register-store pseudo instructions
// (queried by isRegisterLoad / isRegisterStore above).
#define AMDGPU_FLAG_REGISTER_LOAD  (UINT64_C(1) << 63)
#define AMDGPU_FLAG_REGISTER_STORE (UINT64_C(1) << 62)

#endif