//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI,
                                   MachineInstr &InsertPt, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI)) {
    unsigned NewReg = MRI.createVirtualRegister(&RegClass);
    BuildMI(*InsertPt.getParent(), InsertPt, InsertPt.getDebugLoc(),
            TII.get(TargetOpcode::COPY), NewReg)
        .addReg(Reg);
    return NewReg;
  }

  return Reg;
}
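// An illustrative sketch (not compiled here) of how a caller such as a
// target's instruction selector might use constrainRegToClass above;
// `SelectedMI` and `TargetRC` are hypothetical placeholders for an
// already-selected instruction and an allocatable target register class:
//
//   unsigned Src = SelectedMI.getOperand(1).getReg();
//   unsigned Constrained =
//       constrainRegToClass(MRI, TII, RBI, SelectedMI, Src, TargetRC);
//   SelectedMI.getOperand(1).setReg(Constrained);
//
// If Src cannot be constrained in place, the helper inserts a COPY into a
// fresh virtual register of TargetRC and returns that register instead.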

unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}
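// A minimal sketch of a typical call site for constrainOperandRegClass above,
// assuming `MI` is a selected instruction whose operand `OpIdx` must satisfy
// the register class required by its MCInstrDesc:
//
//   MachineOperand &MO = MI.getOperand(OpIdx);
//   MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, MI,
//                                      MI.getDesc(), MO, OpIdx));
//
// This mirrors what constrainSelectedInstRegOperands below does for each
// explicit register operand of an instruction.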

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
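// A hedged example of the common idiom in a target InstructionSelector (the
// instruction `I` and the surrounding select() are placeholders): after
// mutating a generic instruction into a target instruction, constrain all of
// its operands in one go and use the result as the selection outcome:
//
//   I.setDesc(TII.get(SomeTargetOpcode));  // SomeTargetOpcode is illustrative
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
//
// Besides constraining vreg classes, the call also ties uses to defs as
// required by the MCInstrDesc.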

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
        !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
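// An illustrative sketch of how a combine or post-selection cleanup loop
// (hypothetical here) might use isTriviallyDead above after rewriting uses:
//
//   if (isTriviallyDead(MI, MRI))
//     MI.eraseFromParent();
//
// Only side-effect-free instructions whose virtual register defs have no
// remaining non-debug uses are reported as dead.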

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}
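// A hedged usage sketch for the convenience overload above, roughly as a
// GlobalISel pass might report an instruction it cannot handle (the pass
// name string and the surrounding context are illustrative):
//
//   reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI);
//
// If TPC.isGlobalISelAbortEnabled() this ends in report_fatal_error;
// otherwise the function is marked FailedISel and a missed-optimization
// remark is emitted so the SelectionDAG fallback can take over.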

Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;

  if (MI->getOperand(1).isImm())
    return MI->getOperand(1).getImm();

  if (MI->getOperand(1).isCImm() &&
      MI->getOperand(1).getCImm()->getBitWidth() <= 64)
    return MI->getOperand(1).getCImm()->getSExtValue();

  return None;
}
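// A minimal sketch of matching a constant operand with getConstantVRegVal
// above (the compare-against-zero shape is purely illustrative):
//
//   if (auto MaybeCst = getConstantVRegVal(MI.getOperand(2).getReg(), MRI))
//     if (*MaybeCst == 0) {
//       // ... fold the operation for a zero operand ...
//     }
//
// The helper only looks at a G_CONSTANT def and returns None for non-integer
// immediates or integers wider than 64 bits.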

const llvm::ConstantFP *
llvm::getConstantFPVRegVal(unsigned VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
                                       const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
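// An illustrative use of getOpcodeDef above, looking through same-type COPYs
// to find a G_TRUNC feeding the current instruction (the surrounding combine
// is hypothetical):
//
//   if (MachineInstr *TruncMI = getOpcodeDef(TargetOpcode::G_TRUNC,
//                                            MI.getOperand(1).getReg(), MRI)) {
//     // ... inspect TruncMI's source operand ...
//   }
//
// COPYs are only skipped while source and destination LLTs match, so the
// returned def produces a value of the original type.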

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
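// A short sketch of using getAPFloatFromSize above when materializing an FP
// immediate of a given bit width (DstTy and the value 1.0 are illustrative):
//
//   APFloat One = getAPFloatFromSize(1.0, DstTy.getSizeInBits());
//   auto *CFP = ConstantFP::get(MF.getFunction().getContext(), One);
//
// Any Size other than 16, 32 or 64 hits the llvm_unreachable above.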

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}
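// A hedged sketch of how a combiner might use ConstantFoldBinOp above to fold
// a generic binary operation whose sources are both G_CONSTANTs (`MI` and the
// replacement step are placeholders from the caller's context):
//
//   if (Optional<APInt> Folded =
//           ConstantFoldBinOp(MI.getOpcode(), MI.getOperand(1).getReg(),
//                             MI.getOperand(2).getReg(), MRI)) {
//     // Replace MI with a G_CONSTANT carrying *Folded.
//   }
//
// Note that division and remainder by zero deliberately return None rather
// than folding to an arbitrary value.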

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}