RISCVInstrInfo.cpp revision 360784
//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVInstrInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#include "RISCVGenInstrInfo.inc"

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

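// If MI is a load from a stack slot (frame-index base with a zero offset),
// return the destination register and set FrameIndex to the slot; otherwise
// return 0.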
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
  case RISCV::LH:
  case RISCV::LHU:
  case RISCV::LW:
  case RISCV::FLW:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLD:
    break;
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

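// If MI is a store to a stack slot (frame-index base with a zero offset),
// return the register being stored and set FrameIndex to the slot; otherwise
// return 0.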
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::FSW:
  case RISCV::SD:
  case RISCV::FSD:
    break;
  }

  if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
      MI.getOperand(1).getImm() == 0) {
    FrameIndex = MI.getOperand(0).getIndex();
    return MI.getOperand(2).getReg();
  }

  return 0;
}

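// GPR-to-GPR copies are emitted as "addi rd, rs, 0"; FPR-to-FPR copies use a
// sign-injection of the source register onto itself (fsgnj.s / fsgnj.d).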
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies
  unsigned Opc;
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_S;
  else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_D;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addReg(SrcReg, getKillRegState(KillSrc));
}

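// Spill SrcReg to stack slot FI, using SW/SD for GPRs (depending on XLEN) and
// FSW/FSD for FPRs.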
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSD;
  else
    llvm_unreachable("Can't store this register to stack slot");

  BuildMI(MBB, I, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(IsKill))
      .addFrameIndex(FI)
      .addImm(0);
}

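// Reload DstReg from stack slot FI, using LW/LD for GPRs (depending on XLEN)
// and FLW/FLD for FPRs.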
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          unsigned DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLD;
  else
    llvm_unreachable("Can't load this register from stack slot");

  BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
}

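// Materialize the immediate Val into DstReg using the instruction sequence
// computed by RISCVMatInt (LUI/ADDI(W)/SLLI), writing intermediate results to
// a scratch virtual register.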
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0;

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(Seq.size() > 0);

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
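// For example, "beq a0, a1, .LBB0_1" is recorded as Target = .LBB0_1 and
// Cond = {BEQ, a0, a1}.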
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

static unsigned getOppositeBranchOpcode(int Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCV::BEQ:
    return RISCV::BNE;
  case RISCV::BNE:
    return RISCV::BEQ;
  case RISCV::BLT:
    return RISCV::BGE;
  case RISCV::BGE:
    return RISCV::BLT;
  case RISCV::BLTU:
    return RISCV::BGEU;
  case RISCV::BGEU:
    return RISCV::BLTU;
  }
}

bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}

unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch into the end of the given MachineBasicBlock, returning
// the number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISCV branch conditions have three components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one or two-way conditional branch.
  unsigned Opc = Cond[0].getImm();
  MachineInstr &CondMI =
      *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}

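// Expand an unconditional branch whose target is out of range for a single
// jump by materializing the high part of the target address with LUI and
// jumping indirectly (PseudoBRIND) with the low part, using a register
// obtained from the scavenger.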
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());

  if (TM.isPositionIndependent())
    report_fatal_error("Unable to insert indirect branch");

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
                             .addMBB(&DestBB, RISCVII::MO_HI);
  BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
      .addReg(ScratchReg, RegState::Kill)
      .addMBB(&DestBB, RISCVII::MO_LO);

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                LuiMI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}

MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  }
}

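// Return the size of MI in bytes. Pseudo call and load-address instructions
// expand to two real instructions (8 bytes), and instructions that the
// compression patterns can shrink count as 2 bytes when the C extension is
// enabled.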
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: {
    if (MI.getParent() && MI.getParent()->getParent()) {
      const auto MF = MI.getMF();
      const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
      const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
      const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
      const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
      if (isCompressibleInst(MI, &ST, MRI, STI))
        return 2;
    }
    return get(Opcode).getSize();
  }
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
    return 8;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  }
}

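// ADDI, ORI and XORI with X0 as the source register simply materialize their
// immediate, so treat them as being as cheap as a move.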
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
  }
  return MI.isAsCheapAsAMove();
}

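// Verify that each immediate operand lies within the range implied by its
// operand type (e.g. simm12, uimm5, or the shifted branch/jump offsets).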
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");
        case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM5:
          Ok = isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM12:
          Ok = isUInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM13_LSB0:
          Ok = isShiftedInt<12, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM20:
          Ok = isUInt<20>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM21_LSB0:
          Ok = isShiftedInt<20, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          if (STI.getTargetTriple().isArch64Bit())
            Ok = isUInt<6>(Imm);
          else
            Ok = isUInt<5>(Imm);
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  return true;
}

// Return true if we can determine the base operand and byte offset of the
// given load/store, as well as the memory width. Width is the size of memory
// that is being loaded/stored.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    unsigned &Width, const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  // Here we assume the standard RISC-V ISA, which uses a base+offset
  // addressing mode. You'll need to relax these conditions to support custom
  // load/store instructions.
  if (LdSt.getNumExplicitOperands() != 3)
    return false;
  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
    return false;

  if (!LdSt.hasOneMemOperand())
    return false;

  Width = (*LdSt.memoperands_begin())->getSize();
  BaseReg = &LdSt.getOperand(1);
  Offset = LdSt.getOperand(2).getImm();
  return true;
}

bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If the
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

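// Split an operand's target flags into the direct flag (e.g. riscv-hi,
// riscv-call) and any remaining bitmask bits.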
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(TargetFlags);
}

bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  if (F.hasSection())
    return false;

  // It's safe to outline from MF.
  return true;
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return true;
}

// Enum values indicating how an outlined call should be constructed.
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault
};

outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {

  // First we need to filter out candidates where the X5 register (i.e. t0)
  // can't be used to set up the function call.
  auto CannotInsertCall = [](outliner::Candidate &C) {
    const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();

    C.initLRU(*TRI);
    LiveRegUnits LRU = C.LRU;
    return !LRU.available(RISCV::X5);
  };

  RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
                                            RepeatedSequenceLocs.end(),
                                            CannotInsertCall),
                             RepeatedSequenceLocs.end());

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < 2)
    return outliner::OutlinedFunction();

  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  for (; I != E; ++I)
    SequenceSize += getInstSizeInBytes(*I);

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
          .getFeatureBits()[RISCV::FeatureStdExtC])
    FrameOverhead = 2;

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    FrameOverhead, MachineOutlinerDefault);
}

outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      return outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // We need support for tail calls to outlined functions before return
  // statements can be allowed.
  if (MI.isReturn())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register, which we use to hold the return
  // address for these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
      return outliner::InstrType::Illegal;

  // Don't allow instructions which won't be materialized to impact outlining
  // analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}

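// Build the epilogue for an outlined function: strip any CFI instructions and
// return to the call site with "jr t0" (jalr x0, 0(x5)), matching the
// "call t0" emitted by insertOutlinedCall.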
void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {

  // Strip out any CFI instructions
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }

  // Add in a return instruction to the end of the outlined frame.
  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
      .addReg(RISCV::X0, RegState::Define)
      .addReg(RISCV::X5)
      .addImm(0));
}

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Add in a call instruction to the outlined function at the given location.
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
}