//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallBitVector.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Analysis/MemoryLocation.h"
21#include "llvm/CodeGen/MachineBasicBlock.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineFunction.h"
24#include "llvm/CodeGen/MachineInstrBuilder.h"
25#include "llvm/CodeGen/MachineInstrBundle.h"
26#include "llvm/CodeGen/MachineMemOperand.h"
27#include "llvm/CodeGen/MachineModuleInfo.h"
28#include "llvm/CodeGen/MachineOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/CodeGen/StackMaps.h"
32#include "llvm/CodeGen/TargetInstrInfo.h"
33#include "llvm/CodeGen/TargetRegisterInfo.h"
34#include "llvm/CodeGen/TargetSubtargetInfo.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugInfoMetadata.h"
37#include "llvm/IR/DebugLoc.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/InlineAsm.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/Metadata.h"
42#include "llvm/IR/Module.h"
43#include "llvm/IR/ModuleSlotTracker.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/MC/MCInstrDesc.h"
46#include "llvm/MC/MCRegisterInfo.h"
47#include "llvm/Support/Casting.h"
48#include "llvm/Support/Compiler.h"
49#include "llvm/Support/Debug.h"
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/FormattedStream.h"
52#include "llvm/Support/LowLevelTypeImpl.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetMachine.h"
55#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <cstring>
59#include <utility>
60
61using namespace llvm;
62
63static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
64  if (const MachineBasicBlock *MBB = MI.getParent())
65    if (const MachineFunction *MF = MBB->getParent())
66      return MF;
67  return nullptr;
68}
69
70// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
71// it.
72static void tryToGetTargetInfo(const MachineInstr &MI,
73                               const TargetRegisterInfo *&TRI,
74                               const MachineRegisterInfo *&MRI,
75                               const TargetIntrinsicInfo *&IntrinsicInfo,
76                               const TargetInstrInfo *&TII) {
77
78  if (const MachineFunction *MF = getMFIfAvailable(MI)) {
79    TRI = MF->getSubtarget().getRegisterInfo();
80    MRI = &MF->getRegInfo();
81    IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
82    TII = MF->getSubtarget().getInstrInfo();
83  }
84}
85
86void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
87  for (MCPhysReg ImpDef : MCID->implicit_defs())
88    addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
89  for (MCPhysReg ImpUse : MCID->implicit_uses())
90    addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
91}
92
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
                           DebugLoc DL, bool NoImp)
    : MCID(&TID), DbgLoc(std::move(DL)), DebugInstrNum(0) {
  // MachineInstrs are allocation-pooled and never destructed individually, so
  // the DebugLoc must not own anything requiring cleanup.
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands: the declared explicit
  // operands plus every implicit def and use from the descriptor.
  if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
                        MCID->implicit_uses().size()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  // Unless the caller asked for a bare instruction (NoImp), add the implicit
  // def/use register operands now.
  if (!NoImp)
    addImplicitDefUseOperands(MF);
}
111
/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), Info(MI.Info), DbgLoc(MI.getDebugLoc()),
      DebugInstrNum(0) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Allocate exactly as many operand slots as the source instruction has.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably.
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
138
139void MachineInstr::moveBefore(MachineInstr *MovePos) {
140  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
141}
142
143/// getRegInfo - If this instruction is embedded into a MachineFunction,
144/// return the MachineRegisterInfo object for the current function, otherwise
145/// return null.
146MachineRegisterInfo *MachineInstr::getRegInfo() {
147  if (MachineBasicBlock *MBB = getParent())
148    return &MBB->getParent()->getRegInfo();
149  return nullptr;
150}
151
152void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
153  for (MachineOperand &MO : operands())
154    if (MO.isReg())
155      MRI.removeRegOperandFromUseList(&MO);
156}
157
158void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
159  for (MachineOperand &MO : operands())
160    if (MO.isReg())
161      MRI.addRegOperandToUseList(&MO);
162}
163
/// Add the specified operand to this instruction. This overload requires the
/// instruction to already be inserted in a block/function; for dangling
/// instructions use MachineInstrBuilder (or the MachineFunction overload).
void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  // Delegate to the overload that can (re)allocate the operand array.
  addOperand(*MF, Op);
}
171
/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  // MRI knows how to keep register use lists consistent while relocating
  // operands, so prefer it when available.
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);
  // MachineOperand is a trivially copyable type so we can just use memmove.
  assert(Dst && Src && "Unknown operands");
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}
182
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk backwards past the trailing implicit register operands.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

  // OpNo now points as the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((MCID->isVariadic() || OpNo < MCID->getNumOperands() ||
          Op.isValidExcessOperand()) &&
         "Trying to add an operand to a machine instr that is already done!");

  // May be null when the instruction is not yet inserted in a function.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    // Grow to the next capacity step and copy over the prefix.
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  // Note: if no reallocation happened, this shifts within the same array
  // (moveOperands supports overlapping ranges).
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
    // Ensure debug instructions set debug flag on register uses.
    if (NewMO->isUse() && isDebugInstr())
      NewMO->setIsDebug();
  }
}
278
/// Remove the operand at index OpNo, shifting all later operands down by one.
/// The operand must not be tied, and no operand after it may be tied either
/// (shifting would break the tie encoding).
void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // Unlink a register operand from its use list before its slot is reused.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Shift the trailing operands down over the removed slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
302
/// Rebuild the packed "extra info" for this instruction from the given parts.
/// Depending on how many pieces are present, the result is stored either
/// inline in the PointerSumType (single pointer) or out of line in a
/// MF-allocated ExtraInfo node.
void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker, MDNode *PCSections,
                                uint32_t CFIType) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  bool HasPCSections = PCSections != nullptr;
  bool HasCFIType = CFIType != 0;
  // Count how many pointer-sized pieces of info we need to carry.
  int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
                    HasHeapAllocMarker + HasPCSections + HasCFIType;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasHeapAllocMarker || HasPCSections ||
           HasCFIType) {
    Info.set<EIIK_OutOfLine>(
        MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
                             HeapAllocMarker, PCSections, CFIType));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}
343
344void MachineInstr::dropMemRefs(MachineFunction &MF) {
345  if (memoperands_empty())
346    return;
347
348  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
349               getHeapAllocMarker(), getPCSections(), getCFIType());
350}
351
352void MachineInstr::setMemRefs(MachineFunction &MF,
353                              ArrayRef<MachineMemOperand *> MMOs) {
354  if (MMOs.empty()) {
355    dropMemRefs(MF);
356    return;
357  }
358
359  setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
360               getHeapAllocMarker(), getPCSections(), getCFIType());
361}
362
363void MachineInstr::addMemOperand(MachineFunction &MF,
364                                 MachineMemOperand *MO) {
365  SmallVector<MachineMemOperand *, 2> MMOs;
366  MMOs.append(memoperands_begin(), memoperands_end());
367  MMOs.push_back(MO);
368  setMemRefs(MF, MMOs);
369}
370
371void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
372  if (this == &MI)
373    // Nothing to do for a self-clone!
374    return;
375
376  assert(&MF == MI.getMF() &&
377         "Invalid machine functions when cloning memory refrences!");
378  // See if we can just steal the extra info already allocated for the
379  // instruction. We can do this whenever the pre- and post-instruction symbols
380  // are the same (including null).
381  if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
382      getPostInstrSymbol() == MI.getPostInstrSymbol() &&
383      getHeapAllocMarker() == MI.getHeapAllocMarker() &&
384      getPCSections() == MI.getPCSections()) {
385    Info = MI.Info;
386    return;
387  }
388
389  // Otherwise, fall back on a copy-based clone.
390  setMemRefs(MF, MI.memoperands());
391}
392
393/// Check to see if the MMOs pointed to by the two MemRefs arrays are
394/// identical.
395static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
396                             ArrayRef<MachineMemOperand *> RHS) {
397  if (LHS.size() != RHS.size())
398    return false;
399
400  auto LHSPointees = make_pointee_range(LHS);
401  auto RHSPointees = make_pointee_range(RHS);
402  return std::equal(LHSPointees.begin(), LHSPointees.end(),
403                    RHSPointees.begin());
404}
405
/// Merge the memory references of all instructions in \p MIs onto this
/// instruction. An empty memoperand list on any input means "may access
/// anything", so merging with it must drop all references.
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}
456
457void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
458  // Do nothing if old and new symbols are the same.
459  if (Symbol == getPreInstrSymbol())
460    return;
461
462  // If there was only one symbol and we're removing it, just clear info.
463  if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
464    Info.clear();
465    return;
466  }
467
468  setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
469               getHeapAllocMarker(), getPCSections(), getCFIType());
470}
471
472void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
473  // Do nothing if old and new symbols are the same.
474  if (Symbol == getPostInstrSymbol())
475    return;
476
477  // If there was only one symbol and we're removing it, just clear info.
478  if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
479    Info.clear();
480    return;
481  }
482
483  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
484               getHeapAllocMarker(), getPCSections(), getCFIType());
485}
486
487void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
488  // Do nothing if old and new symbols are the same.
489  if (Marker == getHeapAllocMarker())
490    return;
491
492  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
493               Marker, getPCSections(), getCFIType());
494}
495
496void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
497  // Do nothing if old and new symbols are the same.
498  if (PCSections == getPCSections())
499    return;
500
501  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
502               getHeapAllocMarker(), PCSections, getCFIType());
503}
504
505void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
506  // Do nothing if old and new types are the same.
507  if (Type == getCFIType())
508    return;
509
510  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
511               getHeapAllocMarker(), getPCSections(), Type);
512}
513
/// Copy MI's pre/post-instruction symbols, heap-alloc marker, and PC sections
/// onto this instruction. Memory operands and the CFI type are not touched.
void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
                                     const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning instruction symbols!");

  // Each setter rebuilds the packed extra info while preserving the fields it
  // does not touch, so the order here does not affect the final state.
  setPreInstrSymbol(MF, MI.getPreInstrSymbol());
  setPostInstrSymbol(MF, MI.getPostInstrSymbol());
  setHeapAllocMarker(MF, MI.getHeapAllocMarker());
  setPCSections(MF, MI.getPCSections());
}
528
529uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
530  // For now, the just return the union of the flags. If the flags get more
531  // complicated over time, we might need more logic here.
532  return getFlags() | Other.getFlags();
533}
534
535uint16_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
536  uint16_t MIFlags = 0;
537  // Copy the wrapping flags.
538  if (const OverflowingBinaryOperator *OB =
539          dyn_cast<OverflowingBinaryOperator>(&I)) {
540    if (OB->hasNoSignedWrap())
541      MIFlags |= MachineInstr::MIFlag::NoSWrap;
542    if (OB->hasNoUnsignedWrap())
543      MIFlags |= MachineInstr::MIFlag::NoUWrap;
544  }
545
546  // Copy the exact flag.
547  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
548    if (PE->isExact())
549      MIFlags |= MachineInstr::MIFlag::IsExact;
550
551  // Copy the fast-math flags.
552  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
553    const FastMathFlags Flags = FP->getFastMathFlags();
554    if (Flags.noNaNs())
555      MIFlags |= MachineInstr::MIFlag::FmNoNans;
556    if (Flags.noInfs())
557      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
558    if (Flags.noSignedZeros())
559      MIFlags |= MachineInstr::MIFlag::FmNsz;
560    if (Flags.allowReciprocal())
561      MIFlags |= MachineInstr::MIFlag::FmArcp;
562    if (Flags.allowContract())
563      MIFlags |= MachineInstr::MIFlag::FmContract;
564    if (Flags.approxFunc())
565      MIFlags |= MachineInstr::MIFlag::FmAfn;
566    if (Flags.allowReassoc())
567      MIFlags |= MachineInstr::MIFlag::FmReassoc;
568  }
569
570  return MIFlags;
571}
572
/// Overwrite this instruction's flags wholesale with the MIFlag bits derived
/// from IR instruction \p I.
void MachineInstr::copyIRFlags(const Instruction &I) {
  Flags = copyFlagsFromInstruction(I);
}
576
/// Walk the instructions of the bundle headed by this instruction, testing
/// each one's MCInstrDesc flags against \p Mask. With AnyInBundle the query
/// succeeds as soon as one instruction matches; with AllInBundle every
/// non-BUNDLE instruction must match.
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      // The BUNDLE pseudo itself is exempt from the AllInBundle requirement.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}
592
/// Compare this instruction with \p Other for structural identity, with the
/// strictness of register-operand comparison controlled by \p Check
/// (e.g. IgnoreDefs, IgnoreVRegDefs, CheckKillDead). Bundles are compared
/// member-wise.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analysed the last intruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    // Non-register operands always compare exactly.
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only skip the comparison when *both* defs are virtual registers.
        if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  // If pre- or post-instruction symbols do not match then the two instructions
  // are not identical.
  if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
      getPostInstrSymbol() != Other.getPostInstrSymbol())
    return false;
  // Call instructions with different CFI types are not identical.
  if (isCall() && getCFIType() != Other.getCFIType())
    return false;

  return true;
}
671
/// Return true when this and \p Other are debug-value-like instructions
/// describing the same variable with equivalent locations and expressions.
bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
  // Both must be debug values for the comparison to make sense.
  if (!isDebugValueLike() || !Other.isDebugValueLike())
    return false;
  if (getDebugLoc() != Other.getDebugLoc())
    return false;
  if (getDebugVariable() != Other.getDebugVariable())
    return false;
  if (getNumDebugOperands() != Other.getNumDebugOperands())
    return false;
  // All location operands must match pairwise.
  for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
    if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
      return false;
  // Expressions are compared modulo indirection, so a direct and an indirect
  // form can still be equivalent.
  if (!DIExpression::isEqualExpression(
          getDebugExpression(), isIndirectDebugValue(),
          Other.getDebugExpression(), Other.isIndirectDebugValue()))
    return false;
  return true;
}
690
/// Return the enclosing MachineFunction. Requires this instruction to be
/// inserted in a block that is itself inserted in a function.
const MachineFunction *MachineInstr::getMF() const {
  return getParent()->getParent();
}
694
/// Unlink this instruction from its basic block without deleting it, and
/// return it. Bundled siblings are removed along with the bundle header.
MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}
699
/// Unlink this single instruction (even from inside a bundle) without
/// deleting it, and return it.
MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}
704
/// Unlink this instruction from its basic block and delete it.
void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}
709
/// Unlink this single instruction (even from inside a bundle) and delete it.
void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}
714
715bool MachineInstr::isCandidateForCallSiteEntry(QueryType Type) const {
716  if (!isCall(Type))
717    return false;
718  switch (getOpcode()) {
719  case TargetOpcode::PATCHPOINT:
720  case TargetOpcode::STACKMAP:
721  case TargetOpcode::STATEPOINT:
722  case TargetOpcode::FENTRY_CALL:
723    return false;
724  }
725  return true;
726}
727
728bool MachineInstr::shouldUpdateCallSiteInfo() const {
729  if (isBundle())
730    return isCandidateForCallSiteEntry(MachineInstr::AnyInBundle);
731  return isCandidateForCallSiteEntry();
732}
733
/// Count this instruction's explicit operands. For non-variadic instructions
/// this is simply the MCInstrDesc count; for variadics, extra explicit
/// operands before the first implicit register operand are counted too.
unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    if (MO.isReg() && MO.isImplicit())
      break;
    ++NumOperands;
  }
  return NumOperands;
}
752
/// Count this instruction's explicit register definitions. For variadic
/// instructions, extra explicit defs after the declared ones are counted
/// until the first non-def or implicit operand.
unsigned MachineInstr::getNumExplicitDefs() const {
  unsigned NumDefs = MCID->getNumDefs();
  if (!MCID->isVariadic())
    return NumDefs;

  for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    // Explicit defs form a contiguous prefix of the operand list.
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;
    ++NumDefs;
  }
  return NumDefs;
}
766
767void MachineInstr::bundleWithPred() {
768  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
769  setFlag(BundledPred);
770  MachineBasicBlock::instr_iterator Pred = getIterator();
771  --Pred;
772  assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
773  Pred->setFlag(BundledSucc);
774}
775
776void MachineInstr::bundleWithSucc() {
777  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
778  setFlag(BundledSucc);
779  MachineBasicBlock::instr_iterator Succ = getIterator();
780  ++Succ;
781  assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
782  Succ->setFlag(BundledPred);
783}
784
785void MachineInstr::unbundleFromPred() {
786  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
787  clearFlag(BundledPred);
788  MachineBasicBlock::instr_iterator Pred = getIterator();
789  --Pred;
790  assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
791  Pred->clearFlag(BundledSucc);
792}
793
794void MachineInstr::unbundleFromSucc() {
795  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
796  clearFlag(BundledSucc);
797  MachineBasicBlock::instr_iterator Succ = getIterator();
798  ++Succ;
799  assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
800  Succ->clearFlag(BundledPred);
801}
802
803bool MachineInstr::isStackAligningInlineAsm() const {
804  if (isInlineAsm()) {
805    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
806    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
807      return true;
808  }
809  return false;
810}
811
812InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
813  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
814  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
815  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
816}
817
/// For inline asm, find the index of the flag-immediate operand that governs
/// operand \p OpIdx. Optionally reports the zero-based operand-group number
/// through \p GroupNo. Returns -1 for the fixed leading operands or the
/// trailing implicit register operands.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  // Walk group by group: each group is one flag immediate followed by its
  // register operands.
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
845
846const DILabel *MachineInstr::getDebugLabel() const {
847  assert(isDebugLabel() && "not a DBG_LABEL");
848  return cast<DILabel>(getOperand(0).getMetadata());
849}
850
851const MachineOperand &MachineInstr::getDebugVariableOp() const {
852  assert((isDebugValueLike()) && "not a DBG_VALUE*");
853  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
854  return getOperand(VariableOp);
855}
856
857MachineOperand &MachineInstr::getDebugVariableOp() {
858  assert((isDebugValueLike()) && "not a DBG_VALUE*");
859  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
860  return getOperand(VariableOp);
861}
862
863const DILocalVariable *MachineInstr::getDebugVariable() const {
864  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
865}
866
867const MachineOperand &MachineInstr::getDebugExpressionOp() const {
868  assert((isDebugValueLike()) && "not a DBG_VALUE*");
869  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
870  return getOperand(ExpressionOp);
871}
872
873MachineOperand &MachineInstr::getDebugExpressionOp() {
874  assert((isDebugValueLike()) && "not a DBG_VALUE*");
875  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
876  return getOperand(ExpressionOp);
877}
878
879const DIExpression *MachineInstr::getDebugExpression() const {
880  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
881}
882
883bool MachineInstr::isDebugEntryValue() const {
884  return isDebugValue() && getDebugExpression()->isEntryValue();
885}
886
/// Compute the register class that operand OpIdx is constrained to, or
/// nullptr when no constraint can be determined. Normal instructions get the
/// answer from their MCInstrDesc; inline asm decodes it from the operand
/// group's flag word.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  // Only register-kind operand groups can carry an explicit register-class
  // ID in their flag word.
  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
926
/// Accumulate the register-class constraints that this instruction (or, when
/// ExploreBundle is set, the whole bundle containing it) places on Reg,
/// starting from CurRC. Returns nullptr as soon as the constraints become
/// unsatisfiable.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operands inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    // Note: each constraint is applied on the operand's own parent
    // instruction, since bundled instructions may differ from *this.
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
943
944const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
945    unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
946    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
947  assert(CurRC && "Invalid initial register class");
948  // Check if Reg is constrained by some of its use/def from MI.
949  const MachineOperand &MO = getOperand(OpIdx);
950  if (!MO.isReg() || MO.getReg() != Reg)
951    return CurRC;
952  // If yes, accumulate the constraints through the operand.
953  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
954}
955
956const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
957    unsigned OpIdx, const TargetRegisterClass *CurRC,
958    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
959  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
960  const MachineOperand &MO = getOperand(OpIdx);
961  assert(MO.isReg() &&
962         "Cannot get register constraints for non-register operand");
963  assert(CurRC && "Invalid initial register class");
964  if (unsigned SubIdx = MO.getSubReg()) {
965    if (OpRC)
966      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
967    else
968      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
969  } else if (OpRC)
970    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
971  return CurRC;
972}
973
974/// Return the number of instructions inside the MI bundle, not counting the
975/// header instruction.
976unsigned MachineInstr::getBundleSize() const {
977  MachineBasicBlock::const_instr_iterator I = getIterator();
978  unsigned Size = 0;
979  while (I->isBundledWithSucc()) {
980    ++Size;
981    ++I;
982  }
983  return Size;
984}
985
986/// Returns true if the MachineInstr has an implicit-use operand of exactly
987/// the given register (not considering sub/super-registers).
988bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
989  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
990    const MachineOperand &MO = getOperand(i);
991    if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
992      return true;
993  }
994  return false;
995}
996
997/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
998/// the specific register or -1 if it is not found. It further tightens
999/// the search criteria to a use that kills the register if isKill is true.
1000int MachineInstr::findRegisterUseOperandIdx(
1001    Register Reg, bool isKill, const TargetRegisterInfo *TRI) const {
1002  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1003    const MachineOperand &MO = getOperand(i);
1004    if (!MO.isReg() || !MO.isUse())
1005      continue;
1006    Register MOReg = MO.getReg();
1007    if (!MOReg)
1008      continue;
1009    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1010      if (!isKill || MO.isKill())
1011        return i;
1012  }
1013  return -1;
1014}
1015
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines. If Ops is non-null, it is filled with the indices of all
/// operands that reference Reg.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      // An undef use doesn't read the register.
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
1043
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register. With Overlap set, any
/// overlapping physical def (or clobbering regmask) matches as well.
int
MachineInstr::findRegisterDefOperandIdx(Register Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // Physical registers can also match through aliasing: any overlap when
    // Overlap is requested, otherwise only a def of a super-register.
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
1073
1074/// findFirstPredOperandIdx() - Find the index of the first operand in the
1075/// operand list that is used to represent the predicate. It returns -1 if
1076/// none is found.
1077int MachineInstr::findFirstPredOperandIdx() const {
1078  // Don't call MCID.findFirstPredOperandIdx() because this variant
1079  // is sometimes called on an instruction that's not yet complete, and
1080  // so the number of operands is less than the MCID indicates. In
1081  // particular, the PTX target does this.
1082  const MCInstrDesc &MCID = getDesc();
1083  if (MCID.isPredicable()) {
1084    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1085      if (MCID.operands()[i].isPredicate())
1086        return i;
1087  }
1088
1089  return -1;
1090}
1091
// MachineOperand::TiedTo is 4 bits wide, so 15 is the largest encodable
// value; it doubles as the "tied operand index is out of range" sentinel.
constexpr unsigned TiedMax = 15;
1094
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  // Small def indices are encoded directly, biased by one so that zero can
  // mean "not tied".
  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands,
    // statepoint tied operands are trivial to match (1-1 reg def with reg use),
    // but on normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
           "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
1130
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist. Handles three encodings: the direct in-range TiedTo
/// field, STATEPOINT's 1-1 def/GC-pointer pairing, and inline asm's operand
/// group descriptors.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      // Skip non-register GC pointer meta-args; only register operands
      // participate in tying.
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1207
1208/// clearKillInfo - Clears kill flags on all operands.
1209///
1210void MachineInstr::clearKillInfo() {
1211  for (MachineOperand &MO : operands()) {
1212    if (MO.isReg() && MO.isUse())
1213      MO.setIsKill(false);
1214  }
1215}
1216
1217void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1218                                      unsigned SubIdx,
1219                                      const TargetRegisterInfo &RegInfo) {
1220  if (ToReg.isPhysical()) {
1221    if (SubIdx)
1222      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1223    for (MachineOperand &MO : operands()) {
1224      if (!MO.isReg() || MO.getReg() != FromReg)
1225        continue;
1226      MO.substPhysReg(ToReg, RegInfo);
1227    }
1228  } else {
1229    for (MachineOperand &MO : operands()) {
1230      if (!MO.isReg() || MO.getReg() != FromReg)
1231        continue;
1232      MO.substVirtReg(ToReg, SubIdx, RegInfo);
1233    }
1234  }
1235}
1236
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination. As a side effect,
/// SawStore is set when this instruction itself acts as a store barrier.
bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    // Record the barrier for the caller before refusing the move.
    SawStore = true;
    return false;
  }

  if (isPosition() || isDebugInstr() || isTerminator() ||
      mayRaiseFPException() || hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load.  If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad())
    // Otherwise, this is a real load.  If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1268
/// Conservatively decide whether two memory operands may reference
/// overlapping memory. Returns true ("may alias") unless an overlap can be
/// ruled out locally or by alias analysis.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
                                 bool UseTBAA, const MachineMemOperand *MMOa,
                                 const MachineMemOperand *MMOb) {
  // The following interface to AA is fashioned after DAGCombiner::isAlias and
  // operates with MachineMemOperand offset with some important assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and cannot
  //     affect queries other than the trivial case of overlap checking.
  //   - These offsets never wrap and never step outside of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some memory objects. It
  // can save compile time, and possibly catch some corner cases not currently
  // covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  uint64_t WidthA = MMOa->getSize();
  uint64_t WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // Pseudo source values (e.g. stack slots) can sometimes prove
    // disjointness against IR values, or identity against each other.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: overlap is a pure interval check, but only
    // when both access widths are known.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Extend each access to start at the common MinOffset so AA sees two
  // locations with a shared base point.
  int64_t OverlapA =
      KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
  int64_t OverlapB =
      KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;

  return !AA->isNoAlias(
      MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, OverlapB,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}
1335
/// Conservatively decide whether this instruction and Other may access
/// overlapping memory. Returns true unless aliasing can be ruled out.
bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // Exclude call instruction which may alter the memory but can not be handled
  // by this function.
  if (isCall() || Other.isCall())
    return true;

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Both instructions must be memory operations to be able to alias.
  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
    return false;

  // Memory operations without memory operands may access anything. Be
  // conservative and assume `MayAlias`.
  if (memoperands_empty() || Other.memoperands_empty())
    return true;

  // Skip if there are too many memory operands.
  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
  if (NumChecks > TII->getMemOperandAACheckLimit())
    return true;

  // Check each pair of memory operands from both instructions, which can't
  // alias only if all pairs won't alias.
  for (auto *MMOa : memoperands())
    for (auto *MMOb : Other.memoperands())
      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
        return true;

  return false;
}
1379
1380/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1381/// or volatile memory reference, or if the information describing the memory
1382/// reference is not available. Return false if it is known to have no ordered
1383/// memory references.
1384bool MachineInstr::hasOrderedMemoryRef() const {
1385  // An instruction known never to access memory won't have a volatile access.
1386  if (!mayStore() &&
1387      !mayLoad() &&
1388      !isCall() &&
1389      !hasUnmodeledSideEffects())
1390    return false;
1391
1392  // Otherwise, if the instruction has no memory reference information,
1393  // conservatively assume it wasn't preserved.
1394  if (memoperands_empty())
1395    return true;
1396
1397  // Check if any of our memory operands are ordered.
1398  return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1399    return !MMO->isUnordered();
1400  });
1401}
1402
/// isDereferenceableInvariantLoad - Return true if this instruction will never
/// trap and is loading from a location whose value is invariant across a run of
/// this function. Every attached memory operand must individually be shown
/// invariant and dereferenceable for this to hold.
bool MachineInstr::isDereferenceableInvariantLoad() const {
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();

  for (MachineMemOperand *MMO : memoperands()) {
    if (!MMO->isUnordered())
      // If the memory operand has ordering side effects, we can't move the
      // instruction.  Such an instruction is technically an invariant load,
      // but the caller code would need updated to expect that.
      return false;
    // A store operand disqualifies the instruction outright.
    if (MMO->isStore()) return false;
    if (MMO->isInvariant() && MMO->isDereferenceable())
      continue;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
      if (PSV->isConstant(&MFI))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}
1441
1442/// isConstantValuePHI - If the specified instruction is a PHI that always
1443/// merges together the same virtual register, return the register, otherwise
1444/// return 0.
1445unsigned MachineInstr::isConstantValuePHI() const {
1446  if (!isPHI())
1447    return 0;
1448  assert(getNumOperands() >= 3 &&
1449         "It's illegal to have a PHI without source operands");
1450
1451  Register Reg = getOperand(1).getReg();
1452  for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1453    if (getOperand(i).getReg() != Reg)
1454      return 0;
1455  return Reg;
1456}
1457
1458bool MachineInstr::hasUnmodeledSideEffects() const {
1459  if (hasProperty(MCID::UnmodeledSideEffects))
1460    return true;
1461  if (isInlineAsm()) {
1462    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1463    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1464      return true;
1465  }
1466
1467  return false;
1468}
1469
1470bool MachineInstr::isLoadFoldBarrier() const {
1471  return mayStore() || isCall() ||
1472         (hasUnmodeledSideEffects() && !isPseudoProbe());
1473}
1474
1475/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1476///
1477bool MachineInstr::allDefsAreDead() const {
1478  for (const MachineOperand &MO : operands()) {
1479    if (!MO.isReg() || MO.isUse())
1480      continue;
1481    if (!MO.isDead())
1482      return false;
1483  }
1484  return true;
1485}
1486
1487/// copyImplicitOps - Copy implicit register operands from specified
1488/// instruction to this instruction.
1489void MachineInstr::copyImplicitOps(MachineFunction &MF,
1490                                   const MachineInstr &MI) {
1491  for (const MachineOperand &MO :
1492       llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1493    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1494      addOperand(MF, MO);
1495}
1496
1497bool MachineInstr::hasComplexRegisterTies() const {
1498  const MCInstrDesc &MCID = getDesc();
1499  if (MCID.Opcode == TargetOpcode::STATEPOINT)
1500    return true;
1501  for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1502    const auto &Operand = getOperand(I);
1503    if (!Operand.isReg() || Operand.isDef())
1504      // Ignore the defined registers as MCID marks only the uses as tied.
1505      continue;
1506    int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1507    int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1508    if (ExpectedTiedIdx != TiedIdx)
1509      return true;
1510  }
1511  return false;
1512}
1513
/// Return the LLT that should be printed for operand OpIdx in MIR output, or
/// an invalid LLT when no type should be printed (non-register operand, or a
/// generic type index that has already been emitted for this instruction, as
/// tracked by PrintedTypes).
LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
                                 const MachineRegisterInfo &MRI) const {
  const MachineOperand &Op = getOperand(OpIdx);
  if (!Op.isReg())
    return LLT{};

  // Variadic and implicit operands have no per-operand type description, so
  // always print the register's type.
  if (isVariadic() || OpIdx >= getNumExplicitOperands())
    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().operands()[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // Each generic type index is printed at most once per instruction.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1537
1538#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1539LLVM_DUMP_METHOD void MachineInstr::dump() const {
1540  dbgs() << "  ";
1541  print(dbgs());
1542}
1543
/// Recursive worker for dumpr(): print this instruction, then recurse into
/// the unique defining instructions of its virtual-register uses, indenting
/// by depth. AlreadySeenInstrs prevents revisiting (and infinite cycles).
LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
    const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
    SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
  if (Depth >= MaxDepth)
    return;
  if (!AlreadySeenInstrs.insert(this).second)
    return;
  // PadToColumn always inserts at least one space.
  // Don't mess up the alignment if we don't want any space.
  if (Depth)
    fdbgs().PadToColumn(Depth * 2);
  print(fdbgs());
  for (const MachineOperand &MO : operands()) {
    if (!MO.isReg() || MO.isDef())
      continue;
    Register Reg = MO.getReg();
    // Physical registers have no unique def to chase.
    if (Reg.isPhysical())
      continue;
    const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
    if (NewMI == nullptr)
      continue;
    NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
  }
}
1568
1569LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1570                                          unsigned MaxDepth) const {
1571  SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1572  dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1573}
1574#endif
1575
1576void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1577                         bool SkipDebugLoc, bool AddNewLine,
1578                         const TargetInstrInfo *TII) const {
1579  const Module *M = nullptr;
1580  const Function *F = nullptr;
1581  if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1582    F = &MF->getFunction();
1583    M = F->getParent();
1584    if (!TII)
1585      TII = MF->getSubtarget().getInstrInfo();
1586  }
1587
1588  ModuleSlotTracker MST(M);
1589  if (F)
1590    MST.incorporateFunction(*F);
1591  print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1592}
1593
1594void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1595                         bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1596                         bool AddNewLine, const TargetInstrInfo *TII) const {
1597  // We can be a bit tidier if we know the MachineFunction.
1598  const TargetRegisterInfo *TRI = nullptr;
1599  const MachineRegisterInfo *MRI = nullptr;
1600  const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1601  tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1602
1603  if (isCFIInstruction())
1604    assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1605
1606  SmallBitVector PrintedTypes(8);
1607  bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1608  auto getTiedOperandIdx = [&](unsigned OpIdx) {
1609    if (!ShouldPrintRegisterTies)
1610      return 0U;
1611    const MachineOperand &MO = getOperand(OpIdx);
1612    if (MO.isReg() && MO.isTied() && !MO.isDef())
1613      return findTiedOperandIdx(OpIdx);
1614    return 0U;
1615  };
1616  unsigned StartOp = 0;
1617  unsigned e = getNumOperands();
1618
1619  // Print explicitly defined operands on the left of an assignment syntax.
1620  while (StartOp < e) {
1621    const MachineOperand &MO = getOperand(StartOp);
1622    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1623      break;
1624
1625    if (StartOp != 0)
1626      OS << ", ";
1627
1628    LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1629    unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1630    MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1631             ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1632    ++StartOp;
1633  }
1634
1635  if (StartOp != 0)
1636    OS << " = ";
1637
1638  if (getFlag(MachineInstr::FrameSetup))
1639    OS << "frame-setup ";
1640  if (getFlag(MachineInstr::FrameDestroy))
1641    OS << "frame-destroy ";
1642  if (getFlag(MachineInstr::FmNoNans))
1643    OS << "nnan ";
1644  if (getFlag(MachineInstr::FmNoInfs))
1645    OS << "ninf ";
1646  if (getFlag(MachineInstr::FmNsz))
1647    OS << "nsz ";
1648  if (getFlag(MachineInstr::FmArcp))
1649    OS << "arcp ";
1650  if (getFlag(MachineInstr::FmContract))
1651    OS << "contract ";
1652  if (getFlag(MachineInstr::FmAfn))
1653    OS << "afn ";
1654  if (getFlag(MachineInstr::FmReassoc))
1655    OS << "reassoc ";
1656  if (getFlag(MachineInstr::NoUWrap))
1657    OS << "nuw ";
1658  if (getFlag(MachineInstr::NoSWrap))
1659    OS << "nsw ";
1660  if (getFlag(MachineInstr::IsExact))
1661    OS << "exact ";
1662  if (getFlag(MachineInstr::NoFPExcept))
1663    OS << "nofpexcept ";
1664  if (getFlag(MachineInstr::NoMerge))
1665    OS << "nomerge ";
1666
1667  // Print the opcode name.
1668  if (TII)
1669    OS << TII->getName(getOpcode());
1670  else
1671    OS << "UNKNOWN";
1672
1673  if (SkipOpers)
1674    return;
1675
1676  // Print the rest of the operands.
1677  bool FirstOp = true;
1678  unsigned AsmDescOp = ~0u;
1679  unsigned AsmOpCount = 0;
1680
1681  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1682    // Print asm string.
1683    OS << " ";
1684    const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1685    LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1686    unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1687    getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1688                            ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1689                            IntrinsicInfo);
1690
1691    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1692    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1693    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1694      OS << " [sideeffect]";
1695    if (ExtraInfo & InlineAsm::Extra_MayLoad)
1696      OS << " [mayload]";
1697    if (ExtraInfo & InlineAsm::Extra_MayStore)
1698      OS << " [maystore]";
1699    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1700      OS << " [isconvergent]";
1701    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1702      OS << " [alignstack]";
1703    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1704      OS << " [attdialect]";
1705    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1706      OS << " [inteldialect]";
1707
1708    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1709    FirstOp = false;
1710  }
1711
1712  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1713    const MachineOperand &MO = getOperand(i);
1714
1715    if (FirstOp) FirstOp = false; else OS << ",";
1716    OS << " ";
1717
1718    if (isDebugValue() && MO.isMetadata()) {
1719      // Pretty print DBG_VALUE* instructions.
1720      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1721      if (DIV && !DIV->getName().empty())
1722        OS << "!\"" << DIV->getName() << '\"';
1723      else {
1724        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1725        unsigned TiedOperandIdx = getTiedOperandIdx(i);
1726        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1727                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1728      }
1729    } else if (isDebugLabel() && MO.isMetadata()) {
1730      // Pretty print DBG_LABEL instructions.
1731      auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1732      if (DIL && !DIL->getName().empty())
1733        OS << "\"" << DIL->getName() << '\"';
1734      else {
1735        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1736        unsigned TiedOperandIdx = getTiedOperandIdx(i);
1737        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1738                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1739      }
1740    } else if (i == AsmDescOp && MO.isImm()) {
1741      // Pretty print the inline asm operand descriptor.
1742      OS << '$' << AsmOpCount++;
1743      unsigned Flag = MO.getImm();
1744      OS << ":[";
1745      OS << InlineAsm::getKindName(InlineAsm::getKind(Flag));
1746
1747      unsigned RCID = 0;
1748      if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1749          InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1750        if (TRI) {
1751          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1752        } else
1753          OS << ":RC" << RCID;
1754      }
1755
1756      if (InlineAsm::isMemKind(Flag)) {
1757        unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1758        OS << ":" << InlineAsm::getMemConstraintName(MCID);
1759      }
1760
1761      unsigned TiedTo = 0;
1762      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1763        OS << " tiedto:$" << TiedTo;
1764
1765      OS << ']';
1766
1767      // Compute the index of the next operand descriptor.
1768      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1769    } else {
1770      LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1771      unsigned TiedOperandIdx = getTiedOperandIdx(i);
1772      if (MO.isImm() && isOperandSubregIdx(i))
1773        MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1774      else
1775        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1776                 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1777    }
1778  }
1779
1780  // Print any optional symbols attached to this instruction as-if they were
1781  // operands.
1782  if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1783    if (!FirstOp) {
1784      FirstOp = false;
1785      OS << ',';
1786    }
1787    OS << " pre-instr-symbol ";
1788    MachineOperand::printSymbol(OS, *PreInstrSymbol);
1789  }
1790  if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1791    if (!FirstOp) {
1792      FirstOp = false;
1793      OS << ',';
1794    }
1795    OS << " post-instr-symbol ";
1796    MachineOperand::printSymbol(OS, *PostInstrSymbol);
1797  }
1798  if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1799    if (!FirstOp) {
1800      FirstOp = false;
1801      OS << ',';
1802    }
1803    OS << " heap-alloc-marker ";
1804    HeapAllocMarker->printAsOperand(OS, MST);
1805  }
1806  if (MDNode *PCSections = getPCSections()) {
1807    if (!FirstOp) {
1808      FirstOp = false;
1809      OS << ',';
1810    }
1811    OS << " pcsections ";
1812    PCSections->printAsOperand(OS, MST);
1813  }
1814  if (uint32_t CFIType = getCFIType()) {
1815    if (!FirstOp)
1816      OS << ',';
1817    OS << " cfi-type " << CFIType;
1818  }
1819
1820  if (DebugInstrNum) {
1821    if (!FirstOp)
1822      OS << ",";
1823    OS << " debug-instr-number " << DebugInstrNum;
1824  }
1825
1826  if (!SkipDebugLoc) {
1827    if (const DebugLoc &DL = getDebugLoc()) {
1828      if (!FirstOp)
1829        OS << ',';
1830      OS << " debug-location ";
1831      DL->printAsOperand(OS, MST);
1832    }
1833  }
1834
1835  if (!memoperands_empty()) {
1836    SmallVector<StringRef, 0> SSNs;
1837    const LLVMContext *Context = nullptr;
1838    std::unique_ptr<LLVMContext> CtxPtr;
1839    const MachineFrameInfo *MFI = nullptr;
1840    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1841      MFI = &MF->getFrameInfo();
1842      Context = &MF->getFunction().getContext();
1843    } else {
1844      CtxPtr = std::make_unique<LLVMContext>();
1845      Context = CtxPtr.get();
1846    }
1847
1848    OS << " :: ";
1849    bool NeedComma = false;
1850    for (const MachineMemOperand *Op : memoperands()) {
1851      if (NeedComma)
1852        OS << ", ";
1853      Op->print(OS, MST, SSNs, *Context, MFI, TII);
1854      NeedComma = true;
1855    }
1856  }
1857
1858  if (SkipDebugLoc)
1859    return;
1860
1861  bool HaveSemi = false;
1862
1863  // Print debug location information.
1864  if (const DebugLoc &DL = getDebugLoc()) {
1865    if (!HaveSemi) {
1866      OS << ';';
1867      HaveSemi = true;
1868    }
1869    OS << ' ';
1870    DL.print(OS);
1871  }
1872
1873  // Print extra comments for DEBUG_VALUE.
1874  if (isDebugValue() && getDebugVariableOp().isMetadata()) {
1875    if (!HaveSemi) {
1876      OS << ";";
1877      HaveSemi = true;
1878    }
1879    auto *DV = getDebugVariable();
1880    OS << " line no:" <<  DV->getLine();
1881    if (isIndirectDebugValue())
1882      OS << " indirect";
1883  }
1884  // TODO: DBG_LABEL
1885
1886  if (AddNewLine)
1887    OS << '\n';
1888}
1889
/// Mark \p IncomingReg as killed by this instruction. Returns true if a kill
/// for the register (or a super-register of it) now exists on this
/// instruction; if \p AddIfNotFound, an implicit kill use is appended when no
/// matching use operand exists. Kill flags on strict sub-register uses are
/// trimmed so only one kill remains.
bool MachineInstr::addRegisterKilled(Register IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = IncomingReg.isPhysical();
  // Alias handling only applies to physical registers that actually alias
  // something.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register uses whose kill flags become redundant.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    Register Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  // Iterate back-to-front so removeOperand doesn't invalidate the
  // still-pending (smaller) indices.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      removeOperand(OpIdx);
    else
      // Explicit (or inline-asm-described) operands must stay; just clear
      // the flag.
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1955
1956void MachineInstr::clearRegisterKills(Register Reg,
1957                                      const TargetRegisterInfo *RegInfo) {
1958  if (!Reg.isPhysical())
1959    RegInfo = nullptr;
1960  for (MachineOperand &MO : operands()) {
1961    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1962      continue;
1963    Register OpReg = MO.getReg();
1964    if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1965      MO.setIsKill(false);
1966  }
1967}
1968
/// Mark \p Reg as dead (defined but never used) on this instruction. Returns
/// true if a dead flag for the register (or a super-register) now exists; if
/// \p AddIfNotFound, an implicit dead def is appended when no matching def
/// operand exists. Dead flags on strict sub-register defs are trimmed.
bool MachineInstr::addRegisterDead(Register Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = Reg.isPhysical();
  // Alias handling only applies to physical registers that actually alias
  // something.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register defs whose dead flags become redundant.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  // Iterate back-to-front so removeOperand doesn't invalidate the
  // still-pending (smaller) indices.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      removeOperand(OpIdx);
    else
      // Explicit (or inline-asm-described) operands must stay; just clear
      // the flag.
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
2020
2021void MachineInstr::clearRegisterDeads(Register Reg) {
2022  for (MachineOperand &MO : operands()) {
2023    if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
2024      continue;
2025    MO.setIsDead(false);
2026  }
2027}
2028
2029void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2030  for (MachineOperand &MO : operands()) {
2031    if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
2032      continue;
2033    MO.setIsUndef(IsUndef);
2034  }
2035}
2036
2037void MachineInstr::addRegisterDefined(Register Reg,
2038                                      const TargetRegisterInfo *RegInfo) {
2039  if (Reg.isPhysical()) {
2040    MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
2041    if (MO)
2042      return;
2043  } else {
2044    for (const MachineOperand &MO : operands()) {
2045      if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
2046          MO.getSubReg() == 0)
2047        return;
2048    }
2049  }
2050  addOperand(MachineOperand::CreateReg(Reg,
2051                                       true  /*IsDef*/,
2052                                       true  /*IsImp*/));
2053}
2054
2055void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2056                                         const TargetRegisterInfo &TRI) {
2057  bool HasRegMask = false;
2058  for (MachineOperand &MO : operands()) {
2059    if (MO.isRegMask()) {
2060      HasRegMask = true;
2061      continue;
2062    }
2063    if (!MO.isReg() || !MO.isDef()) continue;
2064    Register Reg = MO.getReg();
2065    if (!Reg.isPhysical())
2066      continue;
2067    // If there are no uses, including partial uses, the def is dead.
2068    if (llvm::none_of(UsedRegs,
2069                      [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2070      MO.setIsDead();
2071  }
2072
2073  // This is a call with a register mask operand.
2074  // Mask clobbers are always dead, so add defs for the non-dead defines.
2075  if (HasRegMask)
2076    for (const Register &UsedReg : UsedRegs)
2077      addRegisterDefined(UsedReg, &TRI);
2078}
2079
2080unsigned
2081MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2082  // Build up a buffer of hash code components.
2083  SmallVector<size_t, 16> HashComponents;
2084  HashComponents.reserve(MI->getNumOperands() + 1);
2085  HashComponents.push_back(MI->getOpcode());
2086  for (const MachineOperand &MO : MI->operands()) {
2087    if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2088      continue;  // Skip virtual register defs.
2089
2090    HashComponents.push_back(hash_value(MO));
2091  }
2092  return hash_combine_range(HashComponents.begin(), HashComponents.end());
2093}
2094
/// Emit an error referring to the source location of this instruction.
/// The "location cookie" is recovered from the last metadata operand whose
/// first element is a ConstantInt (the convention used by inline asm);
/// it lets the front end map the diagnostic back to a source line. Falls
/// back to a fatal error when the instruction is not attached to a function.
void MachineInstr::emitError(StringRef Msg) const {
  // Find the source location cookie.
  uint64_t LocCookie = 0;
  const MDNode *LocMD = nullptr;
  // Scan operands in reverse; the cookie metadata is appended last.
  for (unsigned i = getNumOperands(); i != 0; --i) {
    if (getOperand(i-1).isMetadata() &&
        (LocMD = getOperand(i-1).getMetadata()) &&
        LocMD->getNumOperands() != 0) {
      if (const ConstantInt *CI =
              mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
        LocCookie = CI->getZExtValue();
        break;
      }
    }
  }

  // Route through the LLVMContext when we can reach it via the parent chain.
  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
  report_fatal_error(Msg);
}
2116
2117MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2118                                  const MCInstrDesc &MCID, bool IsIndirect,
2119                                  Register Reg, const MDNode *Variable,
2120                                  const MDNode *Expr) {
2121  assert(isa<DILocalVariable>(Variable) && "not a variable");
2122  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2123  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2124         "Expected inlined-at fields to agree");
2125  auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2126  if (IsIndirect)
2127    MIB.addImm(0U);
2128  else
2129    MIB.addReg(0U);
2130  return MIB.addMetadata(Variable).addMetadata(Expr);
2131}
2132
2133MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2134                                  const MCInstrDesc &MCID, bool IsIndirect,
2135                                  ArrayRef<MachineOperand> DebugOps,
2136                                  const MDNode *Variable, const MDNode *Expr) {
2137  assert(isa<DILocalVariable>(Variable) && "not a variable");
2138  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2139  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2140         "Expected inlined-at fields to agree");
2141  if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2142    assert(DebugOps.size() == 1 &&
2143           "DBG_VALUE must contain exactly one debug operand");
2144    MachineOperand DebugOp = DebugOps[0];
2145    if (DebugOp.isReg())
2146      return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2147                     Expr);
2148
2149    auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2150    if (IsIndirect)
2151      MIB.addImm(0U);
2152    else
2153      MIB.addReg(0U);
2154    return MIB.addMetadata(Variable).addMetadata(Expr);
2155  }
2156
2157  auto MIB = BuildMI(MF, DL, MCID);
2158  MIB.addMetadata(Variable).addMetadata(Expr);
2159  for (const MachineOperand &DebugOp : DebugOps)
2160    if (DebugOp.isReg())
2161      MIB.addReg(DebugOp.getReg());
2162    else
2163      MIB.add(DebugOp);
2164  return MIB;
2165}
2166
2167MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2168                                  MachineBasicBlock::iterator I,
2169                                  const DebugLoc &DL, const MCInstrDesc &MCID,
2170                                  bool IsIndirect, Register Reg,
2171                                  const MDNode *Variable, const MDNode *Expr) {
2172  MachineFunction &MF = *BB.getParent();
2173  MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2174  BB.insert(I, MI);
2175  return MachineInstrBuilder(MF, MI);
2176}
2177
2178MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2179                                  MachineBasicBlock::iterator I,
2180                                  const DebugLoc &DL, const MCInstrDesc &MCID,
2181                                  bool IsIndirect,
2182                                  ArrayRef<MachineOperand> DebugOps,
2183                                  const MDNode *Variable, const MDNode *Expr) {
2184  MachineFunction &MF = *BB.getParent();
2185  MachineInstr *MI =
2186      BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2187  BB.insert(I, MI);
2188  return MachineInstrBuilder(MF, *MI);
2189}
2190
2191/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2192/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2193static const DIExpression *
2194computeExprForSpill(const MachineInstr &MI,
2195                    SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2196  assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2197         "Expected inlined-at fields to agree");
2198
2199  const DIExpression *Expr = MI.getDebugExpression();
2200  if (MI.isIndirectDebugValue()) {
2201    assert(MI.getDebugOffset().getImm() == 0 &&
2202           "DBG_VALUE with nonzero offset");
2203    Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2204  } else if (MI.isDebugValueList()) {
2205    // We will replace the spilled register with a frame index, so
2206    // immediately deref all references to the spilled register.
2207    std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2208    for (const MachineOperand *Op : SpilledOperands) {
2209      unsigned OpIdx = MI.getDebugOperandIndex(Op);
2210      Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2211    }
2212  }
2213  return Expr;
2214}
2215static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2216                                               Register SpillReg) {
2217  assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2218  SmallVector<const MachineOperand *> SpillOperands;
2219  for (const MachineOperand &Op : MI.getDebugOperandsForReg(SpillReg))
2220    SpillOperands.push_back(&Op);
2221  return computeExprForSpill(MI, SpillOperands);
2222}
2223
2224MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2225                                          MachineBasicBlock::iterator I,
2226                                          const MachineInstr &Orig,
2227                                          int FrameIndex, Register SpillReg) {
2228  assert(!Orig.isDebugRef() &&
2229         "DBG_INSTR_REF should not reference a virtual register.");
2230  const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2231  MachineInstrBuilder NewMI =
2232      BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2233  // Non-Variadic Operands: Location, Offset, Variable, Expression
2234  // Variadic Operands:     Variable, Expression, Locations...
2235  if (Orig.isNonListDebugValue())
2236    NewMI.addFrameIndex(FrameIndex).addImm(0U);
2237  NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2238  if (Orig.isDebugValueList()) {
2239    for (const MachineOperand &Op : Orig.debug_operands())
2240      if (Op.isReg() && Op.getReg() == SpillReg)
2241        NewMI.addFrameIndex(FrameIndex);
2242      else
2243        NewMI.add(MachineOperand(Op));
2244  }
2245  return NewMI;
2246}
2247MachineInstr *llvm::buildDbgValueForSpill(
2248    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2249    const MachineInstr &Orig, int FrameIndex,
2250    SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2251  const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2252  MachineInstrBuilder NewMI =
2253      BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2254  // Non-Variadic Operands: Location, Offset, Variable, Expression
2255  // Variadic Operands:     Variable, Expression, Locations...
2256  if (Orig.isNonListDebugValue())
2257    NewMI.addFrameIndex(FrameIndex).addImm(0U);
2258  NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2259  if (Orig.isDebugValueList()) {
2260    for (const MachineOperand &Op : Orig.debug_operands())
2261      if (is_contained(SpilledOperands, &Op))
2262        NewMI.addFrameIndex(FrameIndex);
2263      else
2264        NewMI.add(MachineOperand(Op));
2265  }
2266  return NewMI;
2267}
2268
2269void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2270                                  Register Reg) {
2271  const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2272  if (Orig.isNonListDebugValue())
2273    Orig.getDebugOffset().ChangeToImmediate(0U);
2274  for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2275    Op.ChangeToFrameIndex(FrameIndex);
2276  Orig.getDebugExpressionOp().setMetadata(Expr);
2277}
2278
2279void MachineInstr::collectDebugValues(
2280                                SmallVectorImpl<MachineInstr *> &DbgValues) {
2281  MachineInstr &MI = *this;
2282  if (!MI.getOperand(0).isReg())
2283    return;
2284
2285  MachineBasicBlock::iterator DI = MI; ++DI;
2286  for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2287       DI != DE; ++DI) {
2288    if (!DI->isDebugValue())
2289      return;
2290    if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2291      DbgValues.push_back(&*DI);
2292  }
2293}
2294
2295void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2296  // Collect matching debug values.
2297  SmallVector<MachineInstr *, 2> DbgValues;
2298
2299  if (!getOperand(0).isReg())
2300    return;
2301
2302  Register DefReg = getOperand(0).getReg();
2303  auto *MRI = getRegInfo();
2304  for (auto &MO : MRI->use_operands(DefReg)) {
2305    auto *DI = MO.getParent();
2306    if (!DI->isDebugValue())
2307      continue;
2308    if (DI->hasDebugOperandForReg(DefReg)) {
2309      DbgValues.push_back(DI);
2310    }
2311  }
2312
2313  // Propagate Reg to debug value instructions.
2314  for (auto *DBI : DbgValues)
2315    for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2316      Op.setReg(Reg);
2317}
2318
2319using MMOList = SmallVector<const MachineMemOperand *, 2>;
2320
2321static unsigned getSpillSlotSize(const MMOList &Accesses,
2322                                 const MachineFrameInfo &MFI) {
2323  unsigned Size = 0;
2324  for (const auto *A : Accesses)
2325    if (MFI.isSpillSlotObjectIndex(
2326            cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2327                ->getFrameIndex()))
2328      Size += A->getSize();
2329  return Size;
2330}
2331
2332std::optional<unsigned>
2333MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2334  int FI;
2335  if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2336    const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2337    if (MFI.isSpillSlotObjectIndex(FI))
2338      return (*memoperands_begin())->getSize();
2339  }
2340  return std::nullopt;
2341}
2342
2343std::optional<unsigned>
2344MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2345  MMOList Accesses;
2346  if (TII->hasStoreToStackSlot(*this, Accesses))
2347    return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2348  return std::nullopt;
2349}
2350
2351std::optional<unsigned>
2352MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2353  int FI;
2354  if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2355    const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2356    if (MFI.isSpillSlotObjectIndex(FI))
2357      return (*memoperands_begin())->getSize();
2358  }
2359  return std::nullopt;
2360}
2361
2362std::optional<unsigned>
2363MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2364  MMOList Accesses;
2365  if (TII->hasLoadFromStackSlot(*this, Accesses))
2366    return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2367  return std::nullopt;
2368}
2369
2370unsigned MachineInstr::getDebugInstrNum() {
2371  if (DebugInstrNum == 0)
2372    DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2373  return DebugInstrNum;
2374}
2375
2376unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2377  if (DebugInstrNum == 0)
2378    DebugInstrNum = MF.getNewDebugInstrNum();
2379  return DebugInstrNum;
2380}
2381