//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
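///
/// For example (illustrative only, assuming the common MCAsmInfo defaults of
/// a ";" separator string, a "#" comment string and a maximum instruction
/// length of 4): the string "mov r0, r1; add r0, r0, #1 # bump\n" is counted
/// as two instructions, so this returns 2 * 4 = 8.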
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {

  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for comments first so that a comment at the start of an
    // instruction is not counted as an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                      bool NewMI,
                                                      unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
135296417Sdim         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return MI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done by the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

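// Helper for the commuting hooks: bind any "don't care" result indices
// (CommuteAnyOperandIndex) to the actually commutable operand indices, or
// verify that explicitly requested indices match them. Illustrative example
// (not from the original source): with CommutableOpIdx1 = 1 and
// CommutableOpIdx2 = 2, a request of (CommuteAnyOperandIndex, 2) is resolved
// to (1, 2) and returns true, while a request of (2, 3) returns false.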
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

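// Illustrative example (not part of the original source): for a 16-byte
// register class and a subregister index covering bits [0, 32), the function
// below computes Size = 4 and Offset = 0 on a little-endian target; on a
// big-endian target the byte offset is mirrored to 16 - (0 + 4) = 12.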
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize /= 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

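// Illustrative sketch of the folding performed below (not from the original
// source): a STACKMAP/PATCHPOINT register operand selected for folding into
// frame index 2 with a 4-byte spill slot is replaced by the operand tuple
//   IndirectMemRefOp, 4, <fi#2>, 0
// while all operands before StartIdx and any unselected operands are copied
// through unchanged.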
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return, the metadata, and the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source operand,
  // the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {

  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
  return;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  }
  else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

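// Illustrative example (not from the original source): on a target whose
// stack grows down, a call-frame setup pseudo such as "ADJCALLSTACKDOWN 16"
// makes getSPAdjust return +16, and the matching call-frame destroy pseudo
// with the same amount returns -16 (both after the amount has been rounded by
// TargetFrameLowering::alignSPAdjust).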
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
       (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute the latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
      MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
      MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}