//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class, which provides default
// implementations of various methods.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
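
// Illustrative use (kept in a comment so this file stays compilable): a caller
// such as the branch folder might invoke this after merging a tail elsewhere;
// the names below are hypothetical.
//
//   // Before: BB#1 ends in dead instructions starting at Tail.
//   TII->ReplaceTailWithBranchTo(Tail, MergedTailBB);
//   // After: BB#1's sole successor is MergedTailBB, reached by an
//   // unconditional branch unless MergedTailBB is the next block in layout.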

// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
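
// Sketch of the effect (comment only): for a two-address instruction such as
//
//   %reg1 = ADD %reg1<kill>, %reg2
//
// where source operand 1 is tied to the def, the code above swaps the two
// sources and rewrites the tied def and kill flags so the constraint still
// holds:
//
//   %reg2 = ADD %reg2, %reg1<kill>
//
// With NewMI == true the rewrite is applied to a clone and the original
// instruction is left untouched.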

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfoImpl::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
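
// The convention this default encodes (comment only): for an instruction of
// the form "%v0 = FADD %v1, %v2" with one def, SrcOpIdx1 == 1 and
// SrcOpIdx2 == 2, i.e. the first two operands past the defs. Targets whose
// commutable operands live elsewhere (predicated instructions, extra defs)
// must override this hook.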


bool
TargetInstrInfoImpl::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}


bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfoImpl::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
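
// Sketch of a call (comment only; the shape of Pred is target-defined, and an
// ARM-like condition-code-plus-flags-register pair is only an assumption):
//
//   SmallVector<MachineOperand, 2> Pred;
//   Pred.push_back(MachineOperand::CreateImm(CC));          // hypothetical CC
//   Pred.push_back(MachineOperand::CreateReg(PredReg, false));
//   if (TII->PredicateInstruction(MI, Pred))
//     ... // each MCOI::Predicate slot in MI was overwritten in order.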

bool TargetInstrInfoImpl::hasLoadFromStackSlot(const MachineInstr *MI,
                                        const MachineMemOperand *&MMO,
                                        int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfoImpl::hasStoreToStackSlot(const MachineInstr *MI,
                                       const MachineMemOperand *&MMO,
                                       int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}
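
// Both hooks above rely on the instruction carrying a memory operand whose
// underlying value is a FixedStackPseudoSourceValue, as a spill reload or
// store created against MachinePointerInfo::getFixedStack(FI) typically does.
// Instructions that merely compute a frame address, or whose memory operands
// were dropped, report false here even if they do touch the stack.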

void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                      const MachineInstr *MI1,
                                      const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}
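
// What qualifies (comment only): folding the def of "%v1 = COPY %EAX" turns
// the COPY into a store of %EAX, and is allowed only if %v1's register class
// contains EAX, in which case that class is returned. For "%v1 = COPY %v2"
// the live register's class must be a subclass-or-equal of the folded
// register's class. Sub-register copies are rejected above.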

bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded and inserted before MI; otherwise NULL is returned. The
/// client is responsible for removing the old instruction from the
/// instruction stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
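
// Typical use (comment only): a spiller folding a reload into operand OpNo of
// MI might write
//
//   SmallVector<unsigned, 1> Ops;
//   Ops.push_back(OpNo);
//   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
//     MI->eraseFromParent(); // FoldedMI is already inserted before MI.
//   }
//
// On the COPY fallback path the returned instruction is the spill or reload
// emitted by storeRegToStackSlot/loadRegFromStackSlot rather than a folded
// form of MI.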

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
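
// Examples (comment only): an immediate materialization such as x86's
// "%v0 = MOV32ri 42", or a load from an immutable fixed stack slot, passes
// these checks. An instruction that uses another virtual register, defines a
// physical register, or loads from possibly-varying memory is rejected; uses
// of constant physregs (per MachineRegisterInfo::isConstantPhysReg) are the
// one physreg exception.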

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
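
// A target with a real itinerary typically overrides the pre-RA hook to get
// scoreboard hazard detection there too; a minimal sketch, assuming a
// hypothetical FooInstrInfo class (comment only):
//
//   ScheduleHazardRecognizer *FooInstrInfo::
//   CreateTargetHazardRecognizer(const TargetMachine *TM,
//                                const ScheduleDAG *DAG) const {
//     if (usePreRAHazardRecognizer()) // honors -disable-sched-hazard
//       return new ScoreboardHazardRecognizer(TM->getInstrItineraryData(),
//                                             DAG, "pre-RA-sched");
//     return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
//   }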

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfoImpl::getOperandLatency(const InstrItineraryData *ItinData,
                                       SDNode *DefNode, unsigned DefIdx,
                                       SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfoImpl::getInstrLatency(const InstrItineraryData *ItinData,
                                         SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfoImpl::getNumMicroOps(const InstrItineraryData *ItinData,
                                    const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}
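
// Worked example (numbers depend on the subtarget's MCSchedModel): with
// LoadLatency == 4 and HighLatency == 10, a transient instruction such as a
// COPY reports 0 cycles, a reload reports 4, an opcode the target flags via
// isHighLatencyDef (say, a divide) reports 10, and anything else defaults
// to 1.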

unsigned TargetInstrInfoImpl::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfoImpl::hasLowDefLatency(const InstrItineraryData *ItinData,
                                           const MachineInstr *DefMI,
                                           unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfoImpl::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI, bool FindMin) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  // Return a latency based on the itinerary properties and defining
  // instruction if possible. Some common subtargets don't require per-operand
  // latency, especially for minimum latencies.
  if (FindMin) {
    // If MinLatency is valid, call getInstrLatency. This uses Stage latency if
    // it exists before defaulting to MinLatency.
    if (ItinData->SchedModel->MinLatency >= 0)
      return getInstrLatency(ItinData, DefMI);

    // If MinLatency is invalid, OperandLatency is interpreted as MinLatency.
    // For empty itineraries, short-circuit the check and default to one cycle.
    if (ItinData->isEmpty())
      return 1;
  }
  else if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
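
// Summary of the cases above (comment only):
//
//   no itinerary                   -> getInstrLatency (target hook)
//   FindMin, MinLatency >= 0       -> getInstrLatency
//   FindMin, empty itinerary       -> 1 cycle
//   !FindMin, empty itinerary      -> defaultDefLatency
//   otherwise                      -> -1; caller must do an operand lookup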

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx,
                      bool FindMin) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI, FindMin);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  if (!FindMin)
    InstrLatency = std::max(InstrLatency,
                            defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
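
// Illustrative call (comment only): a scheduler computing the expected
// latency of an edge might write
//
//   unsigned Latency =
//     TII->computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx,
//                                /*FindMin=*/false);
//
// which yields the per-operand itinerary value when one exists, and otherwise
// the stage latency, never less than defaultDefLatency for an expected
// (non-minimum) query.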