PeepholeOptimizer.cpp revision 243830
//===-- PeepholeOptimizer.cpp - Peephole Optimizations --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended to
//     handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This optimization
//     will replace some uses of the pre-extension value with uses of the
//     sub-register of the result.
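//
//     For example (an illustrative x86-flavored sketch; the actual opcodes
//     and sub-register indices are target-specific):
//
//       %reg1025 = MOVSX32rr16 %reg1024   ; bits 0-15 still hold %reg1024
//       ...      = use %reg1024           ; can use %reg1025:sub_16bit instead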
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we can
//     eliminate the "cmp" instruction.
//
//     As another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Bitcast pairs:
//
//     v1 = bitcast v0
//     v2 = bitcast v1
//        = v2
//   =>
//     v1 = bitcast v0
//        = v0
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));
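// Note: both are hidden cl::opt flags, so they can be passed to any codegen
// driver; e.g. "llc -disable-peephole" turns the pass off entirely, while
// "-aggressive-ext-opt" enables the dominator-tree-based cross-block
// rewriting in optimizeExtInstr below.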

STATISTIC(NumReuse,      "Number of extension results reused");
STATISTIC(NumBitcasts,   "Number of bitcasts eliminated");
STATISTIC(NumCmps,       "Number of compares eliminated");
STATISTIC(NumImmFold,    "Number of move immediates folded");
STATISTIC(NumLoadFold,   "Number of loads folded");
STATISTIC(NumSelects,    "Number of selects optimized");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool optimizeSelect(MachineInstr *MI);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)

/// optimizeExtInstr - If the instruction is a copy-like instruction, i.e. it
/// reads a single register and writes a single register and it does not
/// modify the source, and if the source value is preserved as a sub-register
/// of the result, then replace all reachable uses of the source with the
/// subreg of the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
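///
/// A sketch of the rewrite the loop below performs (sub_idx stands for
/// whatever sub-register index the target reports):
///   %reg1025 = <sext> %reg1024
///   ...      = use %reg1024
/// becomes
///   %reg1025 = <sext> %reg1024
///   %reg1027 = COPY %reg1025:sub_idx
///   ...      = use %reg1027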
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace them with uses of the
  // result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as either the extension or a use of its
  // result.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range
      // of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcasts
/// a value across register classes), and the source is defined by another
/// bitcast instruction B, and if the register class of the source of B
/// matches the register class of the def of A, then it is legal to replace
/// all uses of the def of A with the source of B. e.g.
///   %vreg0<def> = VMOVSR %vreg1
///   %vreg3<def> = VMOVRS %vreg0
///   Replace all uses of vreg3 with vreg1.
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
                                             MachineBasicBlock *MBB) {
  unsigned NumDefs = MI->getDesc().getNumDefs();
  unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  unsigned Def = 0;
  unsigned Src = 0;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef())
      Def = Reg;
    else if (Src)
      // Multiple sources?
      return false;
    else
      Src = Reg;
  }

  assert(Def && Src && "Malformed bitcast instruction!");

  MachineInstr *DefMI = MRI->getVRegDef(Src);
  if (!DefMI || !DefMI->isBitcast())
    return false;

  // Find the lone register source of the defining bitcast, skipping defs.
  unsigned SrcSrc = 0;
  NumDefs = DefMI->getDesc().getNumDefs();
  NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (SrcSrc)
      // Multiple sources?
      return false;
    SrcSrc = Reg;
  }

  // Bail out if the defining bitcast has no register source at all;
  // getRegClass cannot be queried with a null register.
  if (!SrcSrc)
    return false;

  if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
    return false;

  MRI->replaceRegWith(Def, SrcSrc);
  MRI->clearKillFlags(SrcSrc);
  MI->eraseFromParent();
  ++NumBitcasts;
  return true;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
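///
/// An illustrative ARM-flavored sketch (the target's optimizeCompareInstr
/// hook performs the actual rewriting, so details vary by target):
///   sub  r0, r0, #1
///   cmp  r0, #0
///   beq  L1
/// becomes
///   subs r0, r0, #1
///   beq  L1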
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

/// optimizeSelect - Optimize a select instruction by asking the target, via
/// its optimizeSelect hook, to replace it with a cheaper equivalent (e.g. a
/// predicated instruction) and then erasing the select.
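///
/// A hedged sketch of what the hook can do on a predicated target such as
/// ARM (exact opcodes and operand order are target-specific):
///   %vreg2 = MOVCCr %vreg0, %vreg1, pred
/// can be rewritten so that the instruction defining %vreg1 is predicated
/// and writes %vreg2 directly, making the select unnecessary.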
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers, and only when the
/// defined virtual register has a single use.
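///
/// An illustrative x86-flavored sketch of the folding this enables (the
/// actual rewriting is done later by the target's optimizeLoadInstr hook):
///   %vreg1 = MOV32rm <mem>
///   %vreg2 = ADD32rr %vreg0, %vreg1
/// becomes
///   %vreg2 = ADD32rm %vreg0, <mem>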
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneUse when inserting
  // loads. It should be checked again when processing uses of the load,
  // since uses can be removed during peephole.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneUse(Reg)) {
    FoldAsLoadDefReg = Reg;
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
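///
/// A hedged x86-flavored sketch (TII->FoldImmediate performs the
/// target-specific rewriting; opcodes are illustrative):
///   %vreg1 = MOV32ri 7
///   %vreg2 = ADD32rr %vreg0, %vreg1
/// becomes
///   %vreg2 = ADD32ri %vreg0, 7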
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // If MI belongs to one of the following categories, discard the
      // current load-folding candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold the load after optimizeCmpInstr, since
        // optimizeCmpInstr can enable folding by converting SUB to CMP.
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          continue;
        }
      }
    }
  }

  return Changed;
}