//===- HexagonBlockRanges.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonBlockRanges.h"
#include "HexagonInstrInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hbr"

bool HexagonBlockRanges::IndexRange::overlaps(const IndexRange &A) const {
  // If A contains start(), or "this" contains A.start(), then overlap.
  IndexType S = start(), E = end(), AS = A.start(), AE = A.end();
  if (AS == S)
    return true;
  bool SbAE = (S < AE) || (S == AE && A.TiedEnd);  // S-before-AE.
  bool ASbE = (AS < E) || (AS == E && TiedEnd);    // AS-before-E.
  if ((AS < S && SbAE) || (S < AS && ASbE))
    return true;
  // Otherwise no overlap.
  return false;
}

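// Check if this range entirely contains A. An end of "None" is treated as
// equal to the corresponding range's start.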
bool HexagonBlockRanges::IndexRange::contains(const IndexRange &A) const {
  if (start() <= A.start()) {
    // Treat "None" in the range end as equal to the range start.
    IndexType E = (end() != IndexType::None) ? end() : start();
    IndexType AE = (A.end() != IndexType::None) ? A.end() : A.start();
    if (AE <= E)
      return true;
  }
  return false;
}

void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
  // Allow merging adjacent ranges.
  assert(end() == A.start() || overlaps(A));
  IndexType AS = A.start(), AE = A.end();
  if (AS < start() || start() == IndexType::None)
    setStart(AS);
  if (end() < AE || end() == IndexType::None) {
    setEnd(AE);
    TiedEnd = A.TiedEnd;
  } else {
    if (end() == AE)
      TiedEnd |= A.TiedEnd;
  }
  if (A.Fixed)
    Fixed = true;
}

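// Append the ranges from RL that are not already present in this list.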
void HexagonBlockRanges::RangeList::include(const RangeList &RL) {
  for (auto &R : RL)
    if (!is_contained(*this, R))
      push_back(R);
}

// Merge all overlapping ranges in the list, so that all that remains
// is a list of disjoint ranges.
void HexagonBlockRanges::RangeList::unionize(bool MergeAdjacent) {
  if (empty())
    return;

  llvm::sort(*this);
  iterator Iter = begin();

  while (Iter != end()-1) {
    iterator Next = std::next(Iter);
    // If MergeAdjacent is true, merge ranges A and B, where A.end == B.start.
    // This allows merging dead ranges, but is not valid for live ranges.
    bool Merge = MergeAdjacent && (Iter->end() == Next->start());
    if (Merge || Iter->overlaps(*Next)) {
      Iter->merge(*Next);
      erase(Next);
      continue;
    }
    ++Iter;
  }
}

// Compute a range A-B and add it to the list.
void HexagonBlockRanges::RangeList::addsub(const IndexRange &A,
      const IndexRange &B) {
  // Exclusion of non-overlapping ranges makes some checks simpler
  // later in this function.
  if (!A.overlaps(B)) {
    // A - B = A.
    add(A);
    return;
  }

  IndexType AS = A.start(), AE = A.end();
  IndexType BS = B.start(), BE = B.end();

  // If AE is None, then A is included in B, since A and B overlap.
  // The result of subtraction is empty, so just return.
  if (AE == IndexType::None)
    return;

  if (AS < BS) {
    // A starts before B.
    // AE cannot be None since A and B overlap.
    assert(AE != IndexType::None);
    // Add the part of A that extends on the "less" side of B.
    add(AS, BS, A.Fixed, false);
  }

  if (BE < AE) {
    // BE cannot be Exit here.
    if (BE == IndexType::None)
      add(BS, AE, A.Fixed, false);
    else
      add(BE, AE, A.Fixed, false);
  }
}

// Subtract a given range from each element in the list.
void HexagonBlockRanges::RangeList::subtract(const IndexRange &Range) {
  // Cannot assume that the list is unionized (i.e. contains only non-
  // overlapping ranges).
  RangeList T;
  for (iterator Next, I = begin(); I != end(); I = Next) {
    IndexRange &Rg = *I;
    if (Rg.overlaps(Range)) {
      T.addsub(Rg, Range);
      Next = this->erase(I);
    } else {
      Next = std::next(I);
    }
  }
  include(T);
}

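// Build the instruction-to-index map for block B, assigning consecutive
// indices starting at IndexType::First and skipping debug instructions.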
HexagonBlockRanges::InstrIndexMap::InstrIndexMap(MachineBasicBlock &B)
    : Block(B) {
  IndexType Idx = IndexType::First;
  First = Idx;
  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    assert(getIndex(&In) == IndexType::None && "Instruction already in map");
    Map.insert(std::make_pair(Idx, &In));
    ++Idx;
  }
  Last = B.empty() ? IndexType::None : unsigned(Idx)-1;
}

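// Return the instruction with index Idx, or nullptr if there is none.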
MachineInstr *HexagonBlockRanges::InstrIndexMap::getInstr(IndexType Idx) const {
  auto F = Map.find(Idx);
  return (F != Map.end()) ? F->second : nullptr;
}

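// Return the index of MI (via a linear scan of the map), or IndexType::None
// if MI is not mapped.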
HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getIndex(
      MachineInstr *MI) const {
  for (auto &I : Map)
    if (I.second == MI)
      return I.first;
  return IndexType::None;
}

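// Return the index immediately preceding Idx in block order: Entry precedes
// the first instruction, and nothing precedes Entry.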
HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getPrevIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::None;
  if (Idx == IndexType::Exit)
    return Last;
  if (Idx == First)
    return IndexType::Entry;
  return unsigned(Idx)-1;
}

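// Return the index immediately following Idx in block order, or
// IndexType::None past the last instruction.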
HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getNextIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::First;
  if (Idx == IndexType::Exit || Idx == Last)
    return IndexType::None;
  return unsigned(Idx)+1;
}

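// Remap the index of OldMI to NewMI, or drop the entry if NewMI is null.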
void HexagonBlockRanges::InstrIndexMap::replaceInstr(MachineInstr *OldMI,
      MachineInstr *NewMI) {
  for (auto &I : Map) {
    if (I.second != OldMI)
      continue;
    if (NewMI != nullptr)
      I.second = NewMI;
    else
      Map.erase(I.first);
    break;
  }
}

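// Cache the subtarget, instruction/register info, and the set of registers
// that range computation should treat as reserved.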
HexagonBlockRanges::HexagonBlockRanges(MachineFunction &mf)
  : MF(mf), HST(mf.getSubtarget<HexagonSubtarget>()),
    TII(*HST.getInstrInfo()), TRI(*HST.getRegisterInfo()),
    Reserved(TRI.getReservedRegs(mf)) {
  // Consider all non-allocatable registers as reserved.
  for (const TargetRegisterClass *RC : TRI.regclasses()) {
    if (RC->isAllocatable())
      continue;
    for (unsigned R : *RC)
      Reserved[R] = true;
  }
}

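// Collect the registers live on entry to B, expanded according to the
// live-in lane masks and with reserved registers filtered out.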
HexagonBlockRanges::RegisterSet HexagonBlockRanges::getLiveIns(
      const MachineBasicBlock &B, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet LiveIns;
  RegisterSet Tmp;

  for (auto I : B.liveins()) {
    MCSubRegIndexIterator S(I.PhysReg, &TRI);
    if (I.LaneMask.all() || (I.LaneMask.any() && !S.isValid())) {
      Tmp.insert({I.PhysReg, 0});
      continue;
    }
    for (; S.isValid(); ++S) {
      unsigned SI = S.getSubRegIndex();
      if ((I.LaneMask & TRI.getSubRegIndexLaneMask(SI)).any())
        Tmp.insert({S.getSubReg(), 0});
    }
  }

  for (auto R : Tmp) {
    if (!Reserved[R.Reg])
      LiveIns.insert(R);
    for (auto S : expandToSubRegs(R, MRI, TRI))
      if (!Reserved[S.Reg])
        LiveIns.insert(S);
  }
  return LiveIns;
}

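// Expand R into the set of its individual subregisters. A reference that
// already names a subregister, or a register with no subregisters, is
// returned as a single-element set.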
HexagonBlockRanges::RegisterSet HexagonBlockRanges::expandToSubRegs(
      RegisterRef R, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet SRs;

  if (R.Sub != 0) {
    SRs.insert(R);
    return SRs;
  }

  if (Register::isPhysicalRegister(R.Reg)) {
    MCSubRegIterator I(R.Reg, &TRI);
    if (!I.isValid())
      SRs.insert({R.Reg, 0});
    for (; I.isValid(); ++I)
      SRs.insert({*I, 0});
  } else {
    assert(R.Reg.isVirtual());
    auto &RC = *MRI.getRegClass(R.Reg);
    unsigned PReg = *RC.begin();
    MCSubRegIndexIterator I(PReg, &TRI);
    if (!I.isValid())
      SRs.insert({R.Reg, 0});
    for (; I.isValid(); ++I)
      SRs.insert({R.Reg, I.getSubRegIndex()});
  }
  return SRs;
}

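// Compute the initial (not yet unionized) live ranges for the block described
// by IndexMap: a range is opened at a def (or at Entry for live-ins) and
// closed at a kill, at a redefinition, or at Exit for live-outs.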
void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
      RegToRangeMap &LiveMap) {
  std::map<RegisterRef,IndexType> LastDef, LastUse;
  RegisterSet LiveOnEntry;
  MachineBasicBlock &B = IndexMap.getBlock();
  MachineRegisterInfo &MRI = B.getParent()->getRegInfo();

  for (auto R : getLiveIns(B, MRI, TRI))
    LiveOnEntry.insert(R);

  for (auto R : LiveOnEntry)
    LastDef[R] = IndexType::Entry;

  auto closeRange = [&LastUse,&LastDef,&LiveMap] (RegisterRef R) -> void {
    auto LD = LastDef[R], LU = LastUse[R];
    if (LD == IndexType::None)
      LD = IndexType::Entry;
    if (LU == IndexType::None)
      LU = IndexType::Exit;
    LiveMap[R].add(LD, LU, false, false);
    LastUse[R] = LastDef[R] = IndexType::None;
  };

  RegisterSet Defs, Clobbers;

  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    IndexType Index = IndexMap.getIndex(&In);
    // Process uses first.
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      if (Register::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
        continue;
      bool IsKill = Op.isKill();
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        LastUse[S] = Index;
        if (IsKill)
          closeRange(S);
      }
    }
    // Process defs and clobbers.
    Defs.clear();
    Clobbers.clear();
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isDef() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        if (Register::isPhysicalRegister(S.Reg) && Reserved[S.Reg])
          continue;
        if (Op.isDead())
          Clobbers.insert(S);
        else
          Defs.insert(S);
      }
    }

    for (auto &Op : In.operands()) {
      if (!Op.isRegMask())
        continue;
      const uint32_t *BM = Op.getRegMask();
      for (unsigned PR = 1, N = TRI.getNumRegs(); PR != N; ++PR) {
        // Skip registers that have subregisters. A register is preserved
        // iff its bit is set in the regmask, so if R1:0 was preserved, both
        // R1 and R0 would also be present.
        if (MCSubRegIterator(PR, &TRI, false).isValid())
          continue;
        if (Reserved[PR])
          continue;
        if (BM[PR/32] & (1u << (PR%32)))
          continue;
        RegisterRef R = { PR, 0 };
        if (!Defs.count(R))
          Clobbers.insert(R);
      }
    }
    // Defs and clobbers can overlap, e.g.
    // dead %d0 = COPY %5, implicit-def %r0, implicit-def %r1
    for (RegisterRef R : Defs)
      Clobbers.erase(R);

    // Update maps for defs.
    for (RegisterRef S : Defs) {
      // Defs should already be expanded into subregs.
      assert(!Register::isPhysicalRegister(S.Reg) ||
             !MCSubRegIterator(S.Reg, &TRI, false).isValid());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      LastDef[S] = Index;
    }
    // Update maps for clobbers.
    for (RegisterRef S : Clobbers) {
      // Clobbers should already be expanded into subregs.
      assert(!Register::isPhysicalRegister(S.Reg) ||
             !MCSubRegIterator(S.Reg, &TRI, false).isValid());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      // Create a single-instruction range.
      LastDef[S] = LastUse[S] = Index;
      closeRange(S);
    }
  }

  // Collect live-on-exit.
  RegisterSet LiveOnExit;
  for (auto *SB : B.successors())
    for (auto R : getLiveIns(*SB, MRI, TRI))
      LiveOnExit.insert(R);

  for (auto R : LiveOnExit)
    LastUse[R] = IndexType::Exit;

  // Process remaining registers.
  RegisterSet Left;
  for (auto &I : LastUse)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto &I : LastDef)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto R : Left)
    closeRange(R);

  // Finalize the live ranges.
  for (auto &P : LiveMap)
    P.second.unionize();
}

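// Compute the map from registers to their live ranges in the block
// described by IndexMap.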
HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeLiveMap(
      InstrIndexMap &IndexMap) {
  RegToRangeMap LiveMap;
  LLVM_DEBUG(dbgs() << __func__ << ": index map\n" << IndexMap << '\n');
  computeInitialLiveRanges(IndexMap, LiveMap);
  LLVM_DEBUG(dbgs() << __func__ << ": live map\n"
                    << PrintRangeMap(LiveMap, TRI) << '\n');
  return LiveMap;
}

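// Compute the map from registers to the ranges where they are dead in the
// block, i.e. the gaps between the ranges in LiveMap (or the whole block
// for registers with no live range).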
HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
      InstrIndexMap &IndexMap, RegToRangeMap &LiveMap) {
  RegToRangeMap DeadMap;

  auto addDeadRanges = [&IndexMap,&LiveMap,&DeadMap] (RegisterRef R) -> void {
    auto F = LiveMap.find(R);
    if (F == LiveMap.end() || F->second.empty()) {
      DeadMap[R].add(IndexType::Entry, IndexType::Exit, false, false);
      return;
    }

    RangeList &RL = F->second;
    RangeList::iterator A = RL.begin(), Z = RL.end()-1;

    // Try to create the initial range.
    if (A->start() != IndexType::Entry) {
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DE != IndexType::Entry)
        DeadMap[R].add(IndexType::Entry, DE, false, false);
    }

    while (A != Z) {
      // Creating a dead range that follows A.  Pay attention to empty
      // ranges (i.e. those ending with "None").
      IndexType AE = (A->end() == IndexType::None) ? A->start() : A->end();
      IndexType DS = IndexMap.getNextIndex(AE);
      ++A;
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DS < DE)
        DeadMap[R].add(DS, DE, false, false);
    }

    // Try to create the final range.
    if (Z->end() != IndexType::Exit) {
      IndexType ZE = (Z->end() == IndexType::None) ? Z->start() : Z->end();
      IndexType DS = IndexMap.getNextIndex(ZE);
      if (DS < IndexType::Exit)
        DeadMap[R].add(DS, IndexType::Exit, false, false);
    }
  };

  MachineFunction &MF = *IndexMap.getBlock().getParent();
  auto &MRI = MF.getRegInfo();
  unsigned NumRegs = TRI.getNumRegs();
  BitVector Visited(NumRegs);
  for (unsigned R = 1; R < NumRegs; ++R) {
    for (auto S : expandToSubRegs({R,0}, MRI, TRI)) {
      if (Reserved[S.Reg] || Visited[S.Reg])
        continue;
      addDeadRanges(S);
      Visited[S.Reg] = true;
    }
  }
  for (auto &P : LiveMap)
    if (P.first.Reg.isVirtual())
      addDeadRanges(P.first);

  LLVM_DEBUG(dbgs() << __func__ << ": dead map\n"
                    << PrintRangeMap(DeadMap, TRI) << '\n');
  return DeadMap;
}

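// Print an index: '-' for None, 'n' for Entry, 'x' for Exit, otherwise the
// 1-based position of the instruction in the block.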
raw_ostream &llvm::operator<<(raw_ostream &OS,
                              HexagonBlockRanges::IndexType Idx) {
  if (Idx == HexagonBlockRanges::IndexType::None)
    return OS << '-';
  if (Idx == HexagonBlockRanges::IndexType::Entry)
    return OS << 'n';
  if (Idx == HexagonBlockRanges::IndexType::Exit)
    return OS << 'x';
  return OS << unsigned(Idx)-HexagonBlockRanges::IndexType::First+1;
}

// Print an index range as [start:end], with '}' marking a tied end and '!'
// marking a fixed range.
raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const HexagonBlockRanges::IndexRange &IR) {
  OS << '[' << IR.start() << ':' << IR.end() << (IR.TiedEnd ? '}' : ']');
  if (IR.Fixed)
    OS << '!';
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const HexagonBlockRanges::RangeList &RL) {
  for (auto &R : RL)
    OS << R << " ";
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const HexagonBlockRanges::InstrIndexMap &M) {
  for (auto &In : M.Block) {
    HexagonBlockRanges::IndexType Idx = M.getIndex(&In);
    OS << Idx << (Idx == M.Last ? ". " : "  ") << In;
  }
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const HexagonBlockRanges::PrintRangeMap &P) {
  for (auto &I : P.Map) {
    const HexagonBlockRanges::RangeList &RL = I.second;
    OS << printReg(I.first.Reg, &P.TRI, I.first.Sub) << " -> " << RL << "\n";
  }
  return OS;
}