SwitchLoweringUtils.cpp revision 360784
//===- SwitchLoweringUtils.cpp - Switch Lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains switch instruction lowering optimizations and utilities
// for codegen, so that they can be used by both SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"

using namespace llvm;
using namespace SwitchCG;

uint64_t SwitchCG::getJumpTableRange(const CaseClusterVector &Clusters,
                                     unsigned First, unsigned Last) {
  assert(Last >= First);
  const APInt &LowCase = Clusters[First].Low->getValue();
  const APInt &HighCase = Clusters[Last].High->getValue();
  assert(LowCase.getBitWidth() == HighCase.getBitWidth());

  // FIXME: A range of consecutive cases has 100% density, but only requires one
  // comparison to lower. We should discriminate against such consecutive ranges
  // in jump tables.
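  // Note: getLimitedValue() clamps the difference so that downstream density
  // checks (which multiply Range by a percentage <= 100) cannot overflow
  // uint64_t. E.g. single-case clusters {1}, {5}, {9} span [1, 9] and yield
  // Range == 9.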
  return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
}

uint64_t
SwitchCG::getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                               unsigned First, unsigned Last) {
  assert(Last >= First);
  assert(TotalCases[Last] >= TotalCases[First]);
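  // TotalCases is an inclusive prefix sum: TotalCases[i] counts the individual
  // case values covered by Clusters[0..i], so the number of cases in
  // [First, Last] is the difference between the sums at Last and at First - 1.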
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  return NumCases;
}

void SwitchCG::SwitchLowering::findJumpTables(CaseClusterVector &Clusters,
                                              const SwitchInst *SI,
                                              MachineBasicBlock *DefaultMBB,
                                              ProfileSummaryInfo *PSI,
                                              BlockFrequencyInfo *BFI) {
#ifndef NDEBUG
  // Clusters must be non-empty, sorted, and only contain Range clusters.
  assert(!Clusters.empty());
  for (CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range);
  for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  assert(TLI && "TLI not set!");
  if (!TLI->areJTsAllowed(SI->getParent()->getParent()))
    return;

  const unsigned MinJumpTableEntries = TLI->getMinimumJumpTableEntries();
  const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;

  // Bail if not enough cases.
  const int64_t N = Clusters.size();
  if (N < 2 || N < MinJumpTableEntries)
    return;

  // Accumulated number of cases in each cluster and those prior to it.
  SmallVector<unsigned, 8> TotalCases(N);
  for (unsigned i = 0; i < N; ++i) {
    const APInt &Hi = Clusters[i].High->getValue();
    const APInt &Lo = Clusters[i].Low->getValue();
    TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
    if (i != 0)
      TotalCases[i] += TotalCases[i - 1];
  }
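  // For example, clusters [1..2], [5..5], [8..9] give TotalCases = {2, 3, 5}.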

  uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
  uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
  assert(NumCases < UINT64_MAX / 100);
  assert(Range >= NumCases);

  // Cheap case: the whole range may be suitable for a single jump table.
  if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
    CaseCluster JTCluster;
    if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
      Clusters[0] = JTCluster;
      Clusters.resize(1);
      return;
    }
  }

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOpt::None)
    return;

  // Split Clusters into minimum number of dense partitions. The algorithm uses
  // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
  // for the Case Statement'" (1994), but builds the MinPartitions array in
  // reverse order to make it easier to reconstruct the partitions in ascending
  // order. In the choice between two optimal partitionings, it picks the one
  // which yields more jump tables.
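  //
  // Concretely, walking right to left, it computes
  //   MinPartitions[i] = min(MinPartitions[i + 1] + 1,
  //                          min over j > i with Clusters[i..j] suitable for
  //                            a jump table of 1 + MinPartitions[j + 1])
  // where suitability is decided by TLI->isSuitableForJumpTable().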

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);
  // PartitionsScore[i] is used to break ties when choosing between two
  // partitionings resulting in the same number of partitions.
  SmallVector<unsigned, 8> PartitionsScore(N);
  // For PartitionsScore, a small number of comparisons is considered as good
  // as a jump table, and a single comparison is considered better than a jump
  // table.
  enum PartitionScores : unsigned {
    NoTable = 0,
    Table = 1,
    FewCases = 1,
    SingleCase = 2
  };

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;
  PartitionsScore[N - 1] = PartitionScores::SingleCase;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; i--) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;
    PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;

    // Search for a solution that results in fewer partitions.
    for (int64_t j = N - 1; j > i; j--) {
      // Try building a partition from Clusters[i..j].
      Range = getJumpTableRange(Clusters, i, j);
      NumCases = getJumpTableNumCases(TotalCases, i, j);
      assert(NumCases < UINT64_MAX / 100);
      assert(Range >= NumCases);

      if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
        unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
        unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
        int64_t NumEntries = j - i + 1;

        if (NumEntries == 1)
          Score += PartitionScores::SingleCase;
        else if (NumEntries <= SmallNumberOfEntries)
          Score += PartitionScores::FewCases;
        else if (NumEntries >= MinJumpTableEntries)
          Score += PartitionScores::Table;

        // If this leads to fewer partitions, or to the same number of
        // partitions with better score, it is a better partitioning.
        if (NumPartitions < MinPartitions[i] ||
            (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
          MinPartitions[i] = NumPartitions;
          LastElement[i] = j;
          PartitionsScore[i] = Score;
        }
      }
    }
  }

  // Iterate over the partitions, replacing some with jump tables in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(Last >= First);
    assert(DstIndex <= First);
    unsigned NumClusters = Last - First + 1;

    CaseCluster JTCluster;
    if (NumClusters >= MinJumpTableEntries &&
        buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
      Clusters[DstIndex++] = JTCluster;
    } else {
      for (unsigned I = First; I <= Last; ++I)
        std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildJumpTable(const CaseClusterVector &Clusters,
                                              unsigned First, unsigned Last,
                                              const SwitchInst *SI,
                                              MachineBasicBlock *DefaultMBB,
                                              CaseCluster &JTCluster) {
  assert(First <= Last);

  auto Prob = BranchProbability::getZero();
  unsigned NumCmps = 0;
  std::vector<MachineBasicBlock*> Table;
  DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;

  // Initialize probabilities in JTProbs.
  for (unsigned I = First; I <= Last; ++I)
    JTProbs[Clusters[I].MBB] = BranchProbability::getZero();

  for (unsigned I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Prob += Clusters[I].Prob;
    const APInt &Low = Clusters[I].Low->getValue();
    const APInt &High = Clusters[I].High->getValue();
    NumCmps += (Low == High) ? 1 : 2;
    if (I != First) {
      // Fill the gap between this and the previous cluster.
      const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
      assert(PreviousHigh.slt(Low));
      uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
      for (uint64_t J = 0; J < Gap; J++)
        Table.push_back(DefaultMBB);
    }
    uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
    for (uint64_t J = 0; J < ClusterSize; ++J)
      Table.push_back(Clusters[I].MBB);
    JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
  }
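  // E.g. clusters [1..2] -> A and [5..5] -> B produce
  // Table = {A, A, Default, Default, B}: the entries for the missing values
  // 3 and 4 branch to DefaultMBB.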

  unsigned NumDests = JTProbs.size();
  if (TLI->isSuitableForBitTests(NumDests, NumCmps,
                                 Clusters[First].Low->getValue(),
                                 Clusters[Last].High->getValue(), *DL)) {
    // Clusters[First..Last] should be lowered as bit tests instead.
    return false;
  }

  // Create the MBB that will load from and jump through the table.
  // Note: We create it here, but it's not inserted into the function yet.
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *JumpTableMBB =
      CurMF->CreateMachineBasicBlock(SI->getParent());

  // Add successors. Note: use table order for determinism.
  SmallPtrSet<MachineBasicBlock *, 8> Done;
  for (MachineBasicBlock *Succ : Table) {
    if (Done.count(Succ))
      continue;
    addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
    Done.insert(Succ);
  }
  JumpTableMBB->normalizeSuccProbs();

  unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI->getJumpTableEncoding())
                     ->createJumpTableIndex(Table);

  // Set up the jump table info.
  JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
  JumpTableHeader JTH(Clusters[First].Low->getValue(),
                      Clusters[Last].High->getValue(), SI->getCondition(),
                      nullptr, false);
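  // Note: the -1U entry register, the null header block, and the 'false'
  // emitted flag above are placeholders; they are expected to be filled in
  // when the cluster is actually lowered.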
  JTCases.emplace_back(std::move(JTH), std::move(JT));

  JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
                                     JTCases.size() - 1, Prob);
  return true;
}

void SwitchCG::SwitchLowering::findBitTestClusters(CaseClusterVector &Clusters,
                                                   const SwitchInst *SI) {
  // Partition Clusters into as few subsets as possible, where each subset has a
  // range that fits in a machine word and has <= 3 unique destinations.
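  // For example, on a 64-bit target, single-case clusters {0}, {8}, {16} -> A
  // and {2} -> B can form one partition: the range 0..16 fits in a word and
  // there are only two unique destinations, each testable with one bit mask.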

#ifndef NDEBUG
  // Clusters must be sorted and contain Range or JumpTable clusters.
  assert(!Clusters.empty());
  assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
  for (const CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
  for (unsigned i = 1; i < Clusters.size(); ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOpt::None)
    return;

  // If the target does not have a legal shift left, do not emit bit tests at
  // all.
  EVT PTy = TLI->getPointerTy(*DL);
  if (!TLI->isOperationLegal(ISD::SHL, PTy))
    return;

  int BitWidth = PTy.getSizeInBits();
  const int64_t N = Clusters.size();

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);

  // FIXME: This might not be the best algorithm for finding bit test clusters.

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; --i) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;

    // Search for a solution that results in fewer partitions.
    // Note: the search is limited by BitWidth, reducing time complexity.
    for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
      // Try building a partition from Clusters[i..j].

      // Check the range.
      if (!TLI->rangeFitsInWord(Clusters[i].Low->getValue(),
                                Clusters[j].High->getValue(), *DL))
        continue;

      // Check the number of destinations and cluster types.
      // FIXME: This works, but doesn't seem very efficient.
      bool RangesOnly = true;
      BitVector Dests(FuncInfo.MF->getNumBlockIDs());
      for (int64_t k = i; k <= j; k++) {
        if (Clusters[k].Kind != CC_Range) {
          RangesOnly = false;
          break;
        }
        Dests.set(Clusters[k].MBB->getNumber());
      }
      if (!RangesOnly || Dests.count() > 3)
        break;

      // Check if it's a better partition.
      unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
      if (NumPartitions < MinPartitions[i]) {
        // Found a better partition.
        MinPartitions[i] = NumPartitions;
        LastElement[i] = j;
      }
    }
  }

  // Iterate over the partitions, replacing some with bit-test clusters
  // in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(First <= Last);
    assert(DstIndex <= First);

    CaseCluster BitTestCluster;
    if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
      Clusters[DstIndex++] = BitTestCluster;
    } else {
      size_t NumClusters = Last - First + 1;
      std::memmove(&Clusters[DstIndex], &Clusters[First],
                   sizeof(Clusters[0]) * NumClusters);
      DstIndex += NumClusters;
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
                                             unsigned First, unsigned Last,
                                             const SwitchInst *SI,
                                             CaseCluster &BTCluster) {
  assert(First <= Last);
  if (First == Last)
    return false;

  BitVector Dests(FuncInfo.MF->getNumBlockIDs());
  unsigned NumCmps = 0;
  for (int64_t I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Dests.set(Clusters[I].MBB->getNumber());
    NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
  }
  unsigned NumDests = Dests.count();

  APInt Low = Clusters[First].Low->getValue();
  APInt High = Clusters[Last].High->getValue();
  assert(Low.slt(High));

  if (!TLI->isSuitableForBitTests(NumDests, NumCmps, Low, High, *DL))
    return false;

  APInt LowBound;
  APInt CmpRange;

  const int BitWidth = TLI->getPointerTy(*DL).getSizeInBits();
  assert(TLI->rangeFitsInWord(Low, High, *DL) &&
         "Case range must fit in bit mask!");

  // Check if the clusters cover a contiguous range such that no value in the
  // range will jump to the default statement.
  bool ContiguousRange = true;
  for (int64_t I = First + 1; I <= Last; ++I) {
    if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
      ContiguousRange = false;
      break;
    }
  }

  if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word without having
    // to subtract minValue: the subtraction itself can be optimized away.
    LowBound = APInt::getNullValue(Low.getBitWidth());
    CmpRange = High;
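    // With a zero LowBound, values in [0, Low) also pass the range check but
    // must still go to the default destination, so the range can no longer be
    // treated as contiguous.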
    ContiguousRange = false;
  } else {
    LowBound = Low;
    CmpRange = High - Low;
  }

  CaseBitsVector CBV;
  auto TotalProb = BranchProbability::getZero();
  for (unsigned i = First; i <= Last; ++i) {
    // Find the CaseBits for this destination.
    unsigned j;
    for (j = 0; j < CBV.size(); ++j)
      if (CBV[j].BB == Clusters[i].MBB)
        break;
    if (j == CBV.size())
      CBV.push_back(
          CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
    CaseBits *CB = &CBV[j];

    // Update Mask, Bits and ExtraProb.
    uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
    uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
    assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
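    // The shifted mask sets bits Lo..Hi. E.g. for a cluster covering values
    // 2..4 (relative to LowBound): (-1ULL >> (63 - 2)) << 2 == 0b11100.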
    CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
    CB->Bits += Hi - Lo + 1;
    CB->ExtraProb += Clusters[i].Prob;
    TotalProb += Clusters[i].Prob;
  }

  BitTestInfo BTI;
  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
    // Sort by probability first, number of bits second, bit mask third.
    if (a.ExtraProb != b.ExtraProb)
      return a.ExtraProb > b.ExtraProb;
    if (a.Bits != b.Bits)
      return a.Bits > b.Bits;
    return a.Mask < b.Mask;
  });

  for (auto &CB : CBV) {
    MachineBasicBlock *BitTestBB =
        FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
    BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
  }
  BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
                            SI->getCondition(), -1U, MVT::Other, false,
                            ContiguousRange, nullptr, nullptr, std::move(BTI),
                            TotalProb);
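  // As in buildJumpTable(), the -1U register, MVT::Other type, and null block
  // pointers above are placeholders that are expected to be filled in when
  // the bit-test block is actually lowered.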

  BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
                                    BitTestCases.size() - 1, TotalProb);
  return true;
}

void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
  for (const CaseCluster &CC : Clusters)
    assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif

  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
    return a.Low->getValue().slt(b.Low->getValue());
  });

  // Merge adjacent clusters with the same destination.
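  // E.g. cases 1 -> A, 2 -> A, 4 -> A become clusters [1..2] -> A and
  // [4..4] -> A; 4 is not merged because it is not adjacent to 2.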
  const unsigned N = Clusters.size();
  unsigned DstIndex = 0;
  for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
    CaseCluster &CC = Clusters[SrcIndex];
    const ConstantInt *CaseVal = CC.Low;
    MachineBasicBlock *Succ = CC.MBB;

    if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
        (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
      // If this case has the same successor and is a neighbour, merge it into
      // the previous cluster.
      Clusters[DstIndex - 1].High = CaseVal;
      Clusters[DstIndex - 1].Prob += CC.Prob;
    } else {
      std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
                   sizeof(Clusters[SrcIndex]));
    }
  }
  Clusters.resize(DstIndex);
}