//===- SwitchLoweringUtils.cpp - Switch Lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains switch inst lowering optimizations and utilities for
// codegen, so that it can be used for both SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace SwitchCG;

uint64_t SwitchCG::getJumpTableRange(const CaseClusterVector &Clusters,
                                     unsigned First, unsigned Last) {
  assert(Last >= First);
  const APInt &LowCase = Clusters[First].Low->getValue();
  const APInt &HighCase = Clusters[Last].High->getValue();
  assert(LowCase.getBitWidth() == HighCase.getBitWidth());

  // FIXME: A range of consecutive cases has 100% density, but only requires one
  // comparison to lower. We should discriminate against such consecutive ranges
  // in jump tables.
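  // Illustrative example: clusters covering the case values {0,1,2} and {8,9}
  // give HighCase - LowCase + 1 = 10, even though only five of those ten
  // values are actually handled.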
  return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
}

uint64_t
SwitchCG::getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                               unsigned First, unsigned Last) {
  assert(Last >= First);
  assert(TotalCases[Last] >= TotalCases[First]);
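  // Worked example (values are illustrative): with TotalCases = {1, 3, 6},
  // the partition [First = 1, Last = 2] covers TotalCases[2] - TotalCases[0]
  // = 5 cases.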
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  return NumCases;
}

void SwitchCG::SwitchLowering::findJumpTables(CaseClusterVector &Clusters,
                                              const SwitchInst *SI,
                                              std::optional<SDLoc> SL,
                                              MachineBasicBlock *DefaultMBB,
                                              ProfileSummaryInfo *PSI,
                                              BlockFrequencyInfo *BFI) {
#ifndef NDEBUG
  // Clusters must be non-empty, sorted, and only contain Range clusters.
  assert(!Clusters.empty());
  for (CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range);
  for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  assert(TLI && "TLI not set!");
  if (!TLI->areJTsAllowed(SI->getParent()->getParent()))
    return;

  const unsigned MinJumpTableEntries = TLI->getMinimumJumpTableEntries();
  const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;

  // Bail if not enough cases.
  const int64_t N = Clusters.size();
  if (N < 2 || N < MinJumpTableEntries)
    return;

  // Accumulated number of cases in each cluster and those prior to it.
  SmallVector<unsigned, 8> TotalCases(N);
  for (unsigned i = 0; i < N; ++i) {
    const APInt &Hi = Clusters[i].High->getValue();
    const APInt &Lo = Clusters[i].Low->getValue();
    TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
    if (i != 0)
      TotalCases[i] += TotalCases[i - 1];
  }
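  // For example (illustrative), clusters covering {0}, {2,3} and {10..12}
  // yield TotalCases = {1, 3, 6}: each entry is that cluster's own case count
  // plus the running total of all earlier clusters.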

  uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
  uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
  assert(NumCases < UINT64_MAX / 100);
  assert(Range >= NumCases);

  // Cheap case: the whole range may be suitable for jump table.
  if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
    CaseCluster JTCluster;
    if (buildJumpTable(Clusters, 0, N - 1, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[0] = JTCluster;
      Clusters.resize(1);
      return;
    }
  }

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // Split Clusters into minimum number of dense partitions. The algorithm uses
  // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
  // for the Case Statement'" (1994), but builds the MinPartitions array in
  // reverse order to make it easier to reconstruct the partitions in ascending
  // order. In the choice between two optimal partitionings, it picks the one
  // which yields more jump tables.
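  //
  // In effect, the loop below computes the recurrence (a sketch, not literal
  // code):
  //   MinPartitions[i] = min(MinPartitions[i + 1] + 1,
  //                          min over suitable j > i of
  //                              (j == N - 1 ? 1 : MinPartitions[j + 1] + 1))
  // where j is "suitable" when Clusters[i..j] passes isSuitableForJumpTable;
  // PartitionsScore only breaks ties between partitionings of equal size.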

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);
  // PartitionsScore[i] is used to break ties when choosing between two
  // partitionings resulting in the same number of partitions.
  SmallVector<unsigned, 8> PartitionsScore(N);
  // For PartitionsScore, a small number of comparisons is considered as good as
  // a jump table and a single comparison is considered better than a jump
  // table.
  enum PartitionScores : unsigned {
    NoTable = 0,
    Table = 1,
    FewCases = 1,
    SingleCase = 2
  };

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;
  PartitionsScore[N - 1] = PartitionScores::SingleCase;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; i--) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;
    PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;

    // Search for a solution that results in fewer partitions.
    for (int64_t j = N - 1; j > i; j--) {
      // Try building a partition from Clusters[i..j].
      Range = getJumpTableRange(Clusters, i, j);
      NumCases = getJumpTableNumCases(TotalCases, i, j);
      assert(NumCases < UINT64_MAX / 100);
      assert(Range >= NumCases);

      if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
        unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
        unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
        int64_t NumEntries = j - i + 1;

        if (NumEntries == 1)
          Score += PartitionScores::SingleCase;
        else if (NumEntries <= SmallNumberOfEntries)
          Score += PartitionScores::FewCases;
        else if (NumEntries >= MinJumpTableEntries)
          Score += PartitionScores::Table;

        // If this leads to fewer partitions, or to the same number of
        // partitions with better score, it is a better partitioning.
        if (NumPartitions < MinPartitions[i] ||
            (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
          MinPartitions[i] = NumPartitions;
          LastElement[i] = j;
          PartitionsScore[i] = Score;
        }
      }
    }
  }

  // Iterate over the partitions, replacing some with jump tables in-place.
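  // Example (illustrative): for N = 5 with LastElement = {2, _, _, 4, 4}, the
  // walk below visits First = 0 (partition [0..2]) and then First = 3
  // (partition [3..4]); the entries at indexes 1 and 2 are never read.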
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(Last >= First);
    assert(DstIndex <= First);
    unsigned NumClusters = Last - First + 1;

    CaseCluster JTCluster;
    if (NumClusters >= MinJumpTableEntries &&
        buildJumpTable(Clusters, First, Last, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[DstIndex++] = JTCluster;
    } else {
      for (unsigned I = First; I <= Last; ++I)
        std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildJumpTable(const CaseClusterVector &Clusters,
                                              unsigned First, unsigned Last,
                                              const SwitchInst *SI,
                                              const std::optional<SDLoc> &SL,
                                              MachineBasicBlock *DefaultMBB,
                                              CaseCluster &JTCluster) {
  assert(First <= Last);

  auto Prob = BranchProbability::getZero();
  unsigned NumCmps = 0;
  std::vector<MachineBasicBlock*> Table;
  DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;

  // Initialize probabilities in JTProbs.
  for (unsigned I = First; I <= Last; ++I)
    JTProbs[Clusters[I].MBB] = BranchProbability::getZero();

  for (unsigned I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Prob += Clusters[I].Prob;
    const APInt &Low = Clusters[I].Low->getValue();
    const APInt &High = Clusters[I].High->getValue();
    NumCmps += (Low == High) ? 1 : 2;
    if (I != First) {
      // Fill the gap between this and the previous cluster.
      const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
      assert(PreviousHigh.slt(Low));
      uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
      for (uint64_t J = 0; J < Gap; J++)
        Table.push_back(DefaultMBB);
    }
    uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
    for (uint64_t J = 0; J < ClusterSize; ++J)
      Table.push_back(Clusters[I].MBB);
    JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
  }
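  // Illustrative example of the table produced above: clusters {1,2} -> A and
  // {5} -> B yield Table = [A, A, Default, Default, B], with the uncovered
  // gap 3..4 filled by the default destination.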

  unsigned NumDests = JTProbs.size();
  if (TLI->isSuitableForBitTests(NumDests, NumCmps,
                                 Clusters[First].Low->getValue(),
                                 Clusters[Last].High->getValue(), *DL)) {
    // Clusters[First..Last] should be lowered as bit tests instead.
    return false;
  }

  // Create the MBB that will load from and jump through the table.
  // Note: We create it here, but it's not inserted into the function yet.
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *JumpTableMBB =
      CurMF->CreateMachineBasicBlock(SI->getParent());

  // Add successors. Note: use table order for determinism.
  SmallPtrSet<MachineBasicBlock *, 8> Done;
  for (MachineBasicBlock *Succ : Table) {
    if (Done.count(Succ))
      continue;
    addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
    Done.insert(Succ);
  }
  JumpTableMBB->normalizeSuccProbs();

  unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI->getJumpTableEncoding())
                     ->createJumpTableIndex(Table);

  // Set up the jump table info.
  JumpTable JT(-1U, JTI, JumpTableMBB, nullptr, SL);
  JumpTableHeader JTH(Clusters[First].Low->getValue(),
                      Clusters[Last].High->getValue(), SI->getCondition(),
                      nullptr, false);
  JTCases.emplace_back(std::move(JTH), std::move(JT));

  JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
                                     JTCases.size() - 1, Prob);
  return true;
}

void SwitchCG::SwitchLowering::findBitTestClusters(CaseClusterVector &Clusters,
                                                   const SwitchInst *SI) {
  // Partition Clusters into as few subsets as possible, where each subset has a
  // range that fits in a machine word and has <= 3 unique destinations.
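  // Illustrative example (64-bit machine word): range clusters for the case
  // values {0, 12, 31} -> A and {5, 40} -> B span only 41 values and have two
  // destinations, so they can form a single bit-test partition.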

#ifndef NDEBUG
  // Clusters must be sorted and contain Range or JumpTable clusters.
  assert(!Clusters.empty());
  assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
  for (const CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
  for (unsigned i = 1; i < Clusters.size(); ++i)
    assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // If the target does not have a legal shift-left operation, do not emit bit
  // tests at all.
  EVT PTy = TLI->getPointerTy(*DL);
  if (!TLI->isOperationLegal(ISD::SHL, PTy))
    return;

  int BitWidth = PTy.getSizeInBits();
  const int64_t N = Clusters.size();

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);

  // FIXME: This might not be the best algorithm for finding bit test clusters.

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; --i) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;

    // Search for a solution that results in fewer partitions.
    // Note: the search is limited by BitWidth, reducing time complexity.
    for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
      // Try building a partition from Clusters[i..j].

      // Check the range.
      if (!TLI->rangeFitsInWord(Clusters[i].Low->getValue(),
                                Clusters[j].High->getValue(), *DL))
        continue;

      // Check the number of destinations and cluster types.
      // FIXME: This works, but doesn't seem very efficient.
      bool RangesOnly = true;
      BitVector Dests(FuncInfo.MF->getNumBlockIDs());
      for (int64_t k = i; k <= j; k++) {
        if (Clusters[k].Kind != CC_Range) {
          RangesOnly = false;
          break;
        }
        Dests.set(Clusters[k].MBB->getNumber());
      }
      if (!RangesOnly || Dests.count() > 3)
        break;

      // Check if it's a better partition.
      unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
      if (NumPartitions < MinPartitions[i]) {
        // Found a better partition.
        MinPartitions[i] = NumPartitions;
        LastElement[i] = j;
      }
    }
  }

  // Iterate over the partitions, replacing with bit-test clusters in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(First <= Last);
    assert(DstIndex <= First);

    CaseCluster BitTestCluster;
    if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
      Clusters[DstIndex++] = BitTestCluster;
    } else {
      size_t NumClusters = Last - First + 1;
      std::memmove(&Clusters[DstIndex], &Clusters[First],
                   sizeof(Clusters[0]) * NumClusters);
      DstIndex += NumClusters;
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
                                             unsigned First, unsigned Last,
                                             const SwitchInst *SI,
                                             CaseCluster &BTCluster) {
  assert(First <= Last);
  if (First == Last)
    return false;

  BitVector Dests(FuncInfo.MF->getNumBlockIDs());
  unsigned NumCmps = 0;
  for (int64_t I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Dests.set(Clusters[I].MBB->getNumber());
    NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
  }
  unsigned NumDests = Dests.count();

  APInt Low = Clusters[First].Low->getValue();
  APInt High = Clusters[Last].High->getValue();
  assert(Low.slt(High));

  if (!TLI->isSuitableForBitTests(NumDests, NumCmps, Low, High, *DL))
    return false;

  APInt LowBound;
  APInt CmpRange;

  const int BitWidth = TLI->getPointerTy(*DL).getSizeInBits();
  assert(TLI->rangeFitsInWord(Low, High, *DL) &&
         "Case range must fit in bit mask!");

  // Check if the clusters cover a contiguous range such that no value in the
  // range will jump to the default statement.
  bool ContiguousRange = true;
  for (int64_t I = First + 1; I <= Last; ++I) {
    if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
      ContiguousRange = false;
      break;
    }
  }

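  // Illustrative example (assuming 64-bit pointers): case values {2, 4, 9}
  // take the branch below, so LowBound = 0 and CmpRange = 9; case values
  // {70, 71} do not, so LowBound = 70 and CmpRange = 1.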
  if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word without having
    // to subtract minValue. In this case, we can optimize away the subtraction.
    LowBound = APInt::getZero(Low.getBitWidth());
    CmpRange = High;
    ContiguousRange = false;
  } else {
    LowBound = Low;
    CmpRange = High - Low;
  }

  CaseBitsVector CBV;
  auto TotalProb = BranchProbability::getZero();
  for (unsigned i = First; i <= Last; ++i) {
    // Find the CaseBits for this destination.
    unsigned j;
    for (j = 0; j < CBV.size(); ++j)
      if (CBV[j].BB == Clusters[i].MBB)
        break;
    if (j == CBV.size())
      CBV.push_back(
          CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
    CaseBits *CB = &CBV[j];

    // Update Mask, Bits and ExtraProb.
    uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
    uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
    assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
    CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
    CB->Bits += Hi - Lo + 1;
    CB->ExtraProb += Clusters[i].Prob;
    TotalProb += Clusters[i].Prob;
  }
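  // Worked example (illustrative): with LowBound = 0, a cluster covering the
  // case values 2..4 gives Lo = 2 and Hi = 4, so Mask |= 0b11100 (bits 2, 3
  // and 4) and Bits += 3.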

  BitTestInfo BTI;
  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
    // Sort by probability first, number of bits second, bit mask third.
    if (a.ExtraProb != b.ExtraProb)
      return a.ExtraProb > b.ExtraProb;
    if (a.Bits != b.Bits)
      return a.Bits > b.Bits;
    return a.Mask < b.Mask;
  });

  for (auto &CB : CBV) {
    MachineBasicBlock *BitTestBB =
        FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
    BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
  }
  BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
                            SI->getCondition(), -1U, MVT::Other, false,
                            ContiguousRange, nullptr, nullptr, std::move(BTI),
                            TotalProb);

  BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
                                    BitTestCases.size() - 1, TotalProb);
  return true;
}

void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
  for (const CaseCluster &CC : Clusters)
    assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif

  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
    return a.Low->getValue().slt(b.Low->getValue());
  });

  // Merge adjacent clusters with the same destination.
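  // For example (illustrative): the single cases 1 -> A, 2 -> A and 4 -> B
  // become the range clusters [1, 2] -> A and [4, 4] -> B.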
  const unsigned N = Clusters.size();
  unsigned DstIndex = 0;
  for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
    CaseCluster &CC = Clusters[SrcIndex];
    const ConstantInt *CaseVal = CC.Low;
    MachineBasicBlock *Succ = CC.MBB;

    if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
        (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
      // If this case has the same successor and is a neighbour, merge it into
      // the previous cluster.
      Clusters[DstIndex - 1].High = CaseVal;
      Clusters[DstIndex - 1].Prob += CC.Prob;
    } else {
      std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
                   sizeof(Clusters[SrcIndex]));
    }
  }
  Clusters.resize(DstIndex);
}

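// Returns the number of clusters in [First, Last] that would be ordered before
// CC when sorting by decreasing probability (ties broken by smaller case
// value first), i.e. CC's rank within that range.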
unsigned SwitchCG::SwitchLowering::caseClusterRank(const CaseCluster &CC,
                                                   CaseClusterIt First,
                                                   CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}

llvm::SwitchCG::SwitchLowering::SplitWorkItemInfo
SwitchCG::SwitchLowering::computeSplitWorkItemInfo(
    const SwitchWorkListItem &W) {
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions to
  // find a partitioning of the clusters which balances the probability on both
  // sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }
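  // Illustrative example: with cluster probabilities {4, 1, 1, 4}, the loop
  // above settles on LastLeft at index 1 and FirstRight at index 2, leaving
  // both sides with probability 5 plus half of DefaultProb.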

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has less than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  return SplitWorkItemInfo{LastLeft, FirstRight, LeftProb, RightProb};
}