//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
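///
/// For example, if every lane of a wavefront executes an atomic add of 1 to
/// the same address, the pass arranges for a single lane to perform one atomic
/// add of the active lane count, and each lane then reconstructs the value it
/// would have received from its position among the active lanes and the value
/// returned by the single atomic.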
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

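// Holds an atomic instruction that is a candidate for this optimization,
// together with the binary operation it performs, the index of its value
// operand, and whether that operand is divergent across the wavefront.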
struct ReplacementInfo {
  Instruction *I;
  AtomicRMWInst::BinOp Op;
  unsigned ValIdx;
  bool ValDivergent;
};

class AMDGPUAtomicOptimizer : public FunctionPass,
                              public InstVisitor<AMDGPUAtomicOptimizer> {
private:
  SmallVector<ReplacementInfo, 8> ToReplace;
  const LegacyDivergenceAnalysis *DA;
  const DataLayout *DL;
  DominatorTree *DT;
  const GCNSubtarget *ST;
  bool IsPixelShader;

  Value *buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
                   Value *const Identity) const;
  Value *buildShiftRight(IRBuilder<> &B, Value *V, Value *const Identity) const;
  void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                      bool ValDivergent) const;

public:
  static char ID;

  AMDGPUAtomicOptimizer() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<TargetPassConfig>();
  }

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace

char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;

bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  DL = &F.getParent()->getDataLayout();
  DominatorTreeWrapperPass *const DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTW ? &DTW->getDomTree() : nullptr;
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  ST = &TM.getSubtarget<GCNSubtarget>(F);
  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  visit(F);

  const bool Changed = !ToReplace.empty();

  for (ReplacementInfo &Info : ToReplace) {
    optimizeAtomic(*Info.I, Info.Op, Info.ValIdx, Info.ValDivergent);
  }

  ToReplace.clear();

  return Changed;
}

void AMDGPUAtomicOptimizer::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  AtomicRMWInst::BinOp Op = I.getOperation();

  switch (Op) {
  default:
    return;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    break;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;

  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (DA->isDivergentUse(&I.getOperandUse(PtrIdx))) {
    return;
  }

  const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent &&
      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
    return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
  AtomicRMWInst::BinOp Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
    Op = AtomicRMWInst::Add;
    break;
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
    Op = AtomicRMWInst::Sub;
    break;
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
    Op = AtomicRMWInst::And;
    break;
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
    Op = AtomicRMWInst::Or;
    break;
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
    Op = AtomicRMWInst::Xor;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
    Op = AtomicRMWInst::Min;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
    Op = AtomicRMWInst::UMin;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
    Op = AtomicRMWInst::Max;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
    Op = AtomicRMWInst::UMax;
    break;
  }

  const unsigned ValIdx = 0;

  const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent &&
      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
    return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (DA->isDivergentUse(&I.getOperandUse(Idx))) {
      return;
    }
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

// Use the builder to create the non-atomic counterpart of the specified
// atomicrmw binary op.
static Value *buildNonAtomicBinOp(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                  Value *LHS, Value *RHS) {
  CmpInst::Predicate Pred;

  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
    return B.CreateBinOp(Instruction::Add, LHS, RHS);
  case AtomicRMWInst::Sub:
    return B.CreateBinOp(Instruction::Sub, LHS, RHS);
  case AtomicRMWInst::And:
    return B.CreateBinOp(Instruction::And, LHS, RHS);
  case AtomicRMWInst::Or:
    return B.CreateBinOp(Instruction::Or, LHS, RHS);
  case AtomicRMWInst::Xor:
    return B.CreateBinOp(Instruction::Xor, LHS, RHS);

  case AtomicRMWInst::Max:
    Pred = CmpInst::ICMP_SGT;
    break;
  case AtomicRMWInst::Min:
    Pred = CmpInst::ICMP_SLT;
    break;
  case AtomicRMWInst::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case AtomicRMWInst::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  }
  Value *Cond = B.CreateICmp(Pred, LHS, RHS);
  return B.CreateSelect(Cond, LHS, RHS);
}

// Use the builder to create an inclusive scan of V across the wavefront, with
// all lanes active.
Value *AMDGPUAtomicOptimizer::buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                        Value *V, Value *const Identity) const {
  Type *const Ty = V->getType();
  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, Ty);
  Function *PermLaneX16 =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_permlanex16, {});
  Function *ReadLane =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});

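  // Use DPP row shifts of 1, 2, 4 and 8 lanes to combine each lane's value
  // with the values of the lanes below it, producing an inclusive scan within
  // each row of 16 lanes.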
  for (unsigned Idx = 0; Idx < 4; Idx++) {
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 | 1 << Idx),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
  }
  if (ST->hasDPPBroadcasts()) {
    // GFX9 has DPP row broadcast operations.
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST15), B.getInt32(0xa),
                      B.getInt32(0xf), B.getFalse()}));
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST31), B.getInt32(0xc),
                      B.getInt32(0xf), B.getFalse()}));
  } else {
    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.

    // Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
    // 48..63).
    Value *const PermX =
        B.CreateCall(PermLaneX16, {V, V, B.getInt32(-1), B.getInt32(-1),
                                   B.getFalse(), B.getFalse()});
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, PermX, B.getInt32(DPP::QUAD_PERM_ID),
                      B.getInt32(0xa), B.getInt32(0xf), B.getFalse()}));
    if (!ST->isWave32()) {
      // Combine lane 31 into lanes 32..63.
      Value *const Lane31 = B.CreateCall(ReadLane, {V, B.getInt32(31)});
      V = buildNonAtomicBinOp(
          B, Op, V,
          B.CreateCall(UpdateDPP,
                       {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
                        B.getInt32(0xc), B.getInt32(0xf), B.getFalse()}));
    }
  }
  return V;
}

// Use the builder to create a shift right of V across the wavefront, with all
// lanes active, to turn an inclusive scan into an exclusive scan.
Value *AMDGPUAtomicOptimizer::buildShiftRight(IRBuilder<> &B, Value *V,
                                              Value *const Identity) const {
  Type *const Ty = V->getType();
  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, Ty);
  Function *ReadLane =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
  Function *WriteLane =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, {});

  if (ST->hasDPPWavefrontShifts()) {
    // GFX9 has DPP wavefront shift operations.
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
                      B.getInt32(0xf), B.getFalse()});
  } else {
    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.
    Value *Old = V;
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 + 1),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});

    // Copy the old lane 15 to the new lane 16.
    V = B.CreateCall(WriteLane, {B.CreateCall(ReadLane, {Old, B.getInt32(15)}),
                                 B.getInt32(16), V});

    if (!ST->isWave32()) {
      // Copy the old lane 31 to the new lane 32.
      V = B.CreateCall(
          WriteLane,
          {B.CreateCall(ReadLane, {Old, B.getInt32(31)}), B.getInt32(32), V});

      // Copy the old lane 47 to the new lane 48.
      V = B.CreateCall(
          WriteLane,
          {B.CreateCall(ReadLane, {Old, B.getInt32(47)}), B.getInt32(48), V});
    }
  }

  return V;
}

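// Get the identity element for the given atomic binary operation, i.e. the
// value I for which Op(I, X) == X for every X. Sub uses Add's identity (zero)
// because the scan for a divergent Sub is built out of additions.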
static APInt getIdentityValueForAtomicOp(AtomicRMWInst::BinOp Op,
                                         unsigned BitWidth) {
  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::UMax:
    return APInt::getMinValue(BitWidth);
  case AtomicRMWInst::And:
  case AtomicRMWInst::UMin:
    return APInt::getMaxValue(BitWidth);
  case AtomicRMWInst::Max:
    return APInt::getSignedMinValue(BitWidth);
  case AtomicRMWInst::Min:
    return APInt::getSignedMaxValue(BitWidth);
  }
}

void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
                                           AtomicRMWInst::BinOp Op,
                                           unsigned ValIdx,
                                           bool ValDivergent) const {
  // Start building just before the instruction.
  IRBuilder<> B(&I);

  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BB's.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane is
  // live to do this.
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator);
    B.SetInsertPoint(&I);
  }

  Type *const Ty = I.getType();
  const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *const V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  Type *const WaveTy = B.getIntNTy(ST->getWavefrontSize());
  CallInst *const Ballot = B.CreateIntrinsic(
      Intrinsic::amdgcn_icmp, {WaveTy, B.getInt32Ty()},
      {B.getInt32(1), B.getInt32(0), B.getInt32(CmpInst::ICMP_NE)});
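  // The icmp intrinsic compares 1 != 0, which is true in every active lane, so
  // Ballot is a bitmask with one bit set for each currently active lane.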

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
  Value *Mbcnt;
  if (ST->isWave32()) {
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {},
                              {Ballot, B.getInt32(0)});
  } else {
    Value *const BitCast = B.CreateBitCast(Ballot, VecTy);
    Value *const ExtractLo = B.CreateExtractElement(BitCast, B.getInt32(0));
    Value *const ExtractHi = B.CreateExtractElement(BitCast, B.getInt32(1));
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {},
                              {ExtractLo, B.getInt32(0)});
    Mbcnt =
        B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {}, {ExtractHi, Mbcnt});
  }
  Mbcnt = B.CreateIntCast(Mbcnt, Ty, false);
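  // Mbcnt now holds, for each lane, the number of active lanes with a lower
  // lane index, cast to the type of the atomic operand.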

  Value *const Identity = B.getInt(getIdentityValueForAtomicOp(Op, TyBitWidth));

  Value *ExclScan = nullptr;
  Value *NewV = nullptr;

  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    // First we need to set all inactive invocations to the identity value, so
    // that they can correctly contribute to the final result.
    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty, {V, Identity});

    const AtomicRMWInst::BinOp ScanOp =
        Op == AtomicRMWInst::Sub ? AtomicRMWInst::Add : Op;
    NewV = buildScan(B, ScanOp, NewV, Identity);
    ExclScan = buildShiftRight(B, NewV, Identity);
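
    // For example, if the active lanes contribute 3, 1 and 4 to an add, the
    // inclusive scan yields 3, 4 and 8, and shifting it right by one lane
    // gives the exclusive scan 0, 3 and 4.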

    // Read the value from the last lane, which has accumulated the values of
    // each active lane in the wavefront. This will be our new value which we
    // will provide to the atomic operation.
    Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
    if (TyBitWidth == 64) {
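      // readlane only operates on 32-bit values, so split the 64-bit value
      // into its low and high halves, read each half from the last lane, and
      // reassemble the result.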
      Value *const ExtractLo = B.CreateTrunc(NewV, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(NewV, 32), B.getInt32Ty());
      CallInst *const ReadLaneLo = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractLo, LastLaneIdx});
      CallInst *const ReadLaneHi = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractHi, LastLaneIdx});
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadLaneHi, B.getInt32(1));
      NewV = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                               {NewV, LastLaneIdx});
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Finally mark the readlanes in the WWM section.
    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, NewV);
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");

    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the number of active lanes.
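      // For example, five active lanes each adding 7 are folded into a single
      // atomic add of 35.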
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, Ctpop);
      break;
    }

    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
      // These operations with a uniform value are idempotent: doing the atomic
      // operation multiple times has the same effect as doing it once.
      NewV = V;
      break;

    case AtomicRMWInst::Xor: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the parity of the number of active lanes.
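      // Pairs of lanes XOR'ing the same value cancel each other out, so the
      // memory location only changes when an odd number of lanes is active.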
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, B.CreateAnd(Ctpop, 1));
      break;
    }
    }
  }

  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below it, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getIntN(TyBitWidth, 0));

  // Store I's original basic block before we split the block.
  BasicBlock *const EntryBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);

  // Clone the original atomic operation into single lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  const bool NeedResult = !I.use_empty();
  if (NeedResult) {
    // Create a PHI node to get our new atomic result into the exit block.
    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(UndefValue::get(Ty), EntryBB);
    PHI->addIncoming(NewI, SingleLaneTerminator->getParent());

    // We need to broadcast the value from the lowest active lane (the first
    // lane) to all other lanes in the wavefront. We use an intrinsic for this,
    // but have to handle 64-bit broadcasts with two calls to this intrinsic.
    Value *BroadcastI = nullptr;

    if (TyBitWidth == 64) {
      Value *const ExtractLo = B.CreateTrunc(PHI, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(PHI, 32), B.getInt32Ty());
      CallInst *const ReadFirstLaneLo =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
      CallInst *const ReadFirstLaneHi =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
      BroadcastI = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      BroadcastI = B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, PHI);
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Now that we have the result of our single atomic operation, we need to
    // get our individual lane's slice of the result. We combine the lane
    // offset we previously calculated with the atomic result value we got from
    // the first lane to recover this lane's portion of the atomic result.
    Value *LaneOffset = nullptr;
    if (ValDivergent) {
      LaneOffset = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, ExclScan);
    } else {
      switch (Op) {
      default:
        llvm_unreachable("Unhandled atomic op");
      case AtomicRMWInst::Add:
      case AtomicRMWInst::Sub:
        LaneOffset = B.CreateMul(V, Mbcnt);
        break;
      case AtomicRMWInst::And:
      case AtomicRMWInst::Or:
      case AtomicRMWInst::Max:
      case AtomicRMWInst::Min:
      case AtomicRMWInst::UMax:
      case AtomicRMWInst::UMin:
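        // The lowest active lane (where Cond is true) keeps the old value
        // unchanged; every other lane combines the old value with the uniform
        // V once, matching what per-lane atomics would have returned.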
        LaneOffset = B.CreateSelect(Cond, Identity, V);
        break;
      case AtomicRMWInst::Xor:
        LaneOffset = B.CreateMul(V, B.CreateAnd(Mbcnt, 1));
        break;
      }
    }
    Value *const Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);

    if (IsPixelShader) {
      // Need a final PHI to reconverge to above the helper lane branch mask.
      B.SetInsertPoint(PixelExitBB->getFirstNonPHI());

      PHINode *const PHI = B.CreatePHI(Ty, 2);
      PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
      PHI->addIncoming(Result, I.getParent());
      I.replaceAllUsesWith(PHI);
    } else {
      // Replace the original atomic instruction with the new one.
      I.replaceAllUsesWith(Result);
    }
  }

  // And delete the original.
  I.eraseFromParent();
}

INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)

FunctionPass *llvm::createAMDGPUAtomicOptimizerPass() {
  return new AMDGPUAtomicOptimizer();
}