//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote constant offsets to the immediate by
// adjusting the base. It tries to use a base from a nearby instruction that
// allows it to have a 13-bit constant offset and then promotes that 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offset fields, but whose differences fit in 8 bits, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD,
  BUFFER_STORE,
  MIMG,
  TBUFFER_LOAD,
  TBUFFER_STORE,
};

enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
  SSAMP = 0x20,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    unsigned EltSize;
    unsigned Offset;
    unsigned Width;
    unsigned Format;
    unsigned BaseOff;
    unsigned DMask;
    InstClassEnum InstClass;
    bool GLC;
    bool SLC;
    bool DLC;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
    int AddrIdx[5];
    const MachineOperand *AddrReg[5];
    unsigned NumAddresses;

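    /// Returns true if this instruction and \p MI access the same base
    /// address: each cached address operand must match the corresponding
    /// operand of \p MI, either as equal immediates or as the same register
    /// and subregister.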
    bool hasSameBaseAddress(const MachineInstr &MI) {
      for (unsigned i = 0; i < NumAddresses; i++) {
        const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);

        if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
          if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
              AddrReg[i]->getImm() != AddrRegNext.getImm()) {
            return false;
          }
          continue;
        }

        // Check same base pointer. Be careful of subregisters, which can occur
        // with vectors of pointers.
        if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
            AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
          return false;
        }
      }
      return true;
    }

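    /// Returns true if every address operand is a candidate for merging: an
    /// immediate, or a virtual register with more than one use.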
    bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
      for (unsigned i = 0; i < NumAddresses; ++i) {
        const MachineOperand *AddrOp = AddrReg[i];
        // Immediates are always OK.
        if (AddrOp->isImm())
          continue;

        // Don't try to merge addresses that aren't either immediates or
        // registers.
        // TODO: Should be possible to merge FrameIndexes and maybe some other
        // non-register operands.
        if (!AddrOp->isReg())
          return false;

        // TODO: We should be able to merge physical reg addresses.
        if (Register::isPhysicalRegister(AddrOp->getReg()))
          return false;

        // If an address has only one use then there will be no other
        // instructions with the same address, so we can't merge this one.
        if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
          return false;
      }
      return true;
    }

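    /// Initializes this CombineInfo from \p MI: classifies the instruction,
    /// records its offset, width, format and cache policy bits, and caches
    /// its address operands.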
    void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
               const GCNSubtarget &STM);
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MCSubtargetInfo *STI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool dmasksCanBeCombined(const CombineInfo &CI,
                                  const SIInstrInfo &TII,
                                  const CombineInfo &Paired);
  static bool offsetsCanBeCombined(CombineInfo &CI, const MCSubtargetInfo &STI,
                                   CombineInfo &Paired);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI,
                        const CombineInfo &Paired);
  static unsigned getNewOpcode(const CombineInfo &CI,
                               const CombineInfo &Paired);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI,
                                                     const CombineInfo &Paired);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI,
                                                    const CombineInfo &Paired);

  bool findMatchingInst(CombineInfo &CI, CombineInfo &Paired);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI,
                                             CombineInfo &Paired);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI,
                                              CombineInfo &Paired);
  MachineBasicBlock::iterator mergeImagePair(CombineInfo &CI,
                                             CombineInfo &Paired);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI,
                                                      CombineInfo &Paired);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI,
                                                  CombineInfo &Paired);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI,
                                                   CombineInfo &Paired);
  MachineBasicBlock::iterator mergeTBufferLoadPair(CombineInfo &CI,
                                                   CombineInfo &Paired);
  MachineBasicBlock::iterator mergeTBufferStorePair(CombineInfo &CI,
                                                    CombineInfo &Paired);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset) const;
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr) const;
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
  Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
  void processBaseWithConstOffset(const MachineOperand &Base,
                                  MemAddress &Addr) const;
  /// Promotes constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &CI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &Promoted) const;
  void addInstToMergeableList(const CombineInfo &CI,
                  std::list<std::list<CombineInfo> > &MergeableInsts) const;
  bool collectMergeableInsts(MachineBasicBlock &MBB,
                  std::list<std::list<CombineInfo> > &MergeableInsts) const;

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void removeCombinedInst(std::list<CombineInfo> &MergeList,
                          const MachineInstr &MI);
  bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
                                     bool &OptimizeListAgain);
  bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

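/// Returns the width of \p MI in dwords: the element count for MUBUF and
/// MTBUF opcodes, the number of enabled dmask channels for MIMG, or the
/// dword count for S_BUFFER loads; 0 for unhandled opcodes.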
static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
  const unsigned Opc = MI.getOpcode();

  if (TII.isMUBUF(Opc)) {
    // FIXME: Handle d16 correctly
    return AMDGPU::getMUBUFElements(Opc);
  }
  if (TII.isMIMG(MI)) {
    uint64_t DMaskImm =
        TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
    return countPopulation(DMaskImm);
  }
  if (TII.isMTBUF(Opc)) {
    return AMDGPU::getMTBUFElements(Opc);
  }

  switch (Opc) {
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  default:
    return 0;
  }
}

/// Maps instruction opcode to enum InstClassEnum.
static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc)) {
      switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
      default:
        return UNKNOWN;
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
        return BUFFER_LOAD;
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
        return BUFFER_STORE;
      }
    }
    if (TII.isMIMG(Opc)) {
      // Ignore instructions encoded without vaddr.
      if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1)
        return UNKNOWN;
      // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
      if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
          TII.isGather4(Opc))
        return UNKNOWN;
      return MIMG;
    }
    if (TII.isMTBUF(Opc)) {
      switch (AMDGPU::getMTBUFBaseOpcode(Opc)) {
      default:
        return UNKNOWN;
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET:
      case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact:
        return TBUFFER_LOAD;
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET:
      case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact:
        return TBUFFER_STORE;
      }
    }
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

/// Determines instruction subclass from opcode. Only instructions
/// of the same subclass can be merged together.
static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
  switch (Opc) {
  default:
    if (TII.isMUBUF(Opc))
      return AMDGPU::getMUBUFBaseOpcode(Opc);
    if (TII.isMIMG(Opc)) {
      const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
      assert(Info);
      return Info->BaseOpcode;
    }
    if (TII.isMTBUF(Opc))
      return AMDGPU::getMTBUFBaseOpcode(Opc);
    return -1;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return Opc;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
  }
}

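/// Returns a bitmask of RegisterEnum values identifying which address
/// operands the opcode \p Opc carries.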
static unsigned getRegs(unsigned Opc, const SIInstrInfo &TII) {
  if (TII.isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  if (TII.isMIMG(Opc)) {
    unsigned result = VADDR | SRSRC;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
    if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
      result |= SSAMP;

    return result;
  }
  if (TII.isMTBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMTBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMTBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMTBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
                                              const SIInstrInfo &TII,
                                              const GCNSubtarget &STM) {
  I = MI;
  unsigned Opc = MI->getOpcode();
  InstClass = getInstClass(Opc, TII);

  if (InstClass == UNKNOWN)
    return;

  switch (InstClass) {
  case DS_READ:
    EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                        : 4;
    break;
  case DS_WRITE:
    EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                          : 4;
    break;
  case S_BUFFER_LOAD_IMM:
    EltSize = AMDGPU::getSMRDEncodedOffset(STM, 4);
    break;
  default:
    EltSize = 4;
    break;
  }

  if (InstClass == MIMG) {
    DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
  } else {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
    Offset = I->getOperand(OffsetIdx).getImm();
  }

  if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
    Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();

  Width = getOpcodeWidth(*I, TII);

  if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
    Offset &= 0xffff;
  } else if (InstClass != MIMG) {
    GLC = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm();
    if (InstClass != S_BUFFER_LOAD_IMM) {
      SLC = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm();
    }
    DLC = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm();
  }

  unsigned AddrOpName[5] = {0};
  NumAddresses = 0;
  const unsigned Regs = getRegs(I->getOpcode(), TII);

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  if (Regs & SSAMP) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::ssamp;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &I->getOperand(AddrIdx[i]);
  }

  InstsToMove.clear();
}

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

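// Moves each instruction in \p InstsToMove to the point immediately after
// \p I, preserving their relative order.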
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

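// Records MI's register defs in \p RegDefs and its physical-register reads
// in \p PhysRegUses so that later instructions can be checked for
// dependencies against them.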
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

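// Returns true if it is safe to move every instruction in \p InstsToMove
// down past the memory operation \p MemOp.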
static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

// This function assumes that \p A and \p B are identical except for
// size and offset, and that they reference adjacent memory.
static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                   const MachineMemOperand *A,
                                                   const MachineMemOperand *B) {
  unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
  unsigned Size = A->getSize() + B->getSize();
  // getMachineMemOperand adds the offset parameter to the existing offset of
  // A, so we pass 0 here as the offset and then manually set it to the
  // correct value after the call.
  MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
  MMO->setOffset(MinOffset);
  return MMO;
}

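// Two MIMG instructions can be combined only if neither sets tfe or lwe,
// their optional modifiers all match, and their dmasks do not overlap: the
// smaller dmask must lie entirely below the lowest set bit of the larger one.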
bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
                                               const SIInstrInfo &TII,
                                               const CombineInfo &Paired) {
  assert(CI.InstClass == MIMG);

  // Ignore instructions with tfe/lwe set.
  const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
  const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);

  if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
    return false;

  // Check other optional immediate operands for equality.
  unsigned OperandsToMatch[] = {AMDGPU::OpName::glc, AMDGPU::OpName::slc,
                                AMDGPU::OpName::d16, AMDGPU::OpName::unorm,
                                AMDGPU::OpName::da,  AMDGPU::OpName::r128};

  for (auto op : OperandsToMatch) {
    int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
    if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
      return false;
    if (Idx != -1 &&
        CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm())
      return false;
  }

  // Check DMask for overlaps.
  unsigned MaxMask = std::max(CI.DMask, Paired.DMask);
  unsigned MinMask = std::min(CI.DMask, Paired.DMask);

  unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
  if ((1u << AllowedBitsForMin) <= MinMask)
    return false;

  return true;
}

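// Returns the buffer format that encodes \p ComponentCount components with
// the same bits-per-component and numeric format as \p OldFormat, or 0 if no
// such format exists.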
static unsigned getBufferFormatWithCompCount(unsigned OldFormat,
                                             unsigned ComponentCount,
                                             const MCSubtargetInfo &STI) {
  if (ComponentCount > 4)
    return 0;

  const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo =
      llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI);
  if (!OldFormatInfo)
    return 0;

  const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo =
      llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp,
                                           ComponentCount,
                                           OldFormatInfo->NumFormat, STI);

  if (!NewFormatInfo)
    return 0;

  assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat &&
         NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp);

  return NewFormatInfo->Format;
}

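// Returns true if the offsets of the two instructions can be folded into a
// single merged access. For DS instructions this may rewrite CI and Paired to
// use the stride-64 opcodes or a shifted base address (CI.BaseOff); for SMEM
// and VMEM the two accesses must simply be adjacent with matching cache
// policy bits.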
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
                                                const MCSubtargetInfo &STI,
                                                CombineInfo &Paired) {
  assert(CI.InstClass != MIMG);

  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset == Paired.Offset)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
    return false;

  if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {

    const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
        llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI);
    if (!Info0)
      return false;
    const llvm::AMDGPU::GcnBufferFormatInfo *Info1 =
        llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI);
    if (!Info1)
      return false;

    if (Info0->BitsPerComp != Info1->BitsPerComp ||
        Info0->NumFormat != Info1->NumFormat)
      return false;

    // TODO: Should be possible to support more formats, but if format loads
    // are not dword-aligned, the merged load might not be valid.
    if (Info0->BitsPerComp != 32)
      return false;

    if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width,
                                     STI) == 0)
      return false;
  }

  unsigned EltOffset0 = CI.Offset / CI.EltSize;
  unsigned EltOffset1 = Paired.Offset / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width == EltOffset1 ||
            EltOffset1 + Paired.Width == EltOffset0) &&
           CI.GLC == Paired.GLC && CI.DLC == Paired.DLC &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC == Paired.SLC);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset = EltOffset0 / 64;
    Paired.Offset = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset = EltOffset0;
    Paired.Offset = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset, Paired.Offset);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    Paired.Offset = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset = EltOffset0 - CI.BaseOff / CI.EltSize;
    Paired.Offset = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

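// Returns true if a merged access of the combined width is encodable:
// S_BUFFER loads only exist in x2 and x4 widths, and 3-dword accesses require
// a subtarget with dwordx3 load/store opcodes.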
bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI,
                                     const CombineInfo &Paired) {
  const unsigned Width = (CI.Width + Paired.Width);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

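/// Scans forward from CI.I for an instruction of the same subclass whose
/// offsets (or dmasks, for MIMG) can be combined with CI's, collecting in
/// CI.InstsToMove any intervening instructions that must be moved below the
/// merge point. On success, \p Paired is initialized with the match.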
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI,
                                            CombineInfo &Paired) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc, *TII);

  if (InstClass == UNKNOWN) {
    return false;
  }
  const unsigned InstSubclass = getInstSubclass(Opc, *TII);

  // Do not merge VMEM buffer instructions with "swizzled" bit set.
  int Swizzled =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
  if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
    return false;

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {

    if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
        (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction; any uses of I will need to
      // be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    int Swizzled =
        AMDGPU::getNamedOperandIdx(MBBI->getOpcode(), AMDGPU::OpName::swz);
    if (Swizzled != -1 && MBBI->getOperand(Swizzled).getImm())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = CI.hasSameBaseAddress(*MBBI);

    if (Match) {
      Paired.setMI(MBBI, *TII, *STM);

      // Check that both offsets (or masks for MIMG) can be combined and fit in
      // the reduced range.
      bool canBeCombined =
          CI.InstClass == MIMG
              ? dmasksCanBeCombined(CI, *TII, Paired)
              : widthsFit(*STM, CI, Paired) &&
                offsetsCanBeCombined(CI, *STI, Paired);

      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (canBeCombined && canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
        return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

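// Merges CI and Paired into a single ds_read2 (or ds_read2st64), rebasing the
// address if CI.BaseOff is set, and copies the two halves of the merged
// result back into the original destination registers.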
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset;
  unsigned NewOffset1 = Paired.Offset;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  Register DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*Paired.I});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

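// Merges CI and Paired into a single ds_write2 (or ds_write2st64), rebasing
// the address if CI.BaseOff is set.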
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset;
  unsigned NewOffset1 = Paired.Offset;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  Register BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, Paired.I, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*Paired.I});

  moveInstsAfter(Write2, CI.InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2;
}

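// Merges two MIMG loads into one that loads the union of their dmasks, then
// splits the wider result back into the original destinations.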
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI, Paired);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);

  Register DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedDMask = CI.DMask | Paired.DMask;
  unsigned DMaskIdx =
      AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);

  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
  for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
    if (I == DMaskIdx)
      MIB.addImm(MergedDMask);
    else
      MIB.add((*CI.I).getOperand(I));
  }

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New = MIB.addMemOperand(
      combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  unsigned SubRegIdx0, SubRegIdx1;
  std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);

  BuildMI(*MBB, Paired.I, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

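// Merges two s_buffer_load_dword[xN] instructions into one wider load at the
// smaller of the two offsets, then splits the result back into the original
// destinations.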
1171360784SdimMachineBasicBlock::iterator
1172360784SdimSILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired) {
1173360784Sdim  MachineBasicBlock *MBB = CI.I->getParent();
1174360784Sdim  DebugLoc DL = CI.I->getDebugLoc();
1175360784Sdim  const unsigned Opcode = getNewOpcode(CI, Paired);
1176360784Sdim
1177360784Sdim  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1178360784Sdim
1179360784Sdim  Register DestReg = MRI->createVirtualRegister(SuperRC);
1180360784Sdim  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1181360784Sdim
1182360784Sdim  // It shouldn't be possible to get this far if the two instructions
1183360784Sdim  // don't have a single memoperand, because MachineInstr::mayAlias()
1184360784Sdim  // will return true if this is the case.
1185360784Sdim  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1186360784Sdim
1187360784Sdim  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1188360784Sdim  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1189360784Sdim
1190360784Sdim  MachineInstr *New =
1191360784Sdim    BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg)
1192360784Sdim        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
        .addImm(MergedOffset) // offset
        .addImm(CI.GLC)       // glc
        .addImm(CI.DLC)       // dlc
1196360784Sdim        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1197360784Sdim
1198360784Sdim  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1199344779Sdim  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1200344779Sdim  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1201327952Sdim
1202327952Sdim  // Copy to the old destination registers.
1203327952Sdim  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1204327952Sdim  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
1205360784Sdim  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst);
1206327952Sdim
1207360784Sdim  BuildMI(*MBB, Paired.I, DL, CopyDesc)
1208327952Sdim      .add(*Dest0) // Copy to same destination including flags and sub reg.
1209327952Sdim      .addReg(DestReg, 0, SubRegIdx0);
1210360784Sdim  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1211327952Sdim                            .add(*Dest1)
1212327952Sdim                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
1213327952Sdim
1214327952Sdim  moveInstsAfter(Copy1, CI.InstsToMove);
1215327952Sdim
1216327952Sdim  CI.I->eraseFromParent();
1217360784Sdim  Paired.I->eraseFromParent();
1218360784Sdim  return New;
1219327952Sdim}
1220327952Sdim
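// Merge two MUBUF buffer loads into a single wider load and copy the
// subregisters of the merged result back to the original destinations.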
1221344779SdimMachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI,
                                          CombineInfo &Paired) {
1223327952Sdim  MachineBasicBlock *MBB = CI.I->getParent();
1224327952Sdim  DebugLoc DL = CI.I->getDebugLoc();
1225327952Sdim
1226360784Sdim  const unsigned Opcode = getNewOpcode(CI, Paired);
1227327952Sdim
1228360784Sdim  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1229344779Sdim
  // Allocate the merged destination register.
1231360784Sdim  Register DestReg = MRI->createVirtualRegister(SuperRC);
1232360784Sdim  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1233327952Sdim
1234360784Sdim  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1235327952Sdim
1236360784Sdim  const unsigned Regs = getRegs(Opcode, *TII);
1237327952Sdim
1238344779Sdim  if (Regs & VADDR)
1239344779Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1240344779Sdim
1241360784Sdim  // It shouldn't be possible to get this far if the two instructions
1242360784Sdim  // don't have a single memoperand, because MachineInstr::mayAlias()
1243360784Sdim  // will return true if this is the case.
1244360784Sdim  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1245327952Sdim
1246360784Sdim  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1247360784Sdim  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1248360784Sdim
1249360784Sdim  MachineInstr *New =
1250360784Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1251360784Sdim        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
        .addImm(MergedOffset) // offset
        .addImm(CI.GLC)       // glc
        .addImm(CI.SLC)       // slc
        .addImm(0)            // tfe
        .addImm(CI.DLC)       // dlc
        .addImm(0)            // swz
1258360784Sdim        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1259360784Sdim
1260360784Sdim  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1261344779Sdim  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1262344779Sdim  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1263327952Sdim
1264327952Sdim  // Copy to the old destination registers.
1265327952Sdim  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1266327952Sdim  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1267360784Sdim  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1268327952Sdim
1269360784Sdim  BuildMI(*MBB, Paired.I, DL, CopyDesc)
1270327952Sdim      .add(*Dest0) // Copy to same destination including flags and sub reg.
1271327952Sdim      .addReg(DestReg, 0, SubRegIdx0);
1272360784Sdim  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1273327952Sdim                            .add(*Dest1)
1274327952Sdim                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
1275327952Sdim
1276327952Sdim  moveInstsAfter(Copy1, CI.InstsToMove);
1277327952Sdim
1278327952Sdim  CI.I->eraseFromParent();
1279360784Sdim  Paired.I->eraseFromParent();
1280360784Sdim  return New;
1281327952Sdim}
1282327952Sdim
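// Merge two MTBUF (typed buffer) loads. Besides widening the data operand,
// the buffer format is rewritten for the combined component count.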
1283360784SdimMachineBasicBlock::iterator
SILoadStoreOptimizer::mergeTBufferLoadPair(CombineInfo &CI,
                                           CombineInfo &Paired) {
1285360784Sdim  MachineBasicBlock *MBB = CI.I->getParent();
1286360784Sdim  DebugLoc DL = CI.I->getDebugLoc();
1287327952Sdim
1288360784Sdim  const unsigned Opcode = getNewOpcode(CI, Paired);
1289360784Sdim
1290360784Sdim  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1291360784Sdim
  // Allocate the merged destination register.
1293360784Sdim  Register DestReg = MRI->createVirtualRegister(SuperRC);
1294360784Sdim  unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1295360784Sdim
1296360784Sdim  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1297360784Sdim
1298360784Sdim  const unsigned Regs = getRegs(Opcode, *TII);
1299360784Sdim
1300360784Sdim  if (Regs & VADDR)
1301360784Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1302360784Sdim
1303360784Sdim  unsigned JoinedFormat =
1304360784Sdim      getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STI);
1305360784Sdim
1306360784Sdim  // It shouldn't be possible to get this far if the two instructions
1307360784Sdim  // don't have a single memoperand, because MachineInstr::mayAlias()
1308360784Sdim  // will return true if this is the case.
1309360784Sdim  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1310360784Sdim
1311360784Sdim  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1312360784Sdim  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1313360784Sdim
1314360784Sdim  MachineInstr *New =
1315360784Sdim      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1316360784Sdim          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(MergedOffset) // offset
          .addImm(JoinedFormat) // format
          .addImm(CI.GLC)       // glc
          .addImm(CI.SLC)       // slc
          .addImm(0)            // tfe
          .addImm(CI.DLC)       // dlc
          .addImm(0)            // swz
1324360784Sdim          .addMemOperand(
1325360784Sdim              combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1326360784Sdim
1327360784Sdim  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1328360784Sdim  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1329360784Sdim  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1330360784Sdim
1331360784Sdim  // Copy to the old destination registers.
1332360784Sdim  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1333360784Sdim  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1334360784Sdim  const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1335360784Sdim
1336360784Sdim  BuildMI(*MBB, Paired.I, DL, CopyDesc)
1337360784Sdim      .add(*Dest0) // Copy to same destination including flags and sub reg.
1338360784Sdim      .addReg(DestReg, 0, SubRegIdx0);
1339360784Sdim  MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1340360784Sdim                            .add(*Dest1)
1341360784Sdim                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
1342360784Sdim
1343360784Sdim  moveInstsAfter(Copy1, CI.InstsToMove);
1344360784Sdim
1345360784Sdim  CI.I->eraseFromParent();
1346360784Sdim  Paired.I->eraseFromParent();
1347360784Sdim  return New;
1348360784Sdim}
1349360784Sdim
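// Merge two MTBUF (typed buffer) stores: gather both source registers into a
// REG_SEQUENCE and emit one wider store with the combined buffer format.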
1350360784SdimMachineBasicBlock::iterator
SILoadStoreOptimizer::mergeTBufferStorePair(CombineInfo &CI,
                                            CombineInfo &Paired) {
1352360784Sdim  MachineBasicBlock *MBB = CI.I->getParent();
1353360784Sdim  DebugLoc DL = CI.I->getDebugLoc();
1354360784Sdim
1355360784Sdim  const unsigned Opcode = getNewOpcode(CI, Paired);
1356360784Sdim
1357360784Sdim  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1358360784Sdim  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1359360784Sdim  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1360360784Sdim
1361360784Sdim  // Copy to the new source register.
1362360784Sdim  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1363360784Sdim  Register SrcReg = MRI->createVirtualRegister(SuperRC);
1364360784Sdim
1365360784Sdim  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1366360784Sdim  const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1367360784Sdim
1368360784Sdim  BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1369360784Sdim      .add(*Src0)
1370360784Sdim      .addImm(SubRegIdx0)
1371360784Sdim      .add(*Src1)
1372360784Sdim      .addImm(SubRegIdx1);
1373360784Sdim
1374360784Sdim  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1375360784Sdim                 .addReg(SrcReg, RegState::Kill);
1376360784Sdim
1377360784Sdim  const unsigned Regs = getRegs(Opcode, *TII);
1378360784Sdim
1379360784Sdim  if (Regs & VADDR)
1380360784Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1381360784Sdim
1382360784Sdim  unsigned JoinedFormat =
1383360784Sdim      getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STI);
1384360784Sdim
1385360784Sdim  // It shouldn't be possible to get this far if the two instructions
1386360784Sdim  // don't have a single memoperand, because MachineInstr::mayAlias()
1387360784Sdim  // will return true if this is the case.
1388360784Sdim  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1389360784Sdim
1390360784Sdim  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1391360784Sdim  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1392360784Sdim
1393360784Sdim  MachineInstr *New =
1394360784Sdim      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1395360784Sdim          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(std::min(CI.Offset, Paired.Offset)) // offset
          .addImm(JoinedFormat)                       // format
          .addImm(CI.GLC)                             // glc
          .addImm(CI.SLC)                             // slc
          .addImm(0)                                  // tfe
          .addImm(CI.DLC)                             // dlc
          .addImm(0)                                  // swz
1403360784Sdim          .addMemOperand(
1404360784Sdim              combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1405360784Sdim
1406360784Sdim  moveInstsAfter(MIB, CI.InstsToMove);
1407360784Sdim
1408360784Sdim  CI.I->eraseFromParent();
1409360784Sdim  Paired.I->eraseFromParent();
1410360784Sdim  return New;
1411360784Sdim}
1412360784Sdim
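// Return the opcode to use for the merged instruction, selected by CI's
// instruction class and the combined width in dwords.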
1413360784Sdimunsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
1414360784Sdim                                            const CombineInfo &Paired) {
1415360784Sdim  const unsigned Width = CI.Width + Paired.Width;
1416360784Sdim
1417344779Sdim  switch (CI.InstClass) {
1418344779Sdim  default:
1419360784Sdim    assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
1420360784Sdim    // FIXME: Handle d16 correctly
1421360784Sdim    return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
1422360784Sdim                                  Width);
1423360784Sdim  case TBUFFER_LOAD:
1424360784Sdim  case TBUFFER_STORE:
1425360784Sdim    return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()),
1426360784Sdim                                  Width);
1427360784Sdim
1428344779Sdim  case UNKNOWN:
1429344779Sdim    llvm_unreachable("Unknown instruction class");
1430344779Sdim  case S_BUFFER_LOAD_IMM:
1431344779Sdim    switch (Width) {
1432344779Sdim    default:
1433344779Sdim      return 0;
1434344779Sdim    case 2:
1435344779Sdim      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
1436344779Sdim    case 4:
1437344779Sdim      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
1438344779Sdim    }
1439360784Sdim  case MIMG:
    assert((countPopulation(CI.DMask | Paired.DMask) == Width) &&
           "No overlaps");
1441360784Sdim    return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
1442327952Sdim  }
1443327952Sdim}
1444327952Sdim
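// Compute the subregister indices through which CI's and Paired's original
// values can be extracted from the merged wide register, accounting for which
// of the two ends up first in the combined result.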
1445344779Sdimstd::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI,
                                    const CombineInfo &Paired) {
1448360784Sdim  if (CI.Width == 0 || Paired.Width == 0 || CI.Width + Paired.Width > 4)
1449360784Sdim    return std::make_pair(0, 0);
1450360784Sdim
1451360784Sdim  bool ReverseOrder;
1452360784Sdim  if (CI.InstClass == MIMG) {
1453360784Sdim    assert((countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
1454360784Sdim           "No overlaps");
1455360784Sdim    ReverseOrder = CI.DMask > Paired.DMask;
1456360784Sdim  } else
1457360784Sdim    ReverseOrder = CI.Offset > Paired.Offset;
1458360784Sdim
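  // Idxs[Start][Count - 1] is the subregister index covering Count dwords
  // starting at dword Start of the merged register, e.g. Idxs[1][1] is
  // sub1_sub2; 0 marks a combination that cannot occur.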
1459360784Sdim  static const unsigned Idxs[4][4] = {
      {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2,
       AMDGPU::sub0_sub1_sub2_sub3},
1461360784Sdim      {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, 0},
1462360784Sdim      {AMDGPU::sub2, AMDGPU::sub2_sub3, 0, 0},
1463360784Sdim      {AMDGPU::sub3, 0, 0, 0},
1464360784Sdim  };
1465360784Sdim  unsigned Idx0;
1466360784Sdim  unsigned Idx1;
1467360784Sdim
1468360784Sdim  assert(CI.Width >= 1 && CI.Width <= 3);
1469360784Sdim  assert(Paired.Width >= 1 && Paired.Width <= 3);
1470360784Sdim
1471360784Sdim  if (ReverseOrder) {
1472360784Sdim    Idx1 = Idxs[0][Paired.Width - 1];
1473360784Sdim    Idx0 = Idxs[Paired.Width][CI.Width - 1];
1474344779Sdim  } else {
1475360784Sdim    Idx0 = Idxs[0][CI.Width - 1];
1476360784Sdim    Idx1 = Idxs[CI.Width][Paired.Width - 1];
1477344779Sdim  }
1478360784Sdim
1479360784Sdim  return std::make_pair(Idx0, Idx1);
1480344779Sdim}
1481344779Sdim
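// Select the register class for the merged result: scalar registers for
// S_BUFFER loads, vector registers otherwise, sized by the combined width in
// dwords.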
1482344779Sdimconst TargetRegisterClass *
1483360784SdimSILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
1484360784Sdim                                             const CombineInfo &Paired) {
1485344779Sdim  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
1486360784Sdim    switch (CI.Width + Paired.Width) {
1487344779Sdim    default:
1488344779Sdim      return nullptr;
1489344779Sdim    case 2:
1490344779Sdim      return &AMDGPU::SReg_64_XEXECRegClass;
1491344779Sdim    case 4:
1492360784Sdim      return &AMDGPU::SGPR_128RegClass;
1493344779Sdim    case 8:
1494344779Sdim      return &AMDGPU::SReg_256RegClass;
1495344779Sdim    case 16:
1496344779Sdim      return &AMDGPU::SReg_512RegClass;
1497344779Sdim    }
1498344779Sdim  } else {
1499360784Sdim    switch (CI.Width + Paired.Width) {
1500344779Sdim    default:
1501344779Sdim      return nullptr;
1502344779Sdim    case 2:
1503344779Sdim      return &AMDGPU::VReg_64RegClass;
1504344779Sdim    case 3:
1505344779Sdim      return &AMDGPU::VReg_96RegClass;
1506344779Sdim    case 4:
1507344779Sdim      return &AMDGPU::VReg_128RegClass;
1508344779Sdim    }
1509344779Sdim  }
1510344779Sdim}
1511344779Sdim
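// Merge two MUBUF buffer stores: gather both source registers into a
// REG_SEQUENCE and emit a single wider store at the lower of the two offsets.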
1512344779SdimMachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI,
                                           CombineInfo &Paired) {
1514327952Sdim  MachineBasicBlock *MBB = CI.I->getParent();
1515327952Sdim  DebugLoc DL = CI.I->getDebugLoc();
1516327952Sdim
1517360784Sdim  const unsigned Opcode = getNewOpcode(CI, Paired);
1518327952Sdim
1519360784Sdim  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1520344779Sdim  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1521344779Sdim  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1522327952Sdim
1523327952Sdim  // Copy to the new source register.
1524360784Sdim  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1525360784Sdim  Register SrcReg = MRI->createVirtualRegister(SuperRC);
1526327952Sdim
1527327952Sdim  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1528360784Sdim  const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1529327952Sdim
1530360784Sdim  BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1531327952Sdim      .add(*Src0)
1532327952Sdim      .addImm(SubRegIdx0)
1533327952Sdim      .add(*Src1)
1534327952Sdim      .addImm(SubRegIdx1);
1535327952Sdim
1536360784Sdim  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1537344779Sdim                 .addReg(SrcReg, RegState::Kill);
1538327952Sdim
1539360784Sdim  const unsigned Regs = getRegs(Opcode, *TII);
1540327952Sdim
1541344779Sdim  if (Regs & VADDR)
1542344779Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1543344779Sdim
1545360784Sdim  // It shouldn't be possible to get this far if the two instructions
1546360784Sdim  // don't have a single memoperand, because MachineInstr::mayAlias()
1547360784Sdim  // will return true if this is the case.
1548360784Sdim  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1549360784Sdim
1550360784Sdim  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1551360784Sdim  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1552360784Sdim
1553360784Sdim  MachineInstr *New =
1554360784Sdim    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1555360784Sdim        .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
        .addImm(std::min(CI.Offset, Paired.Offset)) // offset
        .addImm(CI.GLC)                             // glc
        .addImm(CI.SLC)                             // slc
        .addImm(0)                                  // tfe
        .addImm(CI.DLC)                             // dlc
        .addImm(0)                                  // swz
1562360784Sdim        .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1563360784Sdim
1564327952Sdim  moveInstsAfter(MIB, CI.InstsToMove);
1565327952Sdim
1566327952Sdim  CI.I->eraseFromParent();
1567360784Sdim  Paired.I->eraseFromParent();
1568360784Sdim  return New;
1569327952Sdim}
1570327952Sdim
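// Wrap Val in a MachineOperand: a plain immediate if it is legal as an inline
// constant, otherwise a fresh SGPR materialized with S_MOV_B32.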
1571344779SdimMachineOperand
1572360784SdimSILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
1573344779Sdim  APInt V(32, Val, true);
1574344779Sdim  if (TII->isInlineConstant(V))
1575344779Sdim    return MachineOperand::CreateImm(Val);
1576344779Sdim
1577360784Sdim  Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), Reg)
      .addImm(Val);
1582344779Sdim  (void)Mov;
1583344779Sdim  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
1584344779Sdim  return MachineOperand::CreateReg(Reg, false);
1585344779Sdim}
1586344779Sdim
1587344779Sdim// Compute base address using Addr and return the final register.
1588344779Sdimunsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
1589360784Sdim                                           const MemAddress &Addr) const {
1590344779Sdim  MachineBasicBlock *MBB = MI.getParent();
1591344779Sdim  MachineBasicBlock::iterator MBBI = MI.getIterator();
1592344779Sdim  DebugLoc DL = MI.getDebugLoc();
1593344779Sdim
  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit base register (low half)!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit base register (high half)!");
1601344779Sdim
1602344779Sdim  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
1603344779Sdim  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
1604344779Sdim  MachineOperand OffsetHi =
1605344779Sdim    createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
1606344779Sdim
1607353358Sdim  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1608360784Sdim  Register CarryReg = MRI->createVirtualRegister(CarryRC);
1609360784Sdim  Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);
1610353358Sdim
1611360784Sdim  Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1612360784Sdim  Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1613344779Sdim  MachineInstr *LoHalf =
1614344779Sdim    BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
1615344779Sdim      .addReg(CarryReg, RegState::Define)
1616344779Sdim      .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
1617353358Sdim      .add(OffsetLo)
1618353358Sdim      .addImm(0); // clamp bit
1619344779Sdim  (void)LoHalf;
1620344779Sdim  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););
1621344779Sdim
  MachineInstr *HiHalf =
    BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
      .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
      .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
      .add(OffsetHi)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0); // clamp bit
1629344779Sdim  (void)HiHalf;
1630344779Sdim  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););
1631344779Sdim
1632360784Sdim  Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
1633344779Sdim  MachineInstr *FullBase =
1634344779Sdim    BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
1635344779Sdim      .addReg(DestSub0)
1636344779Sdim      .addImm(AMDGPU::sub0)
1637344779Sdim      .addReg(DestSub1)
1638344779Sdim      .addImm(AMDGPU::sub1);
1639344779Sdim  (void)FullBase;
1640344779Sdim  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);
1641344779Sdim
1642344779Sdim  return FullDestReg;
1643344779Sdim}
1644344779Sdim
1645344779Sdim// Update base and offset with the NewBase and NewOffset in MI.
1646344779Sdimvoid SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
1647344779Sdim                                               unsigned NewBase,
1648360784Sdim                                               int32_t NewOffset) const {
1649360784Sdim  auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1650360784Sdim  Base->setReg(NewBase);
1651360784Sdim  Base->setIsKill(false);
1652344779Sdim  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
1653344779Sdim}
1654344779Sdim
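// If Op is an immediate, return it; if it is a register whose unique
// definition is an S_MOV_B32 of an immediate, return that immediate;
// otherwise return None.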
1655344779SdimOptional<int32_t>
1656360784SdimSILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
1657344779Sdim  if (Op.isImm())
1658344779Sdim    return Op.getImm();
1659344779Sdim
1660344779Sdim  if (!Op.isReg())
1661344779Sdim    return None;
1662344779Sdim
1663344779Sdim  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
1664344779Sdim  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
1665344779Sdim      !Def->getOperand(1).isImm())
1666344779Sdim    return None;
1667344779Sdim
1668344779Sdim  return Def->getOperand(1).getImm();
1669344779Sdim}
1670344779Sdim
// Analyze Base and extract:
//  - 32-bit base registers and subregisters
//  - a 64-bit constant offset
1674344779Sdim// Expecting base computation as:
1675344779Sdim//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
1676344779Sdim//   %LO:vgpr_32, %c:sreg_64_xexec =
1677344779Sdim//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
1678344779Sdim//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1679344779Sdim//   %Base:vreg_64 =
1680344779Sdim//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1681344779Sdimvoid SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1682360784Sdim                                                      MemAddress &Addr) const {
1683344779Sdim  if (!Base.isReg())
1684344779Sdim    return;
1685344779Sdim
1686344779Sdim  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1687344779Sdim  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1688344779Sdim      || Def->getNumOperands() != 5)
1689344779Sdim    return;
1690344779Sdim
1691344779Sdim  MachineOperand BaseLo = Def->getOperand(1);
1692344779Sdim  MachineOperand BaseHi = Def->getOperand(3);
1693344779Sdim  if (!BaseLo.isReg() || !BaseHi.isReg())
1694344779Sdim    return;
1695344779Sdim
1696344779Sdim  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1697344779Sdim  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1698344779Sdim
1699344779Sdim  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
1700344779Sdim      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1701344779Sdim    return;
1702344779Sdim
1703344779Sdim  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1704344779Sdim  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1705344779Sdim
1706344779Sdim  auto Offset0P = extractConstOffset(*Src0);
1707344779Sdim  if (Offset0P)
1708344779Sdim    BaseLo = *Src1;
1709344779Sdim  else {
1710344779Sdim    if (!(Offset0P = extractConstOffset(*Src1)))
1711344779Sdim      return;
1712344779Sdim    BaseLo = *Src0;
1713344779Sdim  }
1714344779Sdim
1715344779Sdim  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1716344779Sdim  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1717344779Sdim
1718344779Sdim  if (Src0->isImm())
1719344779Sdim    std::swap(Src0, Src1);
1720344779Sdim
1721344779Sdim  if (!Src1->isImm())
1722344779Sdim    return;
1723344779Sdim
1724344779Sdim  uint64_t Offset1 = Src1->getImm();
1725344779Sdim  BaseHi = *Src0;
1726344779Sdim
1727344779Sdim  Addr.Base.LoReg = BaseLo.getReg();
1728344779Sdim  Addr.Base.HiReg = BaseHi.getReg();
1729344779Sdim  Addr.Base.LoSubReg = BaseLo.getSubReg();
1730344779Sdim  Addr.Base.HiSubReg = BaseHi.getSubReg();
1731344779Sdim  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1732344779Sdim}
1733344779Sdim
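// Try to fold part of MI's register-computed address into its immediate
// offset field by re-basing the address on an "anchor" computation shared
// with nearby instructions; see the worked example in the body below.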
1734344779Sdimbool SILoadStoreOptimizer::promoteConstantOffsetToImm(
1735344779Sdim    MachineInstr &MI,
1736344779Sdim    MemInfoMap &Visited,
1737360784Sdim    SmallPtrSet<MachineInstr *, 4> &AnchorList) const {
1738344779Sdim
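  // Only handle instructions that are exactly one of a load or a store;
  // e.g. atomic read-modify-write instructions may report both and are
  // skipped.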
1739360784Sdim  if (!(MI.mayLoad() ^ MI.mayStore()))
1740360784Sdim    return false;
1741360784Sdim
1742344779Sdim  // TODO: Support flat and scratch.
1743360784Sdim  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
1744344779Sdim    return false;
1745344779Sdim
  if (MI.mayLoad() && TII->getNamedOperand(MI, AMDGPU::OpName::vdata))
1747344779Sdim    return false;
1748344779Sdim
1749344779Sdim  if (AnchorList.count(&MI))
1750344779Sdim    return false;
1751344779Sdim
1752344779Sdim  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
1753344779Sdim
1754344779Sdim  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
1755344779Sdim    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
1756344779Sdim    return false;
1757344779Sdim  }
1758344779Sdim
1759344779Sdim  // Step1: Find the base-registers and a 64bit constant offset.
1760344779Sdim  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1761344779Sdim  MemAddress MAddr;
1762344779Sdim  if (Visited.find(&MI) == Visited.end()) {
1763344779Sdim    processBaseWithConstOffset(Base, MAddr);
1764344779Sdim    Visited[&MI] = MAddr;
1765344779Sdim  } else
1766344779Sdim    MAddr = Visited[&MI];
1767344779Sdim
1768344779Sdim  if (MAddr.Offset == 0) {
1769344779Sdim    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
1770344779Sdim                         " constant offsets that can be promoted.\n";);
1771344779Sdim    return false;
1772344779Sdim  }
1773344779Sdim
1774344779Sdim  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
1775344779Sdim             << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
1776344779Sdim
  // Step2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset has the largest
  // legal 13-bit distance from MI's offset.
1779344779Sdim  // E.g. (64bit loads)
1780344779Sdim  // bb:
1781344779Sdim  //   addr1 = &a + 4096;   load1 = load(addr1,  0)
1782344779Sdim  //   addr2 = &a + 6144;   load2 = load(addr2,  0)
1783344779Sdim  //   addr3 = &a + 8192;   load3 = load(addr3,  0)
1784344779Sdim  //   addr4 = &a + 10240;  load4 = load(addr4,  0)
1785344779Sdim  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1786344779Sdim  //
  // Starting from the first load, the optimization will try to find a new
  // base from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and
  // &a + 8192 have a 13-bit distance from &a + 4096. The heuristic picks
  // &a + 8192 as the new base (anchor) because the maximum distance can
  // presumably accommodate more intermediate bases.
1792344779Sdim  //
1793344779Sdim  // Step3: move (&a + 8192) above load1. Compute and promote offsets from
1794344779Sdim  // (&a + 8192) for load1, load2, load4.
1795344779Sdim  //   addr = &a + 8192
1796344779Sdim  //   load1 = load(addr,       -4096)
1797344779Sdim  //   load2 = load(addr,       -2048)
1798344779Sdim  //   load3 = load(addr,       0)
1799344779Sdim  //   load4 = load(addr,       2048)
1800344779Sdim  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
1801344779Sdim  //
1802344779Sdim  MachineInstr *AnchorInst = nullptr;
1803344779Sdim  MemAddress AnchorAddr;
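  // std::numeric_limits<uint32_t>::min() is 0, so any anchor with a legal
  // addressing-mode distance will beat the initial value.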
1804344779Sdim  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
1805344779Sdim  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
1806344779Sdim
1807344779Sdim  MachineBasicBlock *MBB = MI.getParent();
1808344779Sdim  MachineBasicBlock::iterator E = MBB->end();
1809344779Sdim  MachineBasicBlock::iterator MBBI = MI.getIterator();
1810344779Sdim  ++MBBI;
1811344779Sdim  const SITargetLowering *TLI =
1812344779Sdim    static_cast<const SITargetLowering *>(STM->getTargetLowering());
1813344779Sdim
1814344779Sdim  for ( ; MBBI != E; ++MBBI) {
1815344779Sdim    MachineInstr &MINext = *MBBI;
1816344779Sdim    // TODO: Support finding an anchor(with same base) from store addresses or
1817344779Sdim    // any other load addresses where the opcodes are different.
1818344779Sdim    if (MINext.getOpcode() != MI.getOpcode() ||
1819344779Sdim        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
1820344779Sdim      continue;
1821344779Sdim
1822344779Sdim    const MachineOperand &BaseNext =
1823344779Sdim      *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
1824344779Sdim    MemAddress MAddrNext;
1825344779Sdim    if (Visited.find(&MINext) == Visited.end()) {
1826344779Sdim      processBaseWithConstOffset(BaseNext, MAddrNext);
1827344779Sdim      Visited[&MINext] = MAddrNext;
1828344779Sdim    } else
1829344779Sdim      MAddrNext = Visited[&MINext];
1830344779Sdim
1831344779Sdim    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
1832344779Sdim        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
1833344779Sdim        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
1834344779Sdim        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
1835344779Sdim      continue;
1836344779Sdim
1837344779Sdim    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
1838344779Sdim
1839344779Sdim    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
1840344779Sdim    TargetLoweringBase::AddrMode AM;
1841344779Sdim    AM.HasBaseReg = true;
1842344779Sdim    AM.BaseOffs = Dist;
1843344779Sdim    if (TLI->isLegalGlobalAddressingMode(AM) &&
1844344779Sdim        (uint32_t)std::abs(Dist) > MaxDist) {
1845344779Sdim      MaxDist = std::abs(Dist);
1846344779Sdim
1847344779Sdim      AnchorAddr = MAddrNext;
1848344779Sdim      AnchorInst = &MINext;
1849344779Sdim    }
1850344779Sdim  }
1851344779Sdim
1852344779Sdim  if (AnchorInst) {
1853344779Sdim    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
1854344779Sdim               AnchorInst->dump());
1855344779Sdim    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
1856344779Sdim               <<  AnchorAddr.Offset << "\n\n");
1857344779Sdim
1858344779Sdim    // Instead of moving up, just re-compute anchor-instruction's base address.
1859344779Sdim    unsigned Base = computeBase(MI, AnchorAddr);
1860344779Sdim
1861344779Sdim    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
1862344779Sdim    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););
1863344779Sdim
1864344779Sdim    for (auto P : InstsWCommonBase) {
1865344779Sdim      TargetLoweringBase::AddrMode AM;
1866344779Sdim      AM.HasBaseReg = true;
1867344779Sdim      AM.BaseOffs = P.second - AnchorAddr.Offset;
1868344779Sdim
1869344779Sdim      if (TLI->isLegalGlobalAddressingMode(AM)) {
1870344779Sdim        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
1871344779Sdim                   dbgs() << ")"; P.first->dump());
1872344779Sdim        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
1873344779Sdim        LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
1874344779Sdim      }
1875344779Sdim    }
1876344779Sdim    AnchorList.insert(AnchorInst);
1877344779Sdim    return true;
1878344779Sdim  }
1879344779Sdim
1880344779Sdim  return false;
1881344779Sdim}
1882344779Sdim
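// Append CI to the existing list whose head shares CI's instruction class and
// base address, or start a new single-element list if no such list exists.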
1883360784Sdimvoid SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
1884360784Sdim                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
1885360784Sdim  for (std::list<CombineInfo> &AddrList : MergeableInsts) {
1886360784Sdim    if (AddrList.front().InstClass == CI.InstClass &&
1887360784Sdim        AddrList.front().hasSameBaseAddress(*CI.I)) {
1888360784Sdim      AddrList.emplace_back(CI);
1889360784Sdim      return;
1890360784Sdim    }
1891360784Sdim  }
1892360784Sdim
1893360784Sdim  // Base address not found, so add a new list.
1894360784Sdim  MergeableInsts.emplace_back(1, CI);
1895360784Sdim}
1896360784Sdim
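// Make a single pass over the block: promote constant offsets to immediates
// where profitable, and bucket every mergeable instruction by base address.
// Returns true if any instruction was modified (currently only by the offset
// promotion).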
1897360784Sdimbool SILoadStoreOptimizer::collectMergeableInsts(MachineBasicBlock &MBB,
1898360784Sdim                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
1899284677Sdim  bool Modified = false;
  // Caches the base registers and constant offset extracted for each
  // instruction already analyzed.
1901344779Sdim  MemInfoMap Visited;
1902344779Sdim  // Contains the list of instructions for which constant offsets are being
1903344779Sdim  // promoted to the IMM.
1904344779Sdim  SmallPtrSet<MachineInstr *, 4> AnchorList;
1905344779Sdim
  // Sort potentially mergeable instructions into lists, one per instruction
  // class and base address.
1907360784Sdim  for (MachineInstr &MI : MBB.instrs()) {
    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
1910344779Sdim    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
1911344779Sdim      Modified = true;
1912344779Sdim
1913360784Sdim    const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
1914360784Sdim    if (InstClass == UNKNOWN)
1915360784Sdim      continue;
1916360784Sdim
1917284677Sdim    // Don't combine if volatile.
1918360784Sdim    if (MI.hasOrderedMemoryRef())
1919284677Sdim      continue;
1920360784Sdim
1921360784Sdim    CombineInfo CI;
1922360784Sdim    CI.setMI(MI, *TII, *STM);
1923360784Sdim
1924360784Sdim    if (!CI.hasMergeableAddress(*MRI))
1925360784Sdim      continue;
1926360784Sdim
1927360784Sdim    addInstToMergeableList(CI, MergeableInsts);
1928360784Sdim  }
1929360784Sdim  return Modified;
1930360784Sdim}
1931360784Sdim
// Scan through looking for adjacent memory operations with constant offsets
// from the same base register. We rely on the scheduler to do the hard work
// of clustering nearby loads, and assume these are all adjacent.
1935360784Sdimbool SILoadStoreOptimizer::optimizeBlock(
1936360784Sdim                       std::list<std::list<CombineInfo> > &MergeableInsts) {
1937360784Sdim  bool Modified = false;
1938360784Sdim
1939360784Sdim  for (std::list<CombineInfo> &MergeList : MergeableInsts) {
1940360784Sdim    if (MergeList.size() < 2)
1941360784Sdim      continue;
1942360784Sdim
1943360784Sdim    bool OptimizeListAgain = false;
1944360784Sdim    if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
1945360784Sdim      // We weren't able to make any changes, so clear the list so we don't
1946360784Sdim      // process the same instructions the next time we try to optimize this
1947360784Sdim      // block.
1948360784Sdim      MergeList.clear();
1949360784Sdim      continue;
1950284677Sdim    }
1951284677Sdim
    // We made changes, but also determined that there were no more
    // optimization opportunities, so we don't need to reprocess the list.
1954360784Sdim    if (!OptimizeListAgain)
1955360784Sdim      MergeList.clear();
1956344779Sdim
1957360784Sdim    OptimizeAgain |= OptimizeListAgain;
1958360784Sdim    Modified = true;
1959360784Sdim  }
1960360784Sdim  return Modified;
1961360784Sdim}
1962327952Sdim
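// Erase from MergeList the entry whose instruction is MI, so an instruction
// consumed by a merge is not visited again.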
1963360784Sdimvoid
1964360784SdimSILoadStoreOptimizer::removeCombinedInst(std::list<CombineInfo> &MergeList,
1965360784Sdim                                         const MachineInstr &MI) {
1966360784Sdim
1967360784Sdim  for (auto CI = MergeList.begin(), E = MergeList.end(); CI != E; ++CI) {
1968360784Sdim    if (&*CI->I == &MI) {
1969360784Sdim      MergeList.erase(CI);
1970360784Sdim      return;
1971360784Sdim    }
1972360784Sdim  }
1973360784Sdim}
1974360784Sdim
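// Walk one base-address list, merging each instruction with a matching
// partner where possible. OptimizeListAgain is set when a merged result is
// still narrow enough to be a merge candidate on a later pass.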
1975360784Sdimbool
1976360784SdimSILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
1977360784Sdim                                          std::list<CombineInfo> &MergeList,
1978360784Sdim                                          bool &OptimizeListAgain) {
1979360784Sdim  bool Modified = false;
1980360784Sdim  for (auto I = MergeList.begin(); I != MergeList.end(); ++I) {
1981360784Sdim    CombineInfo &CI = *I;
1982360784Sdim    CombineInfo Paired;
1983360784Sdim
1984360784Sdim    if (CI.InstClass == UNKNOWN)
1985360784Sdim      continue;
1986360784Sdim
1987360784Sdim    if (!findMatchingInst(CI, Paired))
1988360784Sdim      goto done;
1989360784Sdim
1990360784Sdim    Modified = true;
1991360784Sdim    removeCombinedInst(MergeList, *Paired.I);
1992360784Sdim
1993344779Sdim    switch (CI.InstClass) {
1994344779Sdim    default:
1995360784Sdim      llvm_unreachable("unknown InstClass");
1996344779Sdim      break;
1997360784Sdim    case DS_READ: {
1998360784Sdim      MachineBasicBlock::iterator NewMI = mergeRead2Pair(CI, Paired);
1999360784Sdim      CI.setMI(NewMI, *TII, *STM);
2000360784Sdim      break;
2001327952Sdim    }
2002360784Sdim    case DS_WRITE: {
2003360784Sdim      MachineBasicBlock::iterator NewMI = mergeWrite2Pair(CI, Paired);
2004360784Sdim      CI.setMI(NewMI, *TII, *STM);
2005360784Sdim      break;
2006360784Sdim    }
2007360784Sdim    case S_BUFFER_LOAD_IMM: {
2008360784Sdim      MachineBasicBlock::iterator NewMI = mergeSBufferLoadImmPair(CI, Paired);
2009360784Sdim      CI.setMI(NewMI, *TII, *STM);
2010360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 16;
2011360784Sdim      break;
2012360784Sdim    }
2013360784Sdim    case BUFFER_LOAD: {
2014360784Sdim      MachineBasicBlock::iterator NewMI = mergeBufferLoadPair(CI, Paired);
2015360784Sdim      CI.setMI(NewMI, *TII, *STM);
2016360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2017360784Sdim      break;
2018360784Sdim    }
2019360784Sdim    case BUFFER_STORE: {
2020360784Sdim      MachineBasicBlock::iterator NewMI = mergeBufferStorePair(CI, Paired);
2021360784Sdim      CI.setMI(NewMI, *TII, *STM);
2022360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2023360784Sdim      break;
2024360784Sdim    }
2025360784Sdim    case MIMG: {
2026360784Sdim      MachineBasicBlock::iterator NewMI = mergeImagePair(CI, Paired);
2027360784Sdim      CI.setMI(NewMI, *TII, *STM);
2028360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2029360784Sdim      break;
2030360784Sdim    }
2031360784Sdim    case TBUFFER_LOAD: {
2032360784Sdim      MachineBasicBlock::iterator NewMI = mergeTBufferLoadPair(CI, Paired);
2033360784Sdim      CI.setMI(NewMI, *TII, *STM);
2034360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2035360784Sdim      break;
2036360784Sdim    }
2037360784Sdim    case TBUFFER_STORE: {
2038360784Sdim      MachineBasicBlock::iterator NewMI = mergeTBufferStorePair(CI, Paired);
2039360784Sdim      CI.setMI(NewMI, *TII, *STM);
2040360784Sdim      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
2041360784Sdim      break;
2042360784Sdim    }
2043360784Sdim    }
2044327952Sdim
2045360784Sdimdone:
2046360784Sdim    // Clear the InstsToMove after we have finished searching so we don't have
2047360784Sdim    // stale values left over if we search for this CI again in another pass
2048360784Sdim    // over the block.
2049360784Sdim    CI.InstsToMove.clear();
2050284677Sdim  }
2051284677Sdim
2052284677Sdim  return Modified;
2053284677Sdim}
2054284677Sdim
2055284677Sdimbool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
2056327952Sdim  if (skipFunction(MF.getFunction()))
2057309124Sdim    return false;
2058309124Sdim
2059341825Sdim  STM = &MF.getSubtarget<GCNSubtarget>();
2060327952Sdim  if (!STM->loadStoreOptEnabled())
2061309124Sdim    return false;
2062309124Sdim
2063327952Sdim  TII = STM->getInstrInfo();
2064309124Sdim  TRI = &TII->getRegisterInfo();
2065360784Sdim  STI = &MF.getSubtarget<MCSubtargetInfo>();
2066309124Sdim
2067284677Sdim  MRI = &MF.getRegInfo();
2068314564Sdim  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2069284677Sdim
2070327952Sdim  assert(MRI->isSSA() && "Must be run on SSA");
2071327952Sdim
2072341825Sdim  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
2073284677Sdim
2074284677Sdim  bool Modified = false;
2075284677Sdim
2077327952Sdim  for (MachineBasicBlock &MBB : MF) {
2078360784Sdim    std::list<std::list<CombineInfo> > MergeableInsts;
2079360784Sdim    // First pass: Collect list of all instructions we know how to merge.
2080360784Sdim    Modified |= collectMergeableInsts(MBB, MergeableInsts);
2081344779Sdim    do {
2082344779Sdim      OptimizeAgain = false;
2083360784Sdim      Modified |= optimizeBlock(MergeableInsts);
2084344779Sdim    } while (OptimizeAgain);
2085327952Sdim  }
2086327952Sdim
2087284677Sdim  return Modified;
2088284677Sdim}
2089