//=== lib/CodeGen/GlobalISel/AArch64PostLegalizerCombiner.cpp -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This performs post-legalization combines on generic MachineInstrs.
//
// Any combine that this pass performs must preserve instruction legality.
// Combines unconcerned with legality should be handled by the
// PreLegalizerCombiner instead.
//
//===----------------------------------------------------------------------===//
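
// A rough usage note: the pass is registered under the DEBUG_TYPE string
// defined below, so an invocation along the lines of
//
//   llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-combiner in.mir
//
// should exercise just this combiner on legalized MIR (the exact flag
// spelling may vary between LLVM revisions).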

#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "aarch64-postlegalizer-combiner"

using namespace llvm;
using namespace MIPatternMatch;

/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
///
/// Used for matching target-supported shuffles before codegen.
struct ShuffleVectorPseudo {
  unsigned Opc; ///< Opcode for the instruction. (E.g. G_ZIP1)
  Register Dst; ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() {}
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns None.
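///
/// E.g. a mask of <2, 2, -1, 2> splats lane 2 and yields 2, while a mask of
/// <0, 1, 0, 1> has no single source lane and yields None.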
static Optional<int> getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 rather than None so callers still get a lane to simplify
  // against.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

/// Check if a vector shuffle corresponds to a REV instruction with the
/// specified block size.
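///
/// E.g. for a <4 x s32> shuffle, the mask <1, 0, 3, 2> reverses the two
/// 32-bit elements within each 64-bit block, so it matches with
/// BlockSize == 64.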
static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
                      unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");
  assert(EltSize != 64 && "EltSize cannot be 64 for REV mask.");

  unsigned BlockElts = M[0] + 1;

  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSize;

  if (BlockSize <= EltSize || BlockSize != BlockElts * EltSize)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    // Ignore undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) !=
        (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

/// Determines if \p M is a shuffle vector mask for a TRN of \p NumElts.
/// Whether or not G_TRN1 or G_TRN2 should be used is stored in \p WhichResult.
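///
/// E.g. with NumElts == 4, the mask <0, 4, 2, 6> interleaves the even lanes
/// of both sources (G_TRN1) and <1, 5, 3, 7> interleaves the odd lanes
/// (G_TRN2).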
static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != i + WhichResult) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

/// Check if a G_EXT instruction can handle a shuffle mask \p M when the vector
/// sources of the shuffle are different.
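///
/// E.g. with NumElts == 8 (say <8 x s8>), the mask <1, 2, 3, 4, 5, 6, 7, 8>
/// takes the last seven elements of the first source followed by the first
/// element of the second, i.e. an extract with element offset 1.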
static Optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                      unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return None;

  // Use APInt to handle overflow when calculating expected element.
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

  // The following shuffle indices must be the successive elements after the
  // first real element.
  if (any_of(
          make_range(std::next(FirstRealElt), M.end()),
          [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return None;

  // The index of an EXT is the first element if it is not UNDEF.
  // Watch out for the beginning UNDEFs. The EXT index should be the expected
  // value of the first element. E.g.
  // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
  // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
  // ExpectedElt is the last mask index plus 1.
  uint64_t Imm = ExpectedElt.getZExtValue();
  bool ReverseExt = false;

  // There are two different cases that require reversing the input vectors.
  // For example, for the vector <4 x i32> we have the following cases:
  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
  // In both cases we end up with mask <5, 6, 7, 0>, which requires reversing
  // the two input vectors.
  if (Imm < NumElts)
    ReverseExt = true;
  else
    Imm -= NumElts;
  return std::make_pair(ReverseExt, Imm);
}

/// Determines if \p M is a shuffle vector mask for a UZP of \p NumElts.
/// Whether or not G_UZP1 or G_UZP2 should be used is stored in \p WhichResult.
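///
/// E.g. with NumElts == 4, the mask <0, 2, 4, 6> keeps the even lanes of the
/// concatenated sources (G_UZP1) and <1, 3, 5, 7> keeps the odd lanes
/// (G_UZP2).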
static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    // Skip undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) != 2 * i + WhichResult)
      return false;
  }
  return true;
}

/// \return true if \p M is a zip mask for a shuffle vector of \p NumElts.
/// Whether or not G_ZIP1 or G_ZIP2 should be used is stored in \p WhichResult.
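///
/// E.g. with NumElts == 4, the mask <0, 4, 1, 5> zips the low halves of both
/// sources (G_ZIP1) and <2, 6, 3, 7> zips the high halves (G_ZIP2).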
static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;

  // 0 means use ZIP1, 1 means use ZIP2.
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
        (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
      return false;
    Idx += 1;
  }
  return true;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_REV instruction. Returns the appropriate G_REV pseudo in \p MatchInfo.
static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned EltSize = Ty.getScalarSizeInBits();

  // Element size for a rev cannot be 64.
  if (EltSize == 64)
    return false;

  unsigned NumElts = Ty.getNumElements();

  // Try to produce G_REV64.
  if (isREVMask(ShuffleMask, EltSize, NumElts, 64)) {
    MatchInfo = ShuffleVectorPseudo(AArch64::G_REV64, Dst, {Src});
    return true;
  }

  // TODO: Produce G_REV32 and G_REV16 once we have proper legalization
  // support. This should be identical to the above, but with a constant 32
  // and constant 16.
  return false;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_TRN1 or G_TRN2 instruction.
static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_UZP1 or G_UZP2 instruction.
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - Either G_UZP1 or G_UZP2 on success.
static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

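/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_ZIP1 or G_ZIP2 instruction.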
static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZipMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// Helper function for matchDup.
static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                        MachineRegisterInfo &MRI,
                                        ShuffleVectorPseudo &MatchInfo) {
  if (Lane != 0)
    return false;

  // Try to match a vector splat operation into a dup instruction.
  // We're looking for this pattern:
  //
  // %scalar:gpr(s64) = COPY $x0
  // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
  // %cst0:gpr(s32) = G_CONSTANT i32 0
  // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
  // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
  //                         %zerovec(<2 x s32>)
  //
  // ...into:
  // %splat = G_DUP %scalar

  // Begin matching the insert.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!InsMI)
    return false;
  // Match the undef vector operand.
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                    InsMI->getOperand(1).getReg(), MRI))
    return false;

  // Match the index constant 0.
  int64_t Index = 0;
  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
    return false;

  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
  return true;
}

/// Helper function for matchDup.
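///
/// E.g. if %v:_(<2 x s64>) = G_BUILD_VECTOR %a, %b and the shuffle splats
/// lane 1, the result can reference the lane definition directly, and the
/// whole sequence becomes G_DUP %b.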
static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                                    MachineRegisterInfo &MRI,
                                    ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected a non-negative lane?");
  // Test if the LHS is a BUILD_VECTOR. If it is, then we can just reference
  // the lane's definition directly.
  auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                                  MI.getOperand(1).getReg(), MRI);
  if (!BuildVecMI)
    return false;
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
  return true;
}

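/// \return true if a G_SHUFFLE_VECTOR instruction \p MI is a lane splat that
/// can be replaced with a G_DUP instruction.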
static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  auto MaybeLane = getSplatIndex(MI);
  if (!MaybeLane)
    return false;
  int Lane = *MaybeLane;
  // If this is an undef splat, generate it via "just" vdup, if possible.
  if (Lane < 0)
    Lane = 0;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  return false;
}

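/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_EXT instruction.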
static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Dst = MI.getOperand(0).getReg();
  auto ExtInfo = getExtMask(MI.getOperand(3).getShuffleMask(),
                            MRI.getType(Dst).getNumElements());
  if (!ExtInfo)
    return false;
  bool ReverseExt;
  uint64_t Imm;
  std::tie(ReverseExt, Imm) = *ExtInfo;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  if (ReverseExt)
    std::swap(V1, V2);
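  // G_EXT's immediate is a byte offset, so scale the element offset by the
  // element size in bytes. E.g. for <4 x s32>, an element offset of 1 is a
  // byte offset of 4.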
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;
  Imm *= ExtFactor;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction \p MI with the equivalent pseudo
/// described by \p MatchInfo.
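///
/// E.g. for a matched G_ZIP1,
///   %dst:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1, %v2, shufflemask(0, 4, 1, 5)
/// becomes
///   %dst:_(<4 x s32>) = G_ZIP1 %v1, %v2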
static bool applyShuffleVectorPseudo(MachineInstr &MI,
                                     ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
/// Special-cased because the constant operand must be emitted as a G_CONSTANT
/// for the imported tablegen patterns to work.
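///
/// E.g. the immediate is materialized first, roughly:
///   %imm:_(s32) = G_CONSTANT i32 4
///   %dst:_(<4 x s32>) = G_EXT %v1, %v2, %imm(s32)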
static bool applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  // Tablegen patterns expect an i32 G_CONSTANT as the final op.
  auto Cst =
      MIRBuilder.buildConstant(LLT::scalar(32), MatchInfo.SrcOps[2].getImm());
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                        {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  MI.eraseFromParent();
  return true;
}

#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS

namespace {
#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H

class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;

public:
  AArch64GenPostLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;

  AArch64PostLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
                                   GISelKnownBits *KB,
                                   MachineDominatorTree *MDT)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
        KB(KB), MDT(MDT) {
    if (!GeneratedRuleCfg.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
               MachineIRBuilder &B) const override;
};

bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                               MachineInstr &MI,
                                               MachineIRBuilder &B) const {
  const auto *LI =
      MI.getParent()->getParent()->getSubtarget().getLegalizerInfo();
  CombinerHelper Helper(Observer, B, KB, MDT, LI);
  AArch64GenPostLegalizerCombinerHelper Generated(GeneratedRuleCfg);
  return Generated.tryCombineAll(Observer, MI, B, Helper);
}

#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP

class AArch64PostLegalizerCombiner : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostLegalizerCombiner(bool IsOptNone = false);

  StringRef getPassName() const override {
    return "AArch64PostLegalizerCombiner";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  bool IsOptNone;
};
} // end anonymous namespace

void AArch64PostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  AU.addRequired<GISelKnownBitsAnalysis>();
  AU.addPreserved<GISelKnownBitsAnalysis>();
  if (!IsOptNone) {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
  }
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerCombiner::AArch64PostLegalizerCombiner(bool IsOptNone)
    : MachineFunctionPass(ID), IsOptNone(IsOptNone) {
  initializeAArch64PostLegalizerCombinerPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  bool EnableOpt =
      MF.getTarget().getOptLevel() != CodeGenOpt::None && !skipFunction(F);
  GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
  MachineDominatorTree *MDT =
      IsOptNone ? nullptr : &getAnalysis<MachineDominatorTree>();
  AArch64PostLegalizerCombinerInfo PCInfo(EnableOpt, F.hasOptSize(),
                                          F.hasMinSize(), KB, MDT);
  Combiner C(PCInfo, TPC);
  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
}

char AArch64PostLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                      "Combine AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
INITIALIZE_PASS_END(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                    "Combine AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PostLegalizeCombiner(bool IsOptNone) {
  return new AArch64PostLegalizerCombiner(IsOptNone);
}
} // end namespace llvm
