//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR: pc-relative address of a nearby symbol.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
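  // Conditional selection on the NZCV condition flags.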
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,

  // Add/subtract with carry: adc, sbc instructions.
  ADC,
  SBC,

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags.
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
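  // Duplicate a particular lane into all lanes of a vector.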
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating/rounding shift by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

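  // SVE predicated reductions: reduce across the lanes selected by a
  // governing predicate.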
  SMAXV_PRED,
  UMAXV_PRED,
  SMINV_PRED,
  UMINV_PRED,
  ORV_PRED,
  EORV_PRED,
  ANDV_PRED,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors: it causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

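  // SVE unpack: sign- or zero-extend the low/high half of a vector into a
  // vector of wider elements.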
  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,

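  // SVE extraction of the last (active) element, vector reversal and table
  // lookup.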
  CLASTA_N,
  CLASTB_N,
  LASTA,
  LASTB,
  REV,
  TBL,

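  // SVE insert into the first lane, predicate test and predicate
  // initialisation.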
  INSR,
  PTEST,
  PTRUE,

  // Unsigned gather loads.
  GLD1,
  GLD1_SCALED,
  GLD1_UXTW,
  GLD1_SXTW,
  GLD1_UXTW_SCALED,
  GLD1_SXTW_SCALED,
  GLD1_IMM,

  // Signed gather loads.
  GLD1S,
  GLD1S_SCALED,
  GLD1S_UXTW,
  GLD1S_SXTW,
  GLD1S_UXTW_SCALED,
  GLD1S_SXTW_SCALED,
  GLD1S_IMM,

  // Scatter stores.
  SST1,
  SST1_SCALED,
  SST1_UXTW,
  SST1_SXTW,
  SST1_UXTW_SCALED,
  SST1_SXTW_SCALED,
  SST1_IMM,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON load/store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,

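  // Memory Tagging Extension (MTE) tag store instructions.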
  STG,
  STZG,
  ST2G,
  STZ2G,

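  // Paired load/store.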
  LDP,
  STP
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, unsigned Align,
      MachineMemOperand::Flags Flags, bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target-specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

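  /// Attempt to lower a BUILD_VECTOR whose operands are extracted vector
  /// elements as a shuffle of the source vectors.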
  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

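  /// Custom inserter for the f128 CSEL pseudo-instruction; since there is no
  /// 128-bit FCSEL, it is expanded into a compare-and-branch diamond.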
  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

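  /// NEON structure load/store instructions (ldN/stN) support interleave
  /// factors of up to four.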
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true; otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to a float value size (128 bits) if the NoImplicitFloat
    // attribute is set.
    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are OK with KeptBitsVT being byte/word/dword, which is what SXT
    // supports. XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

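  /// The "Q" constraint denotes a memory operand addressed by a single base
  /// register with no offset, e.g. (illustrative usage):
  ///   asm("ldxr %0, %1" : "=r"(V) : "Q"(*Ptr));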
  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif