//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "AMDGPU.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns an AMDGPUISD::FFBH_U32 or AMDGPUISD::FFBL_B32 node (selected by
  /// \p Opc) if the incoming \p Op may have been legalized from a smaller type
  /// VT. We need to match the pre-legalized type because the generic
  /// legalization inserts the add/sub between the select and compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
  static bool hasDefinedInitializer(const GlobalValue *GV);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHi24Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  /// Return the low 32-bit half of the 64-bit value \p Op.
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  /// Return the high 32-bit half of the 64-bit value \p Op.
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
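  ///
  /// For example (illustrative, following the rule above): v3i32 splits into
  /// (v2i32, i32), and v7i16 splits into (v4i16, v3i16).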
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;

  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a vector load from vec3 to vec4.
  SDValue WidenVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
      CCState &State, const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  /// \returns true if the sign of zero can be ignored when lowering \p Op,
  /// either because no-signed-zeros FP math is enabled globally or because
  /// \p Op itself carries the no-signed-zeros (nsz) flag.
  bool mayIgnoreSignedZero(SDValue Op) const {
    if (getTargetMachine().Options.NoSignedZerosFPMath)
      return true;

    const auto Flags = Op.getNode()->getFlags();
    if (Flags.isDefined())
      return Flags.hasNoSignedZeros();

    return false;
  }

  /// Look through a bitcast, if present, and return the source value.
  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
                                  SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU.  Commit r319036,
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets.  Enough AMDGPU compiles go into an infinite loop (
  // MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc. ) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which of the demanded bits of \p Op are known to be either zero
  /// or one and return them in the Zero and One bitsets of \p Known.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  /// Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  //            For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32-bits.
  BFE_I32, // Extract range of bits with sign extension to 32-bits.
  BFI, // (src0 & src1) | (~src0 & src2)
  BFM, // Insert a range of bits into a 32-bit word.
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  MUL_LOHI_I24,
  MUL_LOHI_U24,
  PERM,
  TEXTURE_FETCH,
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two f32 values into a single register holding two packed f16
  // values, with round toward zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  // Wrapper around fp16 results that are known to zero the high bits.
  FP16_ZEXT,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  PC_ADD_REL_OFFSET,
  LDS,
  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  ATOMIC_LOAD_CSUB,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_CSUB,
  BUFFER_ATOMIC_FADD,
  BUFFER_ATOMIC_PK_FADD,
  ATOMIC_PK_FADD,

  LAST_AMDGPU_ISD_NUMBER
};
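
// Illustrative compile-time guard for the ordering requirement documented on
// the cvt_f32_ubyte* opcodes above, which must stay consecutive and in order.
static_assert(CVT_F32_UBYTE1 == CVT_F32_UBYTE0 + 1 &&
                  CVT_F32_UBYTE2 == CVT_F32_UBYTE0 + 2 &&
                  CVT_F32_UBYTE3 == CVT_F32_UBYTE0 + 3,
              "cvt_f32_ubyte* opcodes must be consecutive and in order");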

} // End namespace AMDGPUISD

} // End namespace llvm

#endif