1//===-- VOP3PInstructions.td - Vector Instruction Definitions -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9//===----------------------------------------------------------------------===//
10// VOP3P Classes
11//===----------------------------------------------------------------------===//
12
// Pseudo-instruction wrapper for true packed (VOP3P) opcodes.
// Picks the ISel pattern set from the profile: profiles with source
// modifiers get the VOP3P modifier pattern (optionally carrying an
// explicit clamp operand), others get the plain VOP3 pattern.
class VOP3PInst<string OpName, VOPProfile P,
                SDPatternOperator node = null_frag,
                bit HasExplicitClamp = 0> :
  VOP3P_Pseudo<OpName, P,
    !if(P.HasModifiers, getVOP3PModPat<P, node, HasExplicitClamp>.ret, getVOP3Pat<P, node>.ret)
>;
19
// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
class VOP3_VOP3PInst<string OpName, VOPProfile P, bit UseTiedOutput = 0,
                     SDPatternOperator node = null_frag> :
  VOP3P_Pseudo<OpName, P> {
  // These operands are only sort of f16 operands. Depending on
  // op_sel_hi, these may be interpreted as f32. The inline immediate
  // values are really f16 converted to f32, so we treat these as f16
  // operands.
  let InOperandList =
    !con(
      !con(
        (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
             FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
             FP16InputMods:$src2_modifiers, VCSrc_f16:$src2),
         // FIXME: clampmod0 misbehaves with the non-default vdst_in
         // following it. For now workaround this by requiring clamp
         // in tied patterns. This should use undef_tied_input, but it
         // seems underdeveloped and doesn't apply the right register
         // class constraints.
         !if(UseTiedOutput, (ins clampmod:$clamp, VGPR_32:$vdst_in),
                            (ins clampmod0:$clamp))),
         (ins op_sel:$op_sel, op_sel_hi:$op_sel_hi));

  // Tied-output variants read the previous destination value via $vdst_in
  // so the half of the register this op does not write is preserved.
  let Constraints = !if(UseTiedOutput, "$vdst = $vdst_in", "");
  // $vdst_in exists only to express the tie; it is not encoded.
  let DisableEncoding = !if(UseTiedOutput, "$vdst_in", "");
  let AsmOperands =
    " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}
49
// Packed 16-bit ALU pseudo-instructions: each operates on both 16-bit
// lanes of a 32-bit register in one instruction.
let isCommutable = 1 in {
def V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
def V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;

// Packed f16 FP ops whose results depend on the FP double/packed rounding mode.
let FPDPRounding = 1 in {
def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
def V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
def V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, any_fmul>;
} // End FPDPRounding = 1
def V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
def V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

def V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, add>;
def V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

def V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
def V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
def V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
def V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umax>;
} // End isCommutable = 1

// Non-commutative packed ops. V_PK_SUB_U16 has no generic pattern here;
// it is selected by the explicit GCNPat below.
def V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

def V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshl_rev>;
def V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, ashr_rev>;
def V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshr_rev>;
78
79
// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// The constant will be emitted as a mov, and folded later.
// TODO: We could directly encode the immediate now
// Matches (add x, -c) where -c fits NegSubInlineConstV216 and selects
// v_pk_sub_u16 x, c instead.
def : GCNPat<
  (add (v2i16 (VOP3PMods v2i16:$src0, i32:$src0_modifiers)), NegSubInlineConstV216:$src1),
  (V_PK_SUB_U16 $src0_modifiers, $src0, SRCMODS.OP_SEL_1, NegSubInlineConstV216:$src1)
>;
88
// ISel patterns shared by the mad_mix and fma_mix families: f32 FMA of
// mixed f16/f32 sources, with the result optionally rounded back to f16
// into the low or high half of a packed register.
multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mix_inst,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  // fpround(fma(...)) -> mixlo; the high half of the destination is
  // left undefined (IMPLICIT_DEF tied input).
  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for maxhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  // build_vector(elt0, fpround(fma(...))) -> mixhi with $elt0 tied in
  // so the low half keeps its value.
  def : GCNPat <
    (build_vector f16:$elt0, (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       $elt0))
  >;

  // Same as above but with the high-half result clamped (clamp is applied
  // after the conversion to f16).
  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       $elt0))
  >;

  // Clamped build_vector of two rounded FMAs: mixlo computes the low
  // half (into an undef register), then mixhi computes the high half
  // with the mixlo result tied in; both halves are clamped.
  def : GCNPat <
    (AMDGPUclamp (build_vector
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers)))),
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers)))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;
}
150
let SubtargetPredicate = HasMadMixInsts in {

// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) are controlled by op_sel_hi.
let isCommutable = 1, mayRaiseFPException = 0 in {
def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

// The hi variant writes only the high half, so only ClampHi applies.
let ClampLo = 0, ClampHi = 1 in {
def V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1, mayRaiseFPException = 0

// mad_mix selects from fmad (contrast fma for the fma_mix family below).
defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts
171
172
// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {
def V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

// The hi variant writes only the high half, so only ClampHi applies.
let ClampLo = 0, ClampHi = 1 in {
def V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1

// The fma_mix family selects from true fma rather than fmad.
defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
} // End SubtargetPredicate = HasFmaMixInsts
190
// For each [FromBitIndex, ShiftAmount] pair, define PatFrag
// ExtractSigned4bit_<FromBitIndex>: sign-extract the 4-bit field at bit
// FromBitIndex by shifting it up into bits [31:28] and arithmetic
// shifting back down by 28. The field at bit 28 needs no frag here —
// Extract below handles the last element with a plain sra.
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;
195
// Defines code pattern that extracts U(unsigned/signed) 4/8bit from FromBitIndex.
// BitMask is 255 for bytes and 15 for nibbles; U=1 selects unsigned
// (zero-extending) extraction, U=0 signed (sign-extending).
class Extract<int FromBitIndex, int BitMask, bit U>: PatFrag<
  (ops node:$src),
  // Last element: a plain shift suffices — no higher bits remain to mask.
  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
       // First element: no shift needed — mask (unsigned) or sign-extend in
       // place (sext_inreg for bytes, the ExtractSigned4bit_* frag for nibbles).
       !if (!eq (FromBitIndex, 0), // first element
            !if (U, (and node:$src, (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                                         (sext_inreg node:$src, i8))),
            // Middle elements: shift down, then mask or sign-extend.
            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                      (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;
208
209
foreach Type = ["I", "U"] in
  foreach Index = 0-3 in {
    // Defines patterns that extract each Index'ed 8bit from an unsigned
    // 32bit scalar value.
    def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !if (!eq (Type, "U"), 1, 0)>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 8bit of a 32bit scalar value.

    // Uses the one-use variants of the 24-bit multiplies so each partial
    // product is only folded when it has a single consumer.
    def Mul#Type#_Elt#Index : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
                            (!cast<Extract>(Type#Index#"_8bit") node:$src0),
                            (!cast<Extract>(Type#Index#"_8bit") node:$src1))>;
  }
225
// Different variants of dot8 patterns cause a huge increase in the compile time.
// Define non-associative/commutative add/mul to prevent permutation in the dot8
// pattern.
// (Same opcodes as ISD::ADD / AMDGPUISD::MUL_*24, but declared without the
// commutative/associative SDNode properties so tblgen emits fewer variants.)
def NonACAdd        : SDNode<"ISD::ADD"       , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24        : SDNode<"AMDGPUISD::MUL_U24"       , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24        : SDNode<"AMDGPUISD::MUL_I24"       , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;
237
foreach Type = ["I", "U"] in
  foreach Index = 0-7 in {
    // Defines patterns that extract each Index'ed 4bit from an unsigned
    // 32bit scalar value.
    def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !if (!eq (Type, "U"), 1, 0)>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 4bit of a 32bit scalar value.
    def Mul#Type#Index#"_4bit" : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse))
                             (!cast<Extract>(Type#Index#"_4bit") node:$src0),
                             (!cast<Extract>(Type#Index#"_4bit") node:$src1))>;
  }
252
// Matches src2 + hi16(src0)*hi16(src1) + lo16(src0)*lo16(src1)
// (unsigned, one-use partial products) and selects Inst. The (i32 8)
// constants fill the three modifier operand slots (presumably OP_SEL_1,
// cf. SRCMODS.OP_SEL_1 above — confirm against VOP3PInst operand order);
// the trailing (i1 0) is clamp off.
class UDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
                                         (srl i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
                             (and i32:$src1, (i32 65535)))
   ),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  // Inherit the predicate of the selected instruction so the pattern is
  // only active where the instruction exists.
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}
262
// Signed counterpart of UDot2Pat: the halves are extracted with sra /
// sext_inreg instead of srl / and.
class SDot2Pat<Instruction Inst> : GCNPat <
  (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
                                         (sra i32:$src1, (i32 16))), i32:$src2),
       (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
                             (sext_inreg i32:$src1, i16))),
  (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> {
  // Inherit the predicate of the selected instruction.
  let SubtargetPredicate = !cast<VOP_Pseudo>(Inst).SubtargetPredicate;
}
271
// Dot-product pseudo-instructions. All take an explicit clamp operand.
let IsDOT = 1 in {
let SubtargetPredicate = HasDot2Insts in {

def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
  VOP3_Profile<VOP_F32_V2F16_V2F16_F32>,
  AMDGPUfdot2, 1/*ExplicitClamp*/>;
def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16",
  VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>;
def V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16",
  VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>;
// The 4- and 8-element dots take their elements packed in i32 operands
// (VOP3_PACKED profile).
def V_DOT4_U32_U8  : VOP3PInst<"v_dot4_u32_u8",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
def V_DOT8_U32_U4  : VOP3PInst<"v_dot8_u32_u4",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

def V_DOT4_I32_I8  : VOP3PInst<"v_dot4_i32_i8",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
def V_DOT8_I32_I4  : VOP3PInst<"v_dot8_i32_i4",
  VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;

} // End SubtargetPredicate = HasDot1Insts
} // End let IsDOT = 1
298
def : UDot2Pat<V_DOT2_U32_U16>;
def : SDot2Pat<V_DOT2_I32_I16>;

// dot4: match src2 + sum over bytes y of byte_y(src0) * byte_y(src1).
// !foldl builds the left-associated add chain at tblgen time.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT4_"#Type#"32_"#Type#8).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y,
                      (add_oneuse lhs, (!cast<PatFrag>("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// dot8: same shape over the eight 4-bit elements, using the
// non-associative/commutative nodes to keep tblgen from exploding the
// number of pattern permutations.
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [1, 2, 3, 4, 5, 6, 7], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;

// Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase
// in the compile time. Directly handle the pattern generated by the FE here.
// (Identical to the pattern above except element 7 is summed first.)
foreach Type = ["U", "I"] in
  let SubtargetPredicate = !cast<VOP_Pseudo>("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in
  def : GCNPat <
    !cast<dag>(!foldl((add_oneuse i32:$src2, (!cast<PatFrag>("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)),
                      [7, 1, 2, 3, 4, 5, 6], lhs, y,
                      (NonACAdd_oneuse lhs, (!cast<PatFrag>("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))),
    (!cast<VOP3PInst>("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>;
326
// Accumulator (AGPR) destination operands of the sizes used by the MAI
// instructions below.
def ADst_32   : VOPDstOperand<AGPR_32>;
def ADst_128  : VOPDstOperand<AReg_128>;
def ADst_512  : VOPDstOperand<AReg_512>;
def ADst_1024 : VOPDstOperand<AReg_1024>;

// v_accvgpr_read: source is an AGPR (destination stays the default VGPR).
def VOPProfileAccRead : VOP3_Profile<VOP_I32_I32, VOP3_MAI> {
  let Src0RC64 = ARegSrc_32;
}

// v_accvgpr_write: destination is an AGPR, source a VGPR/inline operand.
def VOPProfileAccWrite : VOP3_Profile<VOP_I32_I32, VOP3_MAI> {
  let DstRC = ADst_32;
  let Src0RC64 = VISrc_b32;
}
340
// Profile for the MFMA (matrix FMA) instructions: _DstRC and _SrcRC are
// the accumulator destination and src2 register operands; src0/src1 use
// SrcABRC (32-bit AGPR/VGPR source by default, 64-bit for f16 inputs).
// MAI has none of the usual opsel/clamp/neg-abs modifiers; instead it
// carries the cbsz/abid/blgp control operands.
class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC,
                    RegisterOperand SrcABRC = AVSrc_32>
  : VOP3_Profile<P, VOP3_MAI> {
  let DstRC = _DstRC;
  let Src0RC64 = SrcABRC;
  let Src1RC64 = SrcABRC;
  let Src2RC64 = _SrcRC;
  let HasOpSel = 0;
  let HasClamp = 0;
  let HasModifiers = 0;
  let Asm64 = " $vdst, $src0, $src1, $src2$cbsz$abid$blgp";
  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp);
}
354
// Concrete MAI profiles, named <dst elt type>_<src elt type>_X<acc width>.
// The f16 variants pass 4 x f16 per operand and therefore need 64-bit
// A/B sources (AVSrc_64).
def VOPProfileMAI_F32_F32_X4    : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32,       AISrc_128_f32,  ADst_128>;
def VOPProfileMAI_F32_F32_X16   : VOPProfileMAI<VOP_V16F32_F32_F32_V16F32,     AISrc_512_f32,  ADst_512>;
def VOPProfileMAI_F32_F32_X32   : VOPProfileMAI<VOP_V32F32_F32_F32_V32F32,     AISrc_1024_f32, ADst_1024>;
def VOPProfileMAI_I32_I32_X4    : VOPProfileMAI<VOP_V4I32_I32_I32_V4I32,       AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_I32_I32_X16   : VOPProfileMAI<VOP_V16I32_I32_I32_V16I32,     AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_I32_I32_X32   : VOPProfileMAI<VOP_V32I32_I32_I32_V32I32,     AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V2I16_X4  : VOPProfileMAI<VOP_V4F32_V2I16_V2I16_V4F32,   AISrc_128_b32,  ADst_128>;
def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI<VOP_V16F32_V2I16_V2I16_V16F32, AISrc_512_b32,  ADst_512>;
def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI<VOP_V32F32_V2I16_V2I16_V32F32, AISrc_1024_b32, ADst_1024>;
def VOPProfileMAI_F32_V4F16_X4  : VOPProfileMAI<VOP_V4F32_V4F16_V4F16_V4F32,   AISrc_128_b32,  ADst_128,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI<VOP_V16F32_V4F16_V4F16_V16F32, AISrc_512_b32,  ADst_512,  AVSrc_64>;
def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI<VOP_V32F32_V4F16_V4F16_V32F32, AISrc_1024_b32, ADst_1024, AVSrc_64>;
367
let Predicates = [HasMAIInsts] in {

// AGPR<->VGPR moves. Cheap, rematerializable copies.
let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
def V_ACCVGPR_READ_B32  : VOP3Inst<"v_accvgpr_read_b32",  VOPProfileAccRead>;
def V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite> {
  let isMoveImm = 1;
}
} // End isAsCheapAsAMove = 1, isReMaterializable = 1

// MFMA pseudos, named v_mfma_<dst>_<M>x<N>x<K><src type> and selected
// from the matching int_amdgcn_mfma_* intrinsic.
// FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
def V_MFMA_F32_4X4X1F32    : VOP3Inst<"v_mfma_f32_4x4x1f32",    VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_4x4x1f32>;
def V_MFMA_F32_4X4X4F16    : VOP3Inst<"v_mfma_f32_4x4x4f16",    VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_4x4x4f16>;
def V_MFMA_I32_4X4X4I8     : VOP3Inst<"v_mfma_i32_4x4x4i8",     VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_4x4x4i8>;
def V_MFMA_F32_4X4X2BF16   : VOP3Inst<"v_mfma_f32_4x4x2bf16",   VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_4x4x2bf16>;
def V_MFMA_F32_16X16X1F32  : VOP3Inst<"v_mfma_f32_16x16x1f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_16x16x1f32>;
def V_MFMA_F32_16X16X4F32  : VOP3Inst<"v_mfma_f32_16x16x4f32",  VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_16x16x4f32>;
def V_MFMA_F32_16X16X4F16  : VOP3Inst<"v_mfma_f32_16x16x4f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_16x16x4f16>;
def V_MFMA_F32_16X16X16F16 : VOP3Inst<"v_mfma_f32_16x16x16f16", VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_16x16x16f16>;
def V_MFMA_I32_16X16X4I8   : VOP3Inst<"v_mfma_i32_16x16x4i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_16x16x4i8>;
def V_MFMA_I32_16X16X16I8  : VOP3Inst<"v_mfma_i32_16x16x16i8",  VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_16x16x16i8>;
def V_MFMA_F32_16X16X2BF16 : VOP3Inst<"v_mfma_f32_16x16x2bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_16x16x2bf16>;
def V_MFMA_F32_16X16X8BF16 : VOP3Inst<"v_mfma_f32_16x16x8bf16", VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_16x16x8bf16>;
def V_MFMA_F32_32X32X1F32  : VOP3Inst<"v_mfma_f32_32x32x1f32",  VOPProfileMAI_F32_F32_X32,   int_amdgcn_mfma_f32_32x32x1f32>;
def V_MFMA_F32_32X32X2F32  : VOP3Inst<"v_mfma_f32_32x32x2f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_32x32x2f32>;
def V_MFMA_F32_32X32X4F16  : VOP3Inst<"v_mfma_f32_32x32x4f16",  VOPProfileMAI_F32_V4F16_X32, int_amdgcn_mfma_f32_32x32x4f16>;
def V_MFMA_F32_32X32X8F16  : VOP3Inst<"v_mfma_f32_32x32x8f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_32x32x8f16>;
def V_MFMA_I32_32X32X4I8   : VOP3Inst<"v_mfma_i32_32x32x4i8",   VOPProfileMAI_I32_I32_X32,   int_amdgcn_mfma_i32_32x32x4i8>;
def V_MFMA_I32_32X32X8I8   : VOP3Inst<"v_mfma_i32_32x32x8i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_32x32x8i8>;
def V_MFMA_F32_32X32X2BF16 : VOP3Inst<"v_mfma_f32_32x32x2bf16", VOPProfileMAI_F32_V2I16_X32, int_amdgcn_mfma_f32_32x32x2bf16>;
def V_MFMA_F32_32X32X4BF16 : VOP3Inst<"v_mfma_f32_32x32x4bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_32x32x4bf16>;
} // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1

} // End Predicates = [HasMAIInsts]

// Accept the suffix-less spellings in the assembler.
def : MnemonicAlias<"v_accvgpr_read",  "v_accvgpr_read_b32">;
def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;
405
// Real (encoded) VI-family VOP3P instruction for the pseudo named NAME,
// with the given 10-bit opcode. Decoded in the GFX8 namespace.
multiclass VOP3P_Real_vi<bits<10> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasVOP3PInsts;
    let DecoderNamespace = "GFX8";
  }
}
413
// Same as VOP3P_Real_vi but using the MAI encoding class and gated on
// the MAI assembler predicate.
multiclass VOP3P_Real_MAI<bits<10> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicate = HasMAIInsts;
    let DecoderNamespace = "GFX8";
  }
}
421
// VI-family encodings of the packed 16-bit ops (opcodes 0x380-0x392).
defm V_PK_MAD_I16 : VOP3P_Real_vi <0x380>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x381>;
defm V_PK_ADD_I16 : VOP3P_Real_vi <0x382>;
defm V_PK_SUB_I16 : VOP3P_Real_vi <0x383>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x384>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x385>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x386>;
defm V_PK_MAX_I16 : VOP3P_Real_vi <0x387>;
defm V_PK_MIN_I16 : VOP3P_Real_vi <0x388>;
defm V_PK_MAD_U16 : VOP3P_Real_vi <0x389>;

defm V_PK_ADD_U16 : VOP3P_Real_vi <0x38a>;
defm V_PK_SUB_U16 : VOP3P_Real_vi <0x38b>;
defm V_PK_MAX_U16 : VOP3P_Real_vi <0x38c>;
defm V_PK_MIN_U16 : VOP3P_Real_vi <0x38d>;
defm V_PK_FMA_F16 : VOP3P_Real_vi <0x38e>;
defm V_PK_ADD_F16 : VOP3P_Real_vi <0x38f>;
defm V_PK_MUL_F16 : VOP3P_Real_vi <0x390>;
defm V_PK_MIN_F16 : VOP3P_Real_vi <0x391>;
defm V_PK_MAX_F16 : VOP3P_Real_vi <0x392>;
442
443
// mad_mix and fma_mix share opcodes 0x3a0-0x3a2; the subtarget predicate
// and decoder namespace keep the two families apart.
let SubtargetPredicate = HasMadMixInsts in {
defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x3a0>;
defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x3a1>;
defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x3a2>;
} // End SubtargetPredicate = HasMadMixInsts

let SubtargetPredicate = HasFmaMixInsts in {
let DecoderNamespace = "GFX9_DL" in {
// The mad_mix instructions were renamed and their behaviors changed,
// but the opcode stayed the same so we need to put these in a
// different DecoderNamespace to avoid the ambiguity.
defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x3a0>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x3a1>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x3a2>;
} // End DecoderNamespace = "GFX9_DL"
} // End SubtargetPredicate = HasFmaMixInsts
460
461
// VI-family encodings of the dot-product instructions.
let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x3a3>;
defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x3a6>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x3a7>;
defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x3a9>;
defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x3ab>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x3a8>;
defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x3aa>;

} // End SubtargetPredicate = HasDot1Insts
478
// VI-family encodings of the accvgpr moves and MFMA instructions.
let SubtargetPredicate = HasMAIInsts in {

defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x3d8>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x3d9>;
defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MAI <0x3c0>;
defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MAI <0x3c1>;
defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MAI <0x3c2>;
defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MAI <0x3c4>;
defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MAI <0x3c5>;
defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MAI <0x3c8>;
defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MAI <0x3c9>;
defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MAI <0x3ca>;
defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MAI <0x3cc>;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MAI <0x3cd>;
defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MAI <0x3d0>;
defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MAI <0x3d1>;
defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MAI <0x3d2>;
defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MAI <0x3d4>;
defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MAI <0x3d5>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MAI <0x3e8>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MAI <0x3e9>;
defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MAI <0x3eb>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MAI <0x3ec>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MAI <0x3ed>;

} // End SubtargetPredicate = HasMAIInsts
505
//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
  // Real GFX10 VOP3P instruction for pseudo NAME with the given opcode.
  multiclass VOP3P_Real_gfx10<bits<10> op> {
    def _gfx10 : VOP3P_Real<!cast<VOP3P_Pseudo>(NAME), SIEncodingFamily.GFX10>,
                 VOP3Pe_gfx10 <op, !cast<VOP3P_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"
516
// GFX10 encodings. Note the opcodes were renumbered from the VI-family
// values above (packed ops start at 0x000, mix ops at 0x020).
defm V_PK_MAD_I16     : VOP3P_Real_gfx10<0x000>;
defm V_PK_MUL_LO_U16  : VOP3P_Real_gfx10<0x001>;
defm V_PK_ADD_I16     : VOP3P_Real_gfx10<0x002>;
defm V_PK_SUB_I16     : VOP3P_Real_gfx10<0x003>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10<0x004>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10<0x005>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10<0x006>;
defm V_PK_MAX_I16     : VOP3P_Real_gfx10<0x007>;
defm V_PK_MIN_I16     : VOP3P_Real_gfx10<0x008>;
defm V_PK_MAD_U16     : VOP3P_Real_gfx10<0x009>;
defm V_PK_ADD_U16     : VOP3P_Real_gfx10<0x00a>;
defm V_PK_SUB_U16     : VOP3P_Real_gfx10<0x00b>;
defm V_PK_MAX_U16     : VOP3P_Real_gfx10<0x00c>;
defm V_PK_MIN_U16     : VOP3P_Real_gfx10<0x00d>;
defm V_PK_FMA_F16     : VOP3P_Real_gfx10<0x00e>;
defm V_PK_ADD_F16     : VOP3P_Real_gfx10<0x00f>;
defm V_PK_MUL_F16     : VOP3P_Real_gfx10<0x010>;
defm V_PK_MIN_F16     : VOP3P_Real_gfx10<0x011>;
defm V_PK_MAX_F16     : VOP3P_Real_gfx10<0x012>;
defm V_FMA_MIX_F32    : VOP3P_Real_gfx10<0x020>;
defm V_FMA_MIXLO_F16  : VOP3P_Real_gfx10<0x021>;
defm V_FMA_MIXHI_F16  : VOP3P_Real_gfx10<0x022>;

let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10 <0x013>;
defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x014>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x015>;
defm V_DOT4_U32_U8  : VOP3P_Real_gfx10 <0x017>;
defm V_DOT8_U32_U4  : VOP3P_Real_gfx10 <0x019>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x016>;
defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x018>;

} // End SubtargetPredicate = HasDot1Insts