//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the builtins for RISC-V V-extension. See:
//
//     https://github.com/riscv/rvv-intrinsic-doc
//
//===----------------------------------------------------------------------===//

include "riscv_vector_common.td"

// Element-type letters used in builtin prototype strings:
// c/s/i/l = int8/16/32/64, x = float16, f = float32, d = float64.
defvar TypeList = ["c","s","i","l","x","f","d"];
// Pairs of (EEW as decimal string, prototype modifier fixing log2(EEW)).
defvar EEWList = [["8", "(Log2EEW:3)"],
                  ["16", "(Log2EEW:4)"],
                  ["32", "(Log2EEW:5)"],
                  ["64", "(Log2EEW:6)"]];

class IsFloat<string type> {
  bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d"));
}

// vlm: unit-stride mask load. Masks have no masked variant themselves.
let SupportOverloading = false,
    MaskedPolicyScheme = NonePolicy in {
  class RVVVLEMaskBuiltin : RVVOutBuiltin<"m", "mPCUe", "c"> {
    let Name = "vlm_v";
    let IRName = "vlm";
    let HasMasked = false;
  }
}

// vle: unit-stride loads; unsigned flavor only for integer element types.
let SupportOverloading = false,
    UnMaskedPolicyScheme = HasPassthruOperand in {
  multiclass RVVVLEBuiltin<list<string> types> {
    let Name = NAME # "_v",
        IRName = "vle",
        MaskedIRName ="vle_mask" in {
      foreach type = types in {
        def : RVVOutBuiltin<"v", "vPCe", type>;
        if !not(IsFloat<type>.val) then {
          def : RVVOutBuiltin<"Uv", "UvPCUe", type>;
        }
      }
    }
  }
}

// vleff: unit-stride fault-only-first loads. The builtin carries an extra
// 'new_vl' pointer operand which receives the VL actually loaded; the
// intrinsic instead returns it as the second struct element.
multiclass RVVVLEFFBuiltin<list<string> types> {
  let Name = NAME # "_v",
      IRName = "vleff",
      MaskedIRName = "vleff_mask",
      SupportOverloading = false,
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          // Move mask to right before vl.
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          IntrinsicTypes = {ResultType, Ops[4]->getType()};
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
          IntrinsicTypes = {ResultType, Ops[3]->getType()};
        }
        Value *NewVL = Ops[2];
        Ops.erase(Ops.begin() + 2);
        llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
        llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
        llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
        // Store new_vl.
        clang::CharUnits Align;
        if (IsMasked)
          Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(E->getNumArgs()-2)->getType());
        else
          Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
        llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
        Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
        return V;
      }
      }] in {
    foreach type = types in {
      def : RVVBuiltin<"v", "vPCePz", type>;
      // Skip floating types for unsigned versions.
      if !not(IsFloat<type>.val) then {
        def : RVVBuiltin<"Uv", "UvPCUePz", type>;
      }
    }
  }
}

// vlse: strided loads ('t' = ptrdiff_t byte stride operand).
multiclass RVVVLSEBuiltin<list<string> types> {
  let Name = NAME # "_v",
      IRName = "vlse",
      MaskedIRName ="vlse_mask",
      SupportOverloading = false,
      UnMaskedPolicyScheme = HasPassthruOperand in {
    foreach type = types in {
      def : RVVOutBuiltin<"v", "vPCet", type>;
      if !not(IsFloat<type>.val) then {
        def : RVVOutBuiltin<"Uv", "UvPCUet", type>;
      }
    }
  }
}

// vluxei/vloxei: indexed (gather) loads for every index EEW. The EEW=64
// variants additionally require RV64.
multiclass RVVIndexedLoad<string op> {
  let UnMaskedPolicyScheme = HasPassthruOperand in {
    foreach type = TypeList in {
      foreach eew_list = EEWList[0-2] in {
        defvar eew = eew_list[0];
        defvar eew_type = eew_list[1];
        let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
            RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
                                                   []<string>) in {
          def: RVVOutOp1Builtin<"v", "vPCe" # eew_type # "Uv", type>;
          if !not(IsFloat<type>.val) then {
            def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
          }
        }
      }
      defvar eew64 = "64";
      defvar eew64_type = "(Log2EEW:6)";
      let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
          RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"],
                                                 ["RV64"]) in {
        def: RVVOutOp1Builtin<"v", "vPCe" # eew64_type # "Uv", type>;
        if !not(IsFloat<type>.val) then {
          def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
        }
      }
    }
  }
}

// vsm/vse: unit-stride stores. The builtin takes (ptr, value, ...) while the
// intrinsic wants (value, ptr, ...), hence the operand shuffling below.
let HasMaskedOffOperand = false,
    MaskedPolicyScheme = NonePolicy,
    ManualCodegen = [{
      if (IsMasked) {
        // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
        std::swap(Ops[0], Ops[2]);
      } else {
        // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
        std::swap(Ops[0], Ops[1]);
      }
      if (IsMasked)
        IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
      else
        IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
    }] in {
  class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
    let Name = "vsm_v";
    let IRName = "vsm";
    let HasMasked = false;
  }
  multiclass RVVVSEBuiltin<list<string> types> {
    let Name = NAME # "_v",
        IRName = "vse",
        MaskedIRName = "vse_mask" in {
      foreach type = types in {
        def : RVVBuiltin<"v", "0Pev", type>;
        if !not(IsFloat<type>.val) then {
          def : RVVBuiltin<"Uv", "0PUeUv", type>;
        }
      }
    }
  }
}

// vsse: strided stores.
multiclass RVVVSSEBuiltin<list<string> types> {
  let Name = NAME # "_v",
      IRName = "vsse",
      MaskedIRName = "vsse_mask",
      HasMaskedOffOperand = false,
      MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
        if (IsMasked) {
          // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
          std::swap(Ops[0], Ops[3]);
        } else {
          // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
          std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
        }
        if (IsMasked)
          IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
        else
          IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
      }] in {
    foreach type = types in {
      def : RVVBuiltin<"v", "0Petv", type>;
      if !not(IsFloat<type>.val) then {
        def : RVVBuiltin<"Uv", "0PUetUv", type>;
      }
    }
  }
}

// vsuxei/vsoxei: indexed (scatter) stores, mirroring RVVIndexedLoad.
multiclass RVVIndexedStore<string op> {
  let HasMaskedOffOperand = false,
      MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
        if (IsMasked) {
          // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
          std::swap(Ops[0], Ops[3]);
        } else {
          // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
          std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
        }
        if (IsMasked)
          IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
        else
          IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
      }] in {
    foreach type = TypeList in {
      foreach eew_list = EEWList[0-2] in {
        defvar eew = eew_list[0];
        defvar eew_type = eew_list[1];
        let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
            RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
                                                   []<string>) in {
          def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
          if !not(IsFloat<type>.val) then {
            def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
          }
        }
      }
      defvar eew64 = "64";
      defvar eew64_type = "(Log2EEW:6)";
      let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
          RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"],
                                                 ["RV64"]) in {
        def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
        if !not(IsFloat<type>.val) then {
          def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
        }
      }
    }
  }
}

defvar NFList = [2, 3, 4, 5, 6, 7, 8];
/*
A segment load builtin has different variants.

Therefore a segment unit-stride load builtin can have 4 variants,
1. When unmasked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Ptr, VL)
2. When masked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Mask, Ptr, VL)
3. When unmasked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, VL)
4. When masked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, VL)

Other variants of segment load builtin share the same structure, but they
have their own extra parameter.

The segment unit-stride fault-only-first load builtin has a 'NewVL'
operand after the 'Ptr' operand.
1. When unmasked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Ptr, NewVL, VL)
2. When masked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Mask, Ptr, NewVL, VL)
3. When unmasked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, NewVL, VL)
4. When masked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, NewVL, VL)

The segment strided load builtin has a 'Stride' operand after the 'Ptr'
operand.
1. When unmasked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Ptr, Stride, VL)
2. When masked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Mask, Ptr, Stride, VL)
3. When unmasked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, Stride, VL)
4. When masked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, Stride, VL)

The segment indexed load builtin has a 'Idx' operand after the 'Ptr' operand.
1. When unmasked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Ptr, Idx, VL)
2. When masked and the policies are all specified as agnostic:
(Address0, ..., Address{NF - 1}, Mask, Ptr, Idx, VL)
3. When unmasked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, Idx, VL)
4. When masked and one of the policies is specified as undisturbed:
(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
 Ptr, Idx, VL)

Segment load intrinsics have different variants similar to their builtins.

Segment unit-stride load intrinsic,
  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
Segment unit-stride fault-only-first load intrinsic,
  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
Segment strided load intrinsic,
  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, Mask, VL, Policy)
  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, VL)
Segment indexed load intrinsic,
  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, Mask, VL, Policy)
  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, VL)

The Vector(s) is poison when the policy behavior allows us to not care
about any masked-off elements.
*/

// Prototype-string fragment of 'nf' pointer operands ("Pv"/"PUv" each).
class PVString<int nf, bit signed> {
  string S =
    !cond(!eq(nf, 2): !if(signed, "PvPv", "PUvPUv"),
          !eq(nf, 3): !if(signed, "PvPvPv", "PUvPUvPUv"),
          !eq(nf, 4): !if(signed, "PvPvPvPv", "PUvPUvPUvPUv"),
          !eq(nf, 5): !if(signed, "PvPvPvPvPv", "PUvPUvPUvPUvPUv"),
          !eq(nf, 6): !if(signed, "PvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUv"),
          !eq(nf, 7): !if(signed, "PvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUv"),
          !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
}

// Prototype-string fragment of 'nf' vector operands ("v"/"Uv" each).
class VString<int nf, bit signed> {
  string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
                   !eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
                   !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"),
                   !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"),
                   !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"),
                   !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"),
                   !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
}


class FixedVString<int fixed_lmul, int num, string vec> {
  string V = "(LFixedLog2LMUL:" # fixed_lmul # ")" # vec;
  string S = !interleave(!listsplat(V, num), "");
}

multiclass RVVNonTupleVCreateBuiltin<int dst_lmul, list<int> src_lmul_list> {
  defvar dst_v = FixedVString<dst_lmul, 1, "v">.V;
  defvar dst_uv = FixedVString<dst_lmul, 1, "Uv">.V;
  foreach src_lmul = src_lmul_list in {
    // Number of source vectors needed to fill one destination register group.
    defvar num = !shl(1, !sub(dst_lmul, src_lmul));

    defvar src_v = FixedVString<src_lmul, num, "v">.V;
    defvar src_s = FixedVString<src_lmul, num, "v">.S;
    def vcreate # src_v # dst_v : RVVBuiltin<src_v # dst_v,
                                             dst_v # src_s,
                                             "csilxfd", dst_v>;

    defvar src_uv = FixedVString<src_lmul, num, "Uv">.V;
    defvar src_us = FixedVString<src_lmul, num, "Uv">.S;
    def vcreate_u # src_uv # dst_uv : RVVBuiltin<src_uv # dst_uv,
                                                 dst_uv # src_us,
                                                 "csil", dst_uv>;
  }
}

// Unary op expressed as "v op 0" (e.g. vneg as vrsub with 0 scalar).
multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
  let Name = NAME,
      IRName = IR,
      MaskedIRName = IR # "_mask",
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        }
        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));

        if (IsMasked) {
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          // maskedoff, op1, op2, mask, vl, policy
          IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
        } else {
          // passthru, op1, op2, vl
          IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
        }
        break;
      }
      }] in {
    def : RVVBuiltin<"v", "vv", type_range>;
  }
}

// Unary op expressed as "v op all-ones" (vnot as vxor with -1 scalar).
multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
  let Name = NAME,
      IRName = IR,
      MaskedIRName = IR # "_mask",
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        }
        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
        Ops.insert(Ops.begin() + 2,
                   llvm::Constant::getAllOnesValue(ElemTy));
        if (IsMasked) {
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          // maskedoff, op1, op2, mask, vl, policy
          IntrinsicTypes = {ResultType,
                            ElemTy,
                            Ops[4]->getType()};
        } else {
          // passthru, op1, op2, vl
          IntrinsicTypes = {ResultType,
                            ElemTy,
                            Ops[3]->getType()};
        }
        break;
      }
      }] in {
    def : RVVBuiltin<"v", "vv", type_range>;
    def : RVVBuiltin<"Uv", "UvUv", type_range>;
  }
}

// Mask-register unary op expressed as a binary op with a duplicated operand.
multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
  let Name = NAME,
      IRName = IR,
      HasMasked = false,
      ManualCodegen = [{
      {
        // op1, vl
        IntrinsicTypes = {ResultType,
                          Ops[1]->getType()};
        Ops.insert(Ops.begin() + 1, Ops[0]);
        break;
      }
      }] in {
    def : RVVBuiltin<"m", "mm", type_range>;
  }
}

// FP unary op expressed as a binary op with a duplicated vector operand.
multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
  let Name = NAME,
      IRName = IR,
      MaskedIRName = IR # "_mask",
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
          Ops.insert(Ops.begin() + 2, Ops[1]);
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          // maskedoff, op1, op2, mask, vl
          IntrinsicTypes = {ResultType,
                            Ops[2]->getType(),
                            Ops.back()->getType()};
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
          // op1, op2, vl
          IntrinsicTypes = {ResultType,
                            Ops[1]->getType(), Ops[2]->getType()};
          Ops.insert(Ops.begin() + 2, Ops[1]);
          break;
        }
        break;
      }
      }] in {
    def : RVVBuiltin<"v", "vv", type_range>;
  }
}

// Widening conversion expressed as "op with zero" (e.g. vwcvt as vwadd 0).
multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
                                 list<list<string>> suffixes_prototypes> {
  let Name = NAME,
      OverloadedName = MName,
      IRName = IR,
      MaskedIRName = IR # "_mask",
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        }
        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
        if (IsMasked) {
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          // maskedoff, op1, op2, mask, vl, policy
          IntrinsicTypes = {ResultType,
                            Ops[1]->getType(),
                            ElemTy,
                            Ops[4]->getType()};
        } else {
          // passthru, op1, op2, vl
          IntrinsicTypes = {ResultType,
                            Ops[1]->getType(),
                            ElemTy,
                            Ops[3]->getType()};
        }
        break;
      }
      }] in {
    foreach s_p = suffixes_prototypes in {
      def : RVVBuiltin<s_p[0], s_p[1], type_range>;
    }
  }
}

// Narrowing conversion expressed as "op with zero shift" (vncvt as vnsrl 0).
multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
                                 list<list<string>> suffixes_prototypes> {
  let Name = NAME,
      OverloadedName = MName,
      IRName = IR,
      MaskedIRName = IR # "_mask",
      UnMaskedPolicyScheme = HasPassthruOperand,
      ManualCodegen = [{
      {
        if (IsMasked) {
          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        } else {
          if (PolicyAttrs & RVV_VTA)
            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
        }
        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
        if (IsMasked) {
          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
          // maskedoff, op1, xlen, mask, vl, policy
          IntrinsicTypes = {ResultType,
                            Ops[1]->getType(),
                            Ops[4]->getType(),
                            Ops[4]->getType()};
        } else {
          // passthru, op1, xlen, vl
          IntrinsicTypes = {ResultType,
                            Ops[1]->getType(),
                            Ops[3]->getType(),
                            Ops[3]->getType()};
        }
        break;
      }
      }] in {
    foreach s_p = suffixes_prototypes in {
      def : RVVBuiltin<s_p[0], s_p[1], type_range>;
    }
  }
}
// Expose vlenb (vector register length in bytes) via a header macro that
// forwards to the builtin.
let HeaderCode =
[{
#define __riscv_vlenb() __builtin_rvv_vlenb()
}] in
def vlenb_macro: RVVHeader;

// The builtin itself reads the 'vlenb' CSR through llvm.read_register.
let HasBuiltinAlias = false, HasVL = false, HasMasked = false,
    UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy,
    Log2LMUL = [0], IRName = "",
    ManualCodegen = [{
    {
      LLVMContext &Context = CGM.getLLVMContext();
      llvm::MDBuilder MDHelper(Context);

      llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "vlenb")};
      llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
      llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
      llvm::Function *F =
          CGM.getIntrinsic(llvm::Intrinsic::read_register, {SizeTy});
      return Builder.CreateCall(F, Metadata);
    }
    }] in
{
  def vlenb : RVVBuiltin<"", "u", "i">;
}

// 6. Configuration-Setting Instructions
// 6.1. vsetvli/vsetvl instructions

// vsetvl/vsetvlmax are a macro because they require constant integers in SEW
// and LMUL.
// Header macros mapping __riscv_vsetvl_*/__riscv_vsetvlmax_* to the vsetvli
// builtins with the SEW/LMUL operands as immediates. Fractional-LMUL and
// 64-bit-element variants need ELEN >= 64.
let HeaderCode =
[{
#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)

#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)

#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)

#if __riscv_v_elen >= 64
#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)

#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
#endif

#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)

#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)

#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)

#if __riscv_v_elen >= 64
#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)

#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
#endif

}] in
def vsetvl_macro: RVVHeader;

let HasBuiltinAlias = false,
    HasVL = false,
    HasMasked = false,
    MaskedPolicyScheme = NonePolicy,
    Log2LMUL = [0],
    ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
{
  def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
  def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
}

// 7. Vector Loads and Stores
// 7.4.
// Vector Unit-Stride Instructions
def vlm: RVVVLEMaskBuiltin;
defm vle8: RVVVLEBuiltin<["c"]>;
defm vle16: RVVVLEBuiltin<["s"]>;
// _Float16 loads share the vle16_v name but additionally require Zvfhmin.
let Name = "vle16_v", RequiredFeatures = ["Zvfhmin"] in
  defm vle16_h: RVVVLEBuiltin<["x"]>;
defm vle32: RVVVLEBuiltin<["i","f"]>;
defm vle64: RVVVLEBuiltin<["l","d"]>;

def vsm : RVVVSEMaskBuiltin;
defm vse8 : RVVVSEBuiltin<["c"]>;
defm vse16: RVVVSEBuiltin<["s"]>;
let Name = "vse16_v", RequiredFeatures = ["Zvfhmin"] in
  defm vse16_h: RVVVSEBuiltin<["x"]>;
defm vse32: RVVVSEBuiltin<["i","f"]>;
defm vse64: RVVVSEBuiltin<["l","d"]>;

// 7.5. Vector Strided Instructions
defm vlse8: RVVVLSEBuiltin<["c"]>;
defm vlse16: RVVVLSEBuiltin<["s"]>;
let Name = "vlse16_v", RequiredFeatures = ["Zvfhmin"] in
  defm vlse16_h: RVVVLSEBuiltin<["x"]>;
defm vlse32: RVVVLSEBuiltin<["i","f"]>;
defm vlse64: RVVVLSEBuiltin<["l","d"]>;

defm vsse8 : RVVVSSEBuiltin<["c"]>;
defm vsse16: RVVVSSEBuiltin<["s"]>;
let Name = "vsse16_v", RequiredFeatures = ["Zvfhmin"] in
  defm vsse16_h: RVVVSSEBuiltin<["x"]>;
defm vsse32: RVVVSSEBuiltin<["i","f"]>;
defm vsse64: RVVVSSEBuiltin<["l","d"]>;

// 7.6. Vector Indexed Instructions
defm : RVVIndexedLoad<"vluxei">;
defm : RVVIndexedLoad<"vloxei">;

defm : RVVIndexedStore<"vsuxei">;
defm : RVVIndexedStore<"vsoxei">;

// 7.7.
Unit-stride Fault-Only-First Loads 720defm vle8ff: RVVVLEFFBuiltin<["c"]>; 721defm vle16ff: RVVVLEFFBuiltin<["s"]>; 722let Name = "vle16ff_v", RequiredFeatures = ["Zvfhmin"] in 723 defm vle16ff: RVVVLEFFBuiltin<["x"]>; 724defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>; 725defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>; 726 727multiclass RVVUnitStridedSegLoadTuple<string op> { 728 foreach type = TypeList in { 729 defvar eew = !cond(!eq(type, "c") : "8", 730 !eq(type, "s") : "16", 731 !eq(type, "i") : "32", 732 !eq(type, "l") : "64", 733 !eq(type, "x") : "16", 734 !eq(type, "f") : "32", 735 !eq(type, "d") : "64"); 736 foreach nf = NFList in { 737 let Name = op # nf # "e" # eew # "_v", 738 IRName = op # nf, 739 MaskedIRName = op # nf # "_mask", 740 NF = nf, 741 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 742 []<string>), 743 ManualCodegen = [{ 744 { 745 llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0]; 746 IntrinsicTypes = {ElementVectorType, Ops.back()->getType()}; 747 SmallVector<llvm::Value*, 12> Operands; 748 749 bool NoPassthru = 750 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 751 (!IsMasked && (PolicyAttrs & RVV_VTA)); 752 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 753 754 if (NoPassthru) { // Push poison into passthru 755 Operands.append(NF, llvm::PoisonValue::get(ElementVectorType)); 756 } else { // Push intrinsics operands into passthru 757 llvm::Value *PassthruOperand = IsMasked ? 
Ops[1] : Ops[0]; 758 for (unsigned I = 0; I < NF; ++I) 759 Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I})); 760 } 761 762 Operands.push_back(Ops[Offset]); // Ptr 763 if (IsMasked) 764 Operands.push_back(Ops[0]); 765 Operands.push_back(Ops[Offset + 1]); // VL 766 if (IsMasked) 767 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 768 769 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 770 771 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 772 if (ReturnValue.isNull()) 773 return LoadValue; 774 else 775 return Builder.CreateStore(LoadValue, ReturnValue.getValue()); 776 } 777 }] in { 778 defvar T = "(Tuple:" # nf # ")"; 779 def : RVVBuiltin<T # "v", T # "vPCe", type>; 780 if !not(IsFloat<type>.val) then { 781 def : RVVBuiltin<T # "Uv", T # "UvPCUe", type>; 782 } 783 } 784 } 785 } 786} 787 788multiclass RVVUnitStridedSegStoreTuple<string op> { 789 foreach type = TypeList in { 790 defvar eew = !cond(!eq(type, "c") : "8", 791 !eq(type, "s") : "16", 792 !eq(type, "i") : "32", 793 !eq(type, "l") : "64", 794 !eq(type, "x") : "16", 795 !eq(type, "f") : "32", 796 !eq(type, "d") : "64"); 797 foreach nf = NFList in { 798 let Name = op # nf # "e" # eew # "_v", 799 IRName = op # nf, 800 MaskedIRName = op # nf # "_mask", 801 NF = nf, 802 HasMaskedOffOperand = false, 803 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 804 []<string>), 805 ManualCodegen = [{ 806 { 807 // Masked 808 // Builtin: (mask, ptr, v_tuple, vl) 809 // Intrinsic: (val0, val1, ..., ptr, mask, vl) 810 // Unmasked 811 // Builtin: (ptr, v_tuple, vl) 812 // Intrinsic: (val0, val1, ..., ptr, vl) 813 unsigned Offset = IsMasked ? 
1 : 0; 814 llvm::Value *VTupleOperand = Ops[Offset + 1]; 815 816 SmallVector<llvm::Value*, 12> Operands; 817 for (unsigned I = 0; I < NF; ++I) { 818 llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I}); 819 Operands.push_back(V); 820 } 821 Operands.push_back(Ops[Offset]); // Ptr 822 if (IsMasked) 823 Operands.push_back(Ops[0]); 824 Operands.push_back(Ops[Offset + 2]); // VL 825 826 IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()}; 827 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 828 return Builder.CreateCall(F, Operands, ""); 829 } 830 }] in { 831 defvar T = "(Tuple:" # nf # ")"; 832 def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>; 833 if !not(IsFloat<type>.val) then { 834 def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>; 835 } 836 } 837 } 838 } 839} 840 841multiclass RVVUnitStridedSegLoadFFTuple<string op> { 842 foreach type = TypeList in { 843 defvar eew = !cond(!eq(type, "c") : "8", 844 !eq(type, "s") : "16", 845 !eq(type, "i") : "32", 846 !eq(type, "l") : "64", 847 !eq(type, "x") : "16", 848 !eq(type, "f") : "32", 849 !eq(type, "d") : "64"); 850 foreach nf = NFList in { 851 let Name = op # nf # "e" # eew # "ff_v", 852 IRName = op # nf # "ff", 853 MaskedIRName = op # nf # "ff_mask", 854 NF = nf, 855 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 856 []<string>), 857 ManualCodegen = [{ 858 { 859 llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0]; 860 IntrinsicTypes = {ElementVectorType, Ops.back()->getType()}; 861 SmallVector<llvm::Value*, 12> Operands; 862 863 bool NoPassthru = 864 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) | 865 (!IsMasked && (PolicyAttrs & RVV_VTA)); 866 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; 867 868 if (NoPassthru) { // Push poison into passthru 869 Operands.append(NF, llvm::PoisonValue::get(ElementVectorType)); 870 } else { // Push intrinsics operands into passthru 871 llvm::Value *PassthruOperand = IsMasked ? 
Ops[1] : Ops[0]; 872 for (unsigned I = 0; I < NF; ++I) 873 Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I})); 874 } 875 876 Operands.push_back(Ops[Offset]); // Ptr 877 if (IsMasked) 878 Operands.push_back(Ops[0]); 879 Operands.push_back(Ops[Offset + 2]); // vl 880 if (IsMasked) 881 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 882 883 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 884 885 llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); 886 // Get alignment from the new vl operand 887 clang::CharUnits Align = 888 CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType()); 889 890 llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType); 891 for (unsigned I = 0; I < NF; ++I) { 892 llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I}); 893 ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I}); 894 } 895 896 // Store new_vl 897 llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF}); 898 Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align)); 899 900 if (ReturnValue.isNull()) 901 return ReturnTuple; 902 else 903 return Builder.CreateStore(ReturnTuple, ReturnValue.getValue()); 904 } 905 }] in { 906 defvar T = "(Tuple:" # nf # ")"; 907 def : RVVBuiltin<T # "v", T # "vPCePz", type>; 908 if !not(IsFloat<type>.val) then { 909 def : RVVBuiltin<T # "Uv", T # "UvPCUePz", type>; 910 } 911 } 912 } 913 } 914} 915 916multiclass RVVStridedSegLoadTuple<string op> { 917 foreach type = TypeList in { 918 defvar eew = !cond(!eq(type, "c") : "8", 919 !eq(type, "s") : "16", 920 !eq(type, "i") : "32", 921 !eq(type, "l") : "64", 922 !eq(type, "x") : "16", 923 !eq(type, "f") : "32", 924 !eq(type, "d") : "64"); 925 foreach nf = NFList in { 926 let Name = op # nf # "e" # eew # "_v", 927 IRName = op # nf, 928 MaskedIRName = op # nf # "_mask", 929 NF = nf, 930 RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"], 931 []<string>), 932 ManualCodegen = [{ 933 { 934 llvm::Type 
// Strided segment stores (vssseg<nf>e<eew>).  The tuple argument is
// flattened: each of its NF fields becomes a separate vector operand of
// the IR store intrinsic.  Stores have no passthru, so masked forms carry
// no maskedoff operand and take no policy.
multiclass RVVStridedSegStoreTuple<string op> {
  foreach type = TypeList in {
    defvar eew = !cond(!eq(type, "c") : "8",
                       !eq(type, "s") : "16",
                       !eq(type, "i") : "32",
                       !eq(type, "l") : "64",
                       !eq(type, "x") : "16",
                       !eq(type, "f") : "32",
                       !eq(type, "d") : "64");
    foreach nf = NFList in {
      let Name = op # nf # "e" # eew # "_v",
          IRName = op # nf,
          MaskedIRName = op # nf # "_mask",
          NF = nf,
          HasMaskedOffOperand = false,
          MaskedPolicyScheme = NonePolicy,
          RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
                                 []<string>),
          ManualCodegen = [{
    {
      // Masked
      // Builtin: (mask, ptr, stride, v_tuple, vl)
      // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
      // Unmasked
      // Builtin: (ptr, stride, v_tuple, vl)
      // Intrinsic: (val0, val1, ..., ptr, stride, vl)
      unsigned Offset = IsMasked ? 1 : 0;
      llvm::Value *VTupleOperand = Ops[Offset + 2];

      // Unpack the tuple into NF individual vector operands.
      SmallVector<llvm::Value*, 12> Operands;
      for (unsigned I = 0; I < NF; ++I) {
        llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
        Operands.push_back(V);
      }
      Operands.push_back(Ops[Offset]); // Ptr
      Operands.push_back(Ops[Offset + 1]); // Stride
      if (IsMasked)
        Operands.push_back(Ops[0]);
      Operands.push_back(Ops[Offset + 3]); // VL

      // Parameterise on the per-field vector type and the vl type.
      IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
      return Builder.CreateCall(F, Operands, "");
    }
  }] in {
        defvar T = "(Tuple:" # nf # ")";
        def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>;
        // Skip the unsigned variant for floating-point element types.
        if !not(IsFloat<type>.val) then {
          def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>;
        }
      }
    }
  }
}
// Indexed (ordered/unordered) segment stores (vsuxseg / vsoxseg).  As with
// the strided variant, the tuple argument is flattened into NF vector
// operands; the index vector type additionally parameterises the intrinsic.
multiclass RVVIndexedSegStoreTuple<string op> {
  foreach type = TypeList in {
    foreach eew_info = EEWList in {
      defvar eew = eew_info[0];
      defvar eew_type = eew_info[1];
      foreach nf = NFList in {
        let Name = op # nf # "ei" # eew # "_v",
            IRName = op # nf,
            MaskedIRName = op # nf # "_mask",
            NF = nf,
            HasMaskedOffOperand = false,
            MaskedPolicyScheme = NonePolicy,
            RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
                                   []<string>),
            ManualCodegen = [{
    {
      // Masked
      // Builtin: (mask, ptr, index, v_tuple, vl)
      // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
      // Unmasked
      // Builtin: (ptr, index, v_tuple, vl)
      // Intrinsic: (val0, val1, ..., ptr, index, vl)
      unsigned Offset = IsMasked ? 1 : 0;
      llvm::Value *VTupleOperand = Ops[Offset + 2];

      // Unpack the tuple into NF individual vector operands.
      SmallVector<llvm::Value*, 12> Operands;
      for (unsigned I = 0; I < NF; ++I) {
        llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
        Operands.push_back(V);
      }
      Operands.push_back(Ops[Offset]); // Ptr
      Operands.push_back(Ops[Offset + 1]); // Idx
      if (IsMasked)
        Operands.push_back(Ops[0]);
      Operands.push_back(Ops[Offset + 3]); // VL

      // Parameterise on the data vector, index vector and vl types.
      IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
                        Operands.back()->getType()};
      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
      return Builder.CreateCall(F, Operands, "");
    }
  }] in {
          defvar T = "(Tuple:" # nf # ")";
          def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>;
          // Skip the unsigned variant for floating-point element types.
          if !not(IsFloat<type>.val) then {
            def : RVVBuiltin<T # "Uv", "0PUe" # eew_type # "Uv" # T # "Uv", type>;
          }
        }
      }
    }
  }
}

// 7.8 Vector Load/Store Segment Instructions
// Instantiate all segment loads; loads take a passthru operand.
let UnMaskedPolicyScheme = HasPassthruOperand,
    IsTuple = true in {
  defm : RVVUnitStridedSegLoadTuple<"vlseg">;
  defm : RVVUnitStridedSegLoadFFTuple<"vlseg">;
  defm : RVVStridedSegLoadTuple<"vlsseg">;
  defm : RVVIndexedSegLoadTuple<"vluxseg">;
  defm : RVVIndexedSegLoadTuple<"vloxseg">;
}

// Instantiate all segment stores; stores have no result, hence no policy.
let UnMaskedPolicyScheme = NonePolicy,
    MaskedPolicyScheme = NonePolicy,
    IsTuple = true in {
defm : RVVUnitStridedSegStoreTuple<"vsseg">;
defm : RVVStridedSegStoreTuple<"vssseg">;
defm : RVVIndexedSegStoreTuple<"vsuxseg">;
defm : RVVIndexedSegStoreTuple<"vsoxseg">;
}

// 11. Vector Integer Arithmetic Instructions
// 11.1.
// Vector Single-Width Integer Add and Subtract
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vadd : RVVIntBinBuiltinSet;
defm vsub : RVVIntBinBuiltinSet;
defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
                                 [["vx", "v", "vve"],
                                  ["vx", "Uv", "UvUvUe"]]>;
}
// vneg is a pseudo-instruction lowered via vrsub.
defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;

// 11.2. Vector Widening Integer Add/Subtract
// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
defm vwadd : RVVSignedWidenBinBuiltinSet;
defm vwsub : RVVSignedWidenBinBuiltinSet;
// Widening unsigned integer add/subtract, 2*SEW = 2*SEW +/- SEW
defm vwaddu : RVVUnsignedWidenOp0BinBuiltinSet;
defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
}
// Widening converts are pseudo-instructions lowered via widening add.
defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
                                          [["Uw", "UwUv"]]>;
defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
                                         [["w", "wv"]]>;

// 11.3. Vector Integer Extension
// Narrower Log2LMUL ranges: the source operand is 1/2, 1/4 or 1/8 of the
// destination LMUL, so the largest LMULs are excluded accordingly.
let UnMaskedPolicyScheme = HasPassthruOperand in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
  def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
  def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
}
let Log2LMUL = [-3, -2, -1, 0, 1] in {
  def vsext_vf4 : RVVIntExt<"vsext", "q", "qv", "cs">;
  def vzext_vf4 : RVVIntExt<"vzext", "Uq", "UqUv", "cs">;
}
let Log2LMUL = [-3, -2, -1, 0] in {
  def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
  def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
}
}

// 11.4.
// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
// These always use the carry/mask operand, so there is no separate masked
// form and no masked policy.
let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
  let UnMaskedPolicyScheme = HasPassthruOperand in {
    defm vadc : RVVCarryinBuiltinSet;
    defm vsbc : RVVCarryinBuiltinSet;
  }
  defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
  defm vmadc : RVVIntMaskOutBuiltinSet;
  defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">;
  defm vmsbc : RVVIntMaskOutBuiltinSet;
}

// 11.5. Vector Bitwise Logical Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vand : RVVIntBinBuiltinSet;
defm vxor : RVVIntBinBuiltinSet;
defm vor : RVVIntBinBuiltinSet;
}
// vnot is a pseudo-instruction lowered via vxor with all-ones.
defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;

// 11.6. Vector Single-Width Shift Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsll : RVVShiftBuiltinSet;
defm vsrl : RVVUnsignedShiftBuiltinSet;
defm vsra : RVVSignedShiftBuiltinSet;

// 11.7. Vector Narrowing Integer Right Shift Instructions
defm vnsrl : RVVUnsignedNShiftBuiltinSet;
defm vnsra : RVVSignedNShiftBuiltinSet;
}
// Narrowing convert is a pseudo-instruction lowered via vnsrl by zero.
defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
                                         [["v", "vw"],
                                          ["Uv", "UvUw"]]>;

// 11.8. Vector Integer Compare Instructions
// Comparisons produce a mask register; mask results have no tail policy.
let MaskedPolicyScheme = HasPassthruOperand,
    HasTailPolicy = false in {
defm vmseq : RVVIntMaskOutBuiltinSet;
defm vmsne : RVVIntMaskOutBuiltinSet;
defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
defm vmslt : RVVSignedMaskOutBuiltinSet;
defm vmsleu : RVVUnsignedMaskOutBuiltinSet;
defm vmsle : RVVSignedMaskOutBuiltinSet;
defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
defm vmsgt : RVVSignedMaskOutBuiltinSet;
defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
defm vmsge : RVVSignedMaskOutBuiltinSet;
}

// 11.9.
// Vector Integer Min/Max Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vminu : RVVUnsignedBinBuiltinSet;
defm vmin : RVVSignedBinBuiltinSet;
defm vmaxu : RVVUnsignedBinBuiltinSet;
defm vmax : RVVSignedBinBuiltinSet;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm vmul : RVVIntBinBuiltinSet;
defm vmulh : RVVSignedBinBuiltinSet;
defm vmulhu : RVVUnsignedBinBuiltinSet;
// vmulhsu: signed * unsigned high multiply; second operand is unsigned.
defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
                                   [["vv", "v", "vvUv"],
                                    ["vx", "v", "vvUe"]]>;

// 11.11. Vector Integer Divide Instructions
defm vdivu : RVVUnsignedBinBuiltinSet;
defm vdiv : RVVSignedBinBuiltinSet;
defm vremu : RVVUnsignedBinBuiltinSet;
defm vrem : RVVSignedBinBuiltinSet;
}

// 11.12. Vector Widening Integer Multiply Instructions
// 2*SEW result excludes the largest LMUL.
let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
                                    [["vv", "w", "wvv"],
                                     ["vx", "w", "wve"]]>;
defm vwmulu : RVVOutOp0Op1BuiltinSet<"vwmulu", "csi",
                                     [["vv", "Uw", "UwUvUv"],
                                      ["vx", "Uw", "UwUvUe"]]>;
// vwmulsu: signed * unsigned widening multiply.
defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
                                      [["vv", "w", "wvUv"],
                                       ["vx", "w", "wvUe"]]>;
}

// 11.13. Vector Single-Width Integer Multiply-Add Instructions
// Multiply-add reads its accumulator, so the policy is a trailing operand.
let UnMaskedPolicyScheme = HasPolicyOperand in {
defm vmacc : RVVIntTerBuiltinSet;
defm vnmsac : RVVIntTerBuiltinSet;
defm vmadd : RVVIntTerBuiltinSet;
defm vnmsub : RVVIntTerBuiltinSet;

// 11.14. Vector Widening Integer Multiply-Add Instructions
let HasMaskedOffOperand = false,
    Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm vwmaccu : RVVOutOp1Op2BuiltinSet<"vwmaccu", "csi",
                                      [["vv", "Uw", "UwUwUvUv"],
                                       ["vx", "Uw", "UwUwUeUv"]]>;
defm vwmacc : RVVOutOp1Op2BuiltinSet<"vwmacc", "csi",
                                     [["vv", "w", "wwvv"],
                                      ["vx", "w", "wwev"]]>;
defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
                                       [["vv", "w", "wwvUv"],
                                        ["vx", "w", "wweUv"]]>;
// vwmaccus only has the vector-scalar form.
defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                       [["vx", "w", "wwUev"]]>;
}
}

// 11.15. Vector Integer Merge Instructions
// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
let HasMasked = false,
    UnMaskedPolicyScheme = HasPassthruOperand,
    MaskedPolicyScheme = NonePolicy,
    ManualCodegen = [{
      // insert poison passthru
      if (PolicyAttrs & RVV_VTA)
        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
    }] in {
  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
                                    [["vvm", "v", "vvvm"],
                                     ["vxm", "v", "vvem"],
                                     ["vvm", "Uv", "UvUvUvm"],
                                     ["vxm", "Uv", "UvUvUem"]]>;
}

// 11.16. Vector Integer Move Instructions
let HasMasked = false,
    UnMaskedPolicyScheme = HasPassthruOperand,
    MaskedPolicyScheme = NonePolicy,
    OverloadedName = "vmv_v" in {
  defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                                [["v", "Uv", "UvUv"]]>;
  defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilfd",
                                [["v", "v", "vv"]]>;
  // fp16 move requires only Zvfhmin, not full Zvfh.
  let RequiredFeatures = ["Zvfhmin"] in
    defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x",
                                  [["v", "v", "vv"]]>;
  // vmv.v.x takes a scalar, so no overloaded (type-inferred) form.
  let SupportOverloading = false in
    defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
                                  [["x", "v", "ve"],
                                   ["x", "Uv", "UvUe"]]>;
}

// 12.
Vector Fixed-Point Arithmetic Instructions 1358let HeaderCode = 1359[{ 1360enum __RISCV_VXRM { 1361 __RISCV_VXRM_RNU = 0, 1362 __RISCV_VXRM_RNE = 1, 1363 __RISCV_VXRM_RDN = 2, 1364 __RISCV_VXRM_ROD = 3, 1365}; 1366}] in 1367def vxrm_enum : RVVHeader; 1368 1369// 12.1. Vector Single-Width Saturating Add and Subtract 1370let UnMaskedPolicyScheme = HasPassthruOperand in { 1371defm vsaddu : RVVUnsignedBinBuiltinSet; 1372defm vsadd : RVVSignedBinBuiltinSet; 1373defm vssubu : RVVUnsignedBinBuiltinSet; 1374defm vssub : RVVSignedBinBuiltinSet; 1375 1376let ManualCodegen = [{ 1377 { 1378 // LLVM intrinsic 1379 // Unmasked: (passthru, op0, op1, round_mode, vl) 1380 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 1381 1382 SmallVector<llvm::Value*, 7> Operands; 1383 bool HasMaskedOff = !( 1384 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1385 (!IsMasked && PolicyAttrs & RVV_VTA)); 1386 unsigned Offset = IsMasked ? 1387 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1388 1389 if (!HasMaskedOff) 1390 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1391 else 1392 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1393 1394 Operands.push_back(Ops[Offset]); // op0 1395 Operands.push_back(Ops[Offset + 1]); // op1 1396 1397 if (IsMasked) 1398 Operands.push_back(Ops[0]); // mask 1399 1400 Operands.push_back(Ops[Offset + 2]); // vxrm 1401 Operands.push_back(Ops[Offset + 3]); // vl 1402 1403 if (IsMasked) 1404 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1405 1406 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()}; 1407 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1408 return Builder.CreateCall(F, Operands, ""); 1409 } 1410}] in { 1411 // 12.2. 
Vector Single-Width Averaging Add and Subtract 1412 defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode; 1413 defm vaadd : RVVSignedBinBuiltinSetRoundingMode; 1414 defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode; 1415 defm vasub : RVVSignedBinBuiltinSetRoundingMode; 1416 1417 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation 1418 defm vsmul : RVVSignedBinBuiltinSetRoundingMode; 1419 1420 // 12.4. Vector Single-Width Scaling Shift Instructions 1421 defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode; 1422 defm vssra : RVVSignedShiftBuiltinSetRoundingMode; 1423} 1424 1425let ManualCodegen = [{ 1426 { 1427 // LLVM intrinsic 1428 // Unmasked: (passthru, op0, op1, round_mode, vl) 1429 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 1430 1431 SmallVector<llvm::Value*, 7> Operands; 1432 bool HasMaskedOff = !( 1433 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1434 (!IsMasked && PolicyAttrs & RVV_VTA)); 1435 unsigned Offset = IsMasked ? 1436 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1437 1438 if (!HasMaskedOff) 1439 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1440 else 1441 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1442 1443 Operands.push_back(Ops[Offset]); // op0 1444 Operands.push_back(Ops[Offset + 1]); // op1 1445 1446 if (IsMasked) 1447 Operands.push_back(Ops[0]); // mask 1448 1449 Operands.push_back(Ops[Offset + 2]); // vxrm 1450 Operands.push_back(Ops[Offset + 3]); // vl 1451 1452 if (IsMasked) 1453 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1454 1455 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1456 Ops.back()->getType()}; 1457 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1458 return Builder.CreateCall(F, Operands, ""); 1459 } 1460}] in { 1461 // 12.5. 
Vector Narrowing Fixed-Point Clip Instructions 1462 defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode; 1463 defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode; 1464} 1465} 1466 1467// 13. Vector Floating-Point Instructions 1468let HeaderCode = 1469[{ 1470enum __RISCV_FRM { 1471 __RISCV_FRM_RNE = 0, 1472 __RISCV_FRM_RTZ = 1, 1473 __RISCV_FRM_RDN = 2, 1474 __RISCV_FRM_RUP = 3, 1475 __RISCV_FRM_RMM = 4, 1476}; 1477}] in def frm_enum : RVVHeader; 1478 1479let UnMaskedPolicyScheme = HasPassthruOperand in { 1480let ManualCodegen = [{ 1481 { 1482 // LLVM intrinsic 1483 // Unmasked: (passthru, op0, op1, round_mode, vl) 1484 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1485 1486 SmallVector<llvm::Value*, 7> Operands; 1487 bool HasMaskedOff = !( 1488 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1489 (!IsMasked && PolicyAttrs & RVV_VTA)); 1490 bool HasRoundModeOp = IsMasked ? 1491 (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) : 1492 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4); 1493 1494 unsigned Offset = IsMasked ? 1495 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1496 1497 if (!HasMaskedOff) 1498 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1499 else 1500 Operands.push_back(Ops[IsMasked ? 
1 : 0]); 1501 1502 Operands.push_back(Ops[Offset]); // op0 1503 Operands.push_back(Ops[Offset + 1]); // op1 1504 1505 if (IsMasked) 1506 Operands.push_back(Ops[0]); // mask 1507 1508 if (HasRoundModeOp) { 1509 Operands.push_back(Ops[Offset + 2]); // frm 1510 Operands.push_back(Ops[Offset + 3]); // vl 1511 } else { 1512 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1513 Operands.push_back(Ops[Offset + 2]); // vl 1514 } 1515 1516 if (IsMasked) 1517 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1518 1519 IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), 1520 Operands.back()->getType()}; 1521 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1522 return Builder.CreateCall(F, Operands, ""); 1523 } 1524}] in { 1525 let HasFRMRoundModeOp = true in { 1526 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1527 defm vfadd : RVVFloatingBinBuiltinSetRoundingMode; 1528 defm vfsub : RVVFloatingBinBuiltinSetRoundingMode; 1529 defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode; 1530 1531 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1532 // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW 1533 defm vfwadd : RVVFloatingWidenOp0BinBuiltinSetRoundingMode; 1534 defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode; 1535 1536 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1537 defm vfmul : RVVFloatingBinBuiltinSetRoundingMode; 1538 defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode; 1539 defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode; 1540 } 1541 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions 1542 defm vfadd : RVVFloatingBinBuiltinSet; 1543 defm vfsub : RVVFloatingBinBuiltinSet; 1544 defm vfrsub : RVVFloatingBinVFBuiltinSet; 1545 1546 // 13.3. 
Vector Widening Floating-Point Add/Subtract Instructions 1547 // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW 1548 defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet; 1549 defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet; 1550 1551 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 1552 defm vfmul : RVVFloatingBinBuiltinSet; 1553 defm vfdiv : RVVFloatingBinBuiltinSet; 1554 defm vfrdiv : RVVFloatingBinVFBuiltinSet; 1555} 1556 1557let ManualCodegen = [{ 1558 { 1559 // LLVM intrinsic 1560 // Unmasked: (passthru, op0, op1, round_mode, vl) 1561 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1562 1563 SmallVector<llvm::Value*, 7> Operands; 1564 bool HasMaskedOff = !( 1565 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1566 (!IsMasked && PolicyAttrs & RVV_VTA)); 1567 bool HasRoundModeOp = IsMasked ? 1568 (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) : 1569 (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4); 1570 1571 unsigned Offset = IsMasked ? 1572 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1573 1574 if (!HasMaskedOff) 1575 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1576 else 1577 Operands.push_back(Ops[IsMasked ? 
1 : 0]); 1578 1579 Operands.push_back(Ops[Offset]); // op0 1580 Operands.push_back(Ops[Offset + 1]); // op1 1581 1582 if (IsMasked) 1583 Operands.push_back(Ops[0]); // mask 1584 1585 if (HasRoundModeOp) { 1586 Operands.push_back(Ops[Offset + 2]); // frm 1587 Operands.push_back(Ops[Offset + 3]); // vl 1588 } else { 1589 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1590 Operands.push_back(Ops[Offset + 2]); // vl 1591 } 1592 1593 if (IsMasked) 1594 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1595 1596 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1597 Ops.back()->getType()}; 1598 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1599 return Builder.CreateCall(F, Operands, ""); 1600 } 1601}] in { 1602 let HasFRMRoundModeOp = true in { 1603 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1604 // Widening FP add/subtract, 2*SEW = SEW +/- SEW 1605 defm vfwadd : RVVFloatingWidenBinBuiltinSetRoundingMode; 1606 defm vfwsub : RVVFloatingWidenBinBuiltinSetRoundingMode; 1607 1608 // 13.5. Vector Widening Floating-Point Multiply 1609 let Log2LMUL = [-2, -1, 0, 1, 2] in { 1610 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf", 1611 [["vv", "w", "wvvu"], 1612 ["vf", "w", "wveu"]]>; 1613 } 1614 } 1615 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions 1616 // Widening FP add/subtract, 2*SEW = SEW +/- SEW 1617 defm vfwadd : RVVFloatingWidenBinBuiltinSet; 1618 defm vfwsub : RVVFloatingWidenBinBuiltinSet; 1619 1620 // 13.5. 
Vector Widening Floating-Point Multiply 1621 let Log2LMUL = [-2, -1, 0, 1, 2] in { 1622 defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf", 1623 [["vv", "w", "wvv"], 1624 ["vf", "w", "wve"]]>; 1625 } 1626} 1627} 1628 1629 1630let UnMaskedPolicyScheme = HasPolicyOperand in { 1631let ManualCodegen = [{ 1632 { 1633 // LLVM intrinsic 1634 // Unmasked: (passthru, op0, op1, round_mode, vl) 1635 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1636 1637 SmallVector<llvm::Value*, 7> Operands; 1638 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5; 1639 1640 unsigned Offset = IsMasked ? 2 : 1; 1641 1642 Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough 1643 1644 Operands.push_back(Ops[Offset]); // op0 1645 Operands.push_back(Ops[Offset + 1]); // op1 1646 1647 if (IsMasked) 1648 Operands.push_back(Ops[0]); // mask 1649 1650 if (HasRoundModeOp) { 1651 Operands.push_back(Ops[Offset + 2]); // frm 1652 Operands.push_back(Ops[Offset + 3]); // vl 1653 } else { 1654 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1655 Operands.push_back(Ops[Offset + 2]); // vl 1656 } 1657 1658 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1659 1660 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), 1661 Operands.back()->getType()}; 1662 1663 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1664 1665 return Builder.CreateCall(F, Operands, ""); 1666 } 1667}] in { 1668 let HasFRMRoundModeOp = 1 in { 1669 // 13.6. 
Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1670 defm vfmacc : RVVFloatingTerBuiltinSetRoundingMode; 1671 defm vfnmacc : RVVFloatingTerBuiltinSetRoundingMode; 1672 defm vfmsac : RVVFloatingTerBuiltinSetRoundingMode; 1673 defm vfnmsac : RVVFloatingTerBuiltinSetRoundingMode; 1674 defm vfmadd : RVVFloatingTerBuiltinSetRoundingMode; 1675 defm vfnmadd : RVVFloatingTerBuiltinSetRoundingMode; 1676 defm vfmsub : RVVFloatingTerBuiltinSetRoundingMode; 1677 defm vfnmsub : RVVFloatingTerBuiltinSetRoundingMode; 1678 } 1679 // 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions 1680 defm vfmacc : RVVFloatingTerBuiltinSet; 1681 defm vfnmacc : RVVFloatingTerBuiltinSet; 1682 defm vfmsac : RVVFloatingTerBuiltinSet; 1683 defm vfnmsac : RVVFloatingTerBuiltinSet; 1684 defm vfmadd : RVVFloatingTerBuiltinSet; 1685 defm vfnmadd : RVVFloatingTerBuiltinSet; 1686 defm vfmsub : RVVFloatingTerBuiltinSet; 1687 defm vfnmsub : RVVFloatingTerBuiltinSet; 1688} 1689 1690let ManualCodegen = [{ 1691 { 1692 // LLVM intrinsic 1693 // Unmasked: (passthru, op0, op1, round_mode, vl) 1694 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 1695 1696 SmallVector<llvm::Value*, 7> Operands; 1697 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5; 1698 1699 unsigned Offset = IsMasked ? 2 : 1; 1700 1701 Operands.push_back(Ops[IsMasked ? 
1 : 0]); // passthrough 1702 1703 Operands.push_back(Ops[Offset]); // op0 1704 Operands.push_back(Ops[Offset + 1]); // op1 1705 1706 if (IsMasked) 1707 Operands.push_back(Ops[0]); // mask 1708 1709 if (HasRoundModeOp) { 1710 Operands.push_back(Ops[Offset + 2]); // frm 1711 Operands.push_back(Ops[Offset + 3]); // vl 1712 } else { 1713 Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm 1714 Operands.push_back(Ops[Offset + 2]); // vl 1715 } 1716 1717 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1718 1719 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(), 1720 Operands.back()->getType()}; 1721 1722 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1723 1724 return Builder.CreateCall(F, Operands, ""); 1725 } 1726}] in { 1727 let HasFRMRoundModeOp = 1 in { 1728 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1729 defm vfwmacc : RVVFloatingWidenTerBuiltinSetRoundingMode; 1730 defm vfwnmacc : RVVFloatingWidenTerBuiltinSetRoundingMode; 1731 defm vfwmsac : RVVFloatingWidenTerBuiltinSetRoundingMode; 1732 defm vfwnmsac : RVVFloatingWidenTerBuiltinSetRoundingMode; 1733 } 1734 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 1735 defm vfwmacc : RVVFloatingWidenTerBuiltinSet; 1736 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet; 1737 defm vfwmsac : RVVFloatingWidenTerBuiltinSet; 1738 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet; 1739} 1740 1741} 1742 1743let UnMaskedPolicyScheme = HasPassthruOperand in { 1744let ManualCodegen = [{ 1745 { 1746 // LLVM intrinsic 1747 // Unmasked: (passthru, op0, round_mode, vl) 1748 // Masked: (passthru, op0, mask, frm, vl, policy) 1749 1750 SmallVector<llvm::Value*, 7> Operands; 1751 bool HasMaskedOff = !( 1752 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) || 1753 (!IsMasked && PolicyAttrs & RVV_VTA)); 1754 bool HasRoundModeOp = IsMasked ? 1755 (HasMaskedOff ? 
Ops.size() == 5 : Ops.size() == 4) : 1756 (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3); 1757 1758 unsigned Offset = IsMasked ? 1759 (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0); 1760 1761 if (!HasMaskedOff) 1762 Operands.push_back(llvm::PoisonValue::get(ResultType)); 1763 else 1764 Operands.push_back(Ops[IsMasked ? 1 : 0]); 1765 1766 Operands.push_back(Ops[Offset]); // op0 1767 1768 if (IsMasked) 1769 Operands.push_back(Ops[0]); // mask 1770 1771 if (HasRoundModeOp) { 1772 Operands.push_back(Ops[Offset + 1]); // frm 1773 Operands.push_back(Ops[Offset + 2]); // vl 1774 } else { 1775 Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm 1776 Operands.push_back(Ops[Offset + 1]); // vl 1777 } 1778 1779 if (IsMasked) 1780 Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); 1781 1782 IntrinsicTypes = {ResultType, Operands.back()->getType()}; 1783 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); 1784 return Builder.CreateCall(F, Operands, ""); 1785 } 1786}] in { 1787 let HasFRMRoundModeOp = 1 in { 1788 // 13.8. Vector Floating-Point Square-Root Instruction 1789 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vvu"]]>; 1790 1791 // 13.10. Vector Floating-Point Reciprocal Estimate Instruction 1792 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vvu"]]>; 1793 } 1794 // 13.8. Vector Floating-Point Square-Root Instruction 1795 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vv"]]>; 1796 1797 // 13.10. Vector Floating-Point Reciprocal Estimate Instruction 1798 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vv"]]>; 1799} 1800 1801// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction 1802def vfrsqrt7 : RVVFloatingUnaryVVBuiltin; 1803 1804// 13.11. Vector Floating-Point MIN/MAX Instructions 1805defm vfmin : RVVFloatingBinBuiltinSet; 1806defm vfmax : RVVFloatingBinBuiltinSet; 1807 1808// 13.12. 
Vector Floating-Point Sign-Injection Instructions 1809defm vfsgnj : RVVFloatingBinBuiltinSet; 1810defm vfsgnjn : RVVFloatingBinBuiltinSet; 1811defm vfsgnjx : RVVFloatingBinBuiltinSet; 1812} 1813defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">; 1814defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">; 1815 1816// 13.13. Vector Floating-Point Compare Instructions 1817let MaskedPolicyScheme = HasPassthruOperand, 1818 HasTailPolicy = false in { 1819defm vmfeq : RVVFloatingMaskOutBuiltinSet; 1820defm vmfne : RVVFloatingMaskOutBuiltinSet; 1821defm vmflt : RVVFloatingMaskOutBuiltinSet; 1822defm vmfle : RVVFloatingMaskOutBuiltinSet; 1823defm vmfgt : RVVFloatingMaskOutBuiltinSet; 1824defm vmfge : RVVFloatingMaskOutBuiltinSet; 1825} 1826 1827// 13.14. Vector Floating-Point Classify Instruction 1828let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in 1829 def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">; 1830 1831// 13.15. Vector Floating-Point Merge Instruction 1832// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl) 1833let HasMasked = false, 1834 UnMaskedPolicyScheme = HasPassthruOperand, 1835 MaskedPolicyScheme = NonePolicy, 1836 ManualCodegen = [{ 1837 // insert poison passthru 1838 if (PolicyAttrs & RVV_VTA) 1839 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); 1840 IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()}; 1841 }] in { 1842 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "fd", 1843 [["vvm", "v", "vvvm"]]>; 1844 let RequiredFeatures = ["Zvfhmin"] in 1845 defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x", 1846 [["vvm", "v", "vvvm"]]>; 1847 defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd", 1848 [["vfm", "v", "vvem"]]>; 1849} 1850 1851// 13.16. 
// Vector Floating-Point Move Instruction
// Unmasked only (HasMasked = false); no overloaded (type-inferred) form.
let HasMasked = false,
    UnMaskedPolicyScheme = HasPassthruOperand,
    SupportOverloading = false,
    MaskedPolicyScheme = NonePolicy,
    OverloadedName = "vfmv_v" in
  defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
                                 [["f", "v", "ve"]]>;

// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
// Widening ops exclude LMUL = 8 (Log2LMUL stops at 2): the destination is
// one LMUL step wider than the source.
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
  def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
  def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
  def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
  def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
  def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "f", "vfwcvt_f">;
  // FP16 source variant is gated on Zvfhmin but emits the same builtin
  // name and IR intrinsic as the generic definition above.
  let RequiredFeatures = ["Zvfhmin"] in
    def vfwcvt_f_f_v_fp16 : RVVConvBuiltin<"w", "wv", "x", "vfwcvt_f"> {
      let Name = "vfwcvt_f_f_v";
      let IRName = "vfwcvt_f_f_v";
      let MaskedIRName = "vfwcvt_f_f_v_mask";
    }
}

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
  def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
  def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
  def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
}
let ManualCodegen = [{
  {
    // LLVM intrinsic
    // Unmasked: (passthru, op0, frm, vl)
    // Masked: (passthru, op0, mask, frm, vl, policy)
    SmallVector<llvm::Value*, 7> Operands;
    bool HasMaskedOff = !(
        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
    // The rounding-mode overloads carry one extra source-level operand.
    bool HasRoundModeOp = IsMasked ?
        (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
        (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);

    unsigned Offset = IsMasked ?
        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);

    if (!HasMaskedOff)
      Operands.push_back(llvm::PoisonValue::get(ResultType));
    else
      Operands.push_back(Ops[IsMasked ? 1 : 0]);

    Operands.push_back(Ops[Offset]); // op0

    if (IsMasked)
      Operands.push_back(Ops[0]); // mask

    if (HasRoundModeOp) {
      Operands.push_back(Ops[Offset + 1]); // frm
      Operands.push_back(Ops[Offset + 2]); // vl
    } else {
      // frm = 7 when no rounding-mode operand is given (presumably DYN,
      // the dynamic rounding mode -- confirm against the RISC-V frm
      // encoding).
      Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
      Operands.push_back(Ops[Offset + 1]); // vl
    }

    if (IsMasked)
      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

    // Conversions are overloaded on both the result and the source vector
    // type, plus the vl/XLEN type.
    IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
                      Operands.back()->getType()};
    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
    return Builder.CreateCall(F, Operands, "");
  }
}] in {
  // Variants with an explicit rounding-mode operand (prototype suffix "u").
  let HasFRMRoundModeOp = 1 in {
    // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
    let OverloadedName = "vfcvt_x" in
      defm :
        RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivvu"]]>;
    let OverloadedName = "vfcvt_xu" in
      defm :
        RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvvu"]]>;
    let OverloadedName = "vfcvt_f" in {
      defm :
        RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvvu"]]>;
      defm :
        RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUvu"]]>;
    }

    // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
    let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
      let OverloadedName = "vfwcvt_x" in
        defm :
          RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwvu"]]>;
      let OverloadedName = "vfwcvt_xu" in
        defm :
          RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwvu"]]>;
    }
    // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
    let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
      let OverloadedName = "vfncvt_x" in
        defm :
          RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>;
      let OverloadedName = "vfncvt_xu" in
        defm :
          RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>;
      let OverloadedName = "vfncvt_f" in {
        defm :
          RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvwu"]]>;
        defm :
          RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUwu"]]>;
      }
      let OverloadedName = "vfncvt_f" in {
        defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vwu"]]>;
        let RequiredFeatures = ["Zvfhmin"] in
          defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
      }
    }
  }

  // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
  let OverloadedName = "vfcvt_x" in
    defm :
      RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivv"]]>;
  let OverloadedName = "vfcvt_xu" in
    defm :
      RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvv"]]>;
  let OverloadedName = "vfcvt_f" in {
    defm :
      RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvv"]]>;
    defm :
      RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUv"]]>;
  }

  // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
  let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
    let OverloadedName = "vfwcvt_x" in
      defm :
        RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwv"]]>;
    let OverloadedName = "vfwcvt_xu" in
      defm :
        RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwv"]]>;
  }
  // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
  let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
    let OverloadedName = "vfncvt_x" in
      defm :
        RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>;
    let OverloadedName = "vfncvt_xu" in
      defm :
        RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>;
    let OverloadedName = "vfncvt_f" in {
      defm :
        RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvw"]]>;
      defm :
        RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUw"]]>;
    }
    let OverloadedName = "vfncvt_f" in {
      defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vw"]]>;
      let RequiredFeatures = ["Zvfhmin"] in
        defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
    }
  }
}
}

// 14. Vector Reduction Operations
// 14.1.
// Vector Single-Width Integer Reduction Instructions
// Both masked and unmasked forms take a passthru operand; no mask-policy
// variants are generated (HasMaskPolicy = false).
let UnMaskedPolicyScheme = HasPassthruOperand,
    MaskedPolicyScheme = HasPassthruOperand,
    HasMaskPolicy = false in {
defm vredsum : RVVIntReductionBuiltinSet;
defm vredmaxu : RVVUnsignedReductionBuiltin;
defm vredmax : RVVSignedReductionBuiltin;
defm vredminu : RVVUnsignedReductionBuiltin;
defm vredmin : RVVSignedReductionBuiltin;
defm vredand : RVVIntReductionBuiltinSet;
defm vredor : RVVIntReductionBuiltinSet;
defm vredxor : RVVIntReductionBuiltinSet;

// 14.2. Vector Widening Integer Reduction Instructions
// Vector Widening Integer Reduction Operations
let HasMaskedOffOperand = true in {
  defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
                                      [["vs", "vSw", "SwvSw"]]>;
  defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
                                       [["vs", "UvUSw", "USwUvUSw"]]>;
}

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
let ManualCodegen = [{
  {
    // LLVM intrinsic
    // Unmasked: (passthru, op0, op1, round_mode, vl)
    // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)

    SmallVector<llvm::Value*, 7> Operands;
    bool HasMaskedOff = !(
        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
    // Reductions have two value operands, so the size thresholds are one
    // higher than for the unary/conversion codegen blocks.
    bool HasRoundModeOp = IsMasked ?
        (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
        (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);

    unsigned Offset = IsMasked ?
        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);

    if (!HasMaskedOff)
      Operands.push_back(llvm::PoisonValue::get(ResultType));
    else
      Operands.push_back(Ops[IsMasked ? 1 : 0]);

    Operands.push_back(Ops[Offset]); // op0
    Operands.push_back(Ops[Offset + 1]); // op1

    if (IsMasked)
      Operands.push_back(Ops[0]); // mask

    if (HasRoundModeOp) {
      Operands.push_back(Ops[Offset + 2]); // frm
      Operands.push_back(Ops[Offset + 3]); // vl
    } else {
      // frm = 7 when no rounding-mode operand is given (presumably DYN;
      // confirm against the RISC-V frm encoding).
      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
      Operands.push_back(Ops[Offset + 2]); // vl
    }

    // NOTE(review): the masked layout comment above lists a trailing
    // `policy` operand, but no PolicyAttrs value is pushed here -- confirm
    // this matches the IR intrinsic signature for masked FP reductions.
    IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
                      Ops.back()->getType()};
    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
    return Builder.CreateCall(F, Operands, "");
  }
}] in {
  // Variants with an explicit rounding-mode operand.
  let HasFRMRoundModeOp = 1 in {
    // 14.3. Vector Single-Width Floating-Point Reduction Instructions
    defm vfredusum : RVVFloatingReductionBuiltinRoundingMode;
    defm vfredosum : RVVFloatingReductionBuiltinRoundingMode;

    // 14.4. Vector Widening Floating-Point Reduction Instructions
    defm vfwredusum : RVVFloatingWidenReductionBuiltinRoundingMode;
    defm vfwredosum : RVVFloatingWidenReductionBuiltinRoundingMode;
  }
  // 14.3. Vector Single-Width Floating-Point Reduction Instructions
  defm vfredusum : RVVFloatingReductionBuiltin;
  defm vfredosum : RVVFloatingReductionBuiltin;

  // 14.4. Vector Widening Floating-Point Reduction Instructions
  defm vfwredusum : RVVFloatingWidenReductionBuiltin;
  defm vfwredosum : RVVFloatingWidenReductionBuiltin;
}
}

// 15. Vector Mask Instructions
// 15.1.
// Vector Mask-Register Logical Instructions
def vmand : RVVMaskBinBuiltin;
def vmnand : RVVMaskBinBuiltin;
def vmandn : RVVMaskBinBuiltin;
def vmxor : RVVMaskBinBuiltin;
def vmor : RVVMaskBinBuiltin;
def vmnor : RVVMaskBinBuiltin;
def vmorn : RVVMaskBinBuiltin;
def vmxnor : RVVMaskBinBuiltin;
// pseudoinstructions
def vmclr : RVVMaskNullaryBuiltin;
def vmset : RVVMaskNullaryBuiltin;
// Mask-copy and mask-not are implemented on top of vmand/vmnand
// (see RVVPseudoMaskBuiltin).
defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;

// Scalar-producing mask queries: no policy applies.
let MaskedPolicyScheme = NonePolicy in {
// 15.2. Vector count population in mask vcpop.m
def vcpop : RVVMaskOp0Builtin<"um">;

// 15.3. vfirst find-first-set mask bit
def vfirst : RVVMaskOp0Builtin<"lm">;
}

let MaskedPolicyScheme = HasPassthruOperand,
    HasTailPolicy = false in {
// 15.4. vmsbf.m set-before-first mask bit
def vmsbf : RVVMaskUnaryBuiltin;

// 15.5. vmsif.m set-including-first mask bit
def vmsif : RVVMaskUnaryBuiltin;

// 15.6. vmsof.m set-only-first mask bit
def vmsof : RVVMaskUnaryBuiltin;
}

let UnMaskedPolicyScheme = HasPassthruOperand, SupportOverloading = false in {
  // 15.8. Vector Iota Instruction
  defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;

  // 15.9. Vector Element Index Instruction
  defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
                                              ["v", "Uv", "Uv"]]>;
}

// 16. Vector Permutation Instructions
// 16.1.
// Integer Scalar Move Instructions
let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
  let HasVL = false, OverloadedName = "vmv_x" in
    defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                  [["s", "ve", "ev"],
                                   ["s", "UvUe", "UeUv"]]>;
  let OverloadedName = "vmv_s",
      UnMaskedPolicyScheme = HasPassthruOperand,
      SupportOverloading = false in
    defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
                                  [["x", "v", "ve"],
                                   ["x", "Uv", "UvUe"]]>;
}

// 16.2. Floating-Point Scalar Move Instructions
let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
  let HasVL = false, OverloadedName = "vfmv_f" in
    defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                   [["s", "ve", "ev"]]>;
  let OverloadedName = "vfmv_s",
      UnMaskedPolicyScheme = HasPassthruOperand,
      SupportOverloading = false in
    defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
                                   [["f", "v", "ve"],
                                    ["x", "Uv", "UvUe"]]>;
}

// 16.3. Vector Slide Instructions
// 16.3.1. Vector Slideup Instructions
defm vslideup : RVVSlideUpBuiltinSet;
// 16.3.2. Vector Slidedown Instructions
defm vslidedown : RVVSlideDownBuiltinSet;

// 16.3.3. Vector Slide1up Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
defm vfslide1up : RVVFloatingBinVFBuiltinSet;

// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
defm vfslide1down : RVVFloatingBinVFBuiltinSet;

// 16.4. Vector Register Gather Instructions
// signed and floating type
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
                                 [["vv", "v", "vvUv"]]>;
defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
                                 [["vx", "v", "vvz"]]>;
// (Log2EEW:4) fixes the index vector to 16-bit elements.
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
                                     [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
// unsigned type
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",
                                 [["vv", "Uv", "UvUvUv"]]>;
defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
                                 [["vx", "Uv", "UvUvz"]]>;
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                     [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
}

// 16.5. Vector Compress Instruction
let HasMasked = false,
    UnMaskedPolicyScheme = HasPassthruOperand,
    MaskedPolicyScheme = NonePolicy,
    ManualCodegen = [{
      // insert poison passthru
      if (PolicyAttrs & RVV_VTA)
        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
      IntrinsicTypes = {ResultType, Ops.back()->getType()};
    }] in {
  // signed and floating type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
                                    [["vm", "v", "vvm"]]>;
  // unsigned type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
                                    [["vm", "Uv", "UvUvm"]]>;
}

// Miscellaneous
// These helpers lower directly in ManualCodegen (IRName = "") and take no
// vl operand.
let HasMasked = false, HasVL = false, IRName = "" in {
  let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
        // Boolean (i1) vectors cannot be bitcast directly to/from integer
        // vectors; round-trip through a fixed <vscale x 64 x i1> type.
        if (ResultType->isIntOrIntVectorTy(1) ||
            Ops[0]->getType()->isIntOrIntVectorTy(1)) {
          assert(isa<ScalableVectorType>(ResultType) &&
                 isa<ScalableVectorType>(Ops[0]->getType()));

          LLVMContext &Context = CGM.getLLVMContext();
          ScalableVectorType *Boolean64Ty =
              ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);

          if (ResultType->isIntOrIntVectorTy(1)) {
            // Casting from m1 vector integer -> vector boolean
            // Ex: <vscale x 8 x i8>
            //     --(bitcast)--------> <vscale x 64 x i1>
            //     --(vector_extract)-> <vscale x 8 x i1>
            llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
            return Builder.CreateExtractVector(ResultType, BitCast,
                                               ConstantInt::get(Int64Ty, 0));
          } else {
            // Casting from vector boolean -> m1 vector integer
            // Ex: <vscale x 1 x i1>
            //     --(vector_insert)-> <vscale x 64 x i1>
            //     --(bitcast)-------> <vscale x 8 x i8>
            llvm::Value *Boolean64Val =
                Builder.CreateInsertVector(Boolean64Ty,
                                           llvm::PoisonValue::get(Boolean64Ty),
                                           Ops[0],
                                           ConstantInt::get(Int64Ty, 0));
            return Builder.CreateBitCast(Boolean64Val, ResultType);
          }
        }
        // All other reinterprets are plain same-size bitcasts.
        return Builder.CreateBitCast(Ops[0], ResultType);
      }] in {
    // Reinterpret between different type under the same SEW and LMUL
    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">;
    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">;
    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">;
    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">;
    let RequiredFeatures = ["Zvfhmin"] in {
      def vreinterpret_i_h : RVVBuiltin<"Fvv", "vFv", "s", "v">;
      def vreinterpret_u_h : RVVBuiltin<"FvUv", "UvFv", "s", "Uv">;
      def vreinterpret_h_i : RVVBuiltin<"vFv", "Fvv", "s", "Fv">;
      def vreinterpret_h_u : RVVBuiltin<"UvFv", "FvUv", "s", "Fv">;
    }

    // Reinterpret between different SEW under the same LMUL
    foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
                       "(FixedSEW:64)"] in {
      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v",
                                                 dst_sew # "vv", "csil",
                                                 dst_sew # "v">;
      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
                                                 dst_sew # "UvUv", "csil",
                                                 dst_sew # "Uv">;
    }

    // Existing users of FixedSEW - the reinterpretation between different SEW
    // and same LMUL has the implicit assumption that if FixedSEW is set to the
    // given element width, then the type will be identified as invalid, thus
    // skipping definition of reinterpret of SEW=8 to SEW=8. However this blocks
    // our usage here of defining all possible combinations of a fixed SEW to
    // any boolean. So we need to separately define SEW=8 here.
    // Reinterpret from LMUL=1 integer type to vector boolean type
    // NOTE(review): "vreintrepret"/"usigned" in the four record names below
    // look like typos for "vreinterpret"/"unsigned"; harmless if nothing
    // references these records by name, but confirm before renaming.
    def vreintrepret_m1_b8_signed :
        RVVBuiltin<"Svm",
                   "mSv",
                   "c", "m">;
    def vreintrepret_m1_b8_usigned :
        RVVBuiltin<"USvm",
                   "mUSv",
                   "c", "m">;

    // Reinterpret from vector boolean type to LMUL=1 integer type
    def vreintrepret_b8_m1_signed :
        RVVBuiltin<"mSv",
                   "Svm",
                   "c", "Sv">;
    def vreintrepret_b8_m1_usigned :
        RVVBuiltin<"mUSv",
                   "USvm",
                   "c", "USv">;

    foreach dst_sew = ["16", "32", "64"] in {
      // Reinterpret from LMUL=1 integer type to vector boolean type
      def vreinterpret_m1_b # dst_sew # _signed:
          RVVBuiltin<"(FixedSEW:" # dst_sew # ")Svm",
                     "m(FixedSEW:" # dst_sew # ")Sv",
                     "c", "m">;
      def vreinterpret_m1_b # dst_sew # _unsigned:
          RVVBuiltin<"(FixedSEW:" # dst_sew # ")USvm",
                     "m(FixedSEW:" # dst_sew # ")USv",
                     "c", "m">;
      // Reinterpret from vector boolean type to LMUL=1 integer type
      def vreinterpret_b # dst_sew # _m1_signed:
          RVVBuiltin<"m(FixedSEW:" # dst_sew # ")Sv",
                     "(FixedSEW:" # dst_sew # ")Svm",
                     "c", "(FixedSEW:" # dst_sew # ")Sv">;
      def vreinterpret_b # dst_sew # _m1_unsigned:
          RVVBuiltin<"m(FixedSEW:" # dst_sew # ")USv",
                     "(FixedSEW:" # dst_sew # ")USvm",
                     "c", "(FixedSEW:" # dst_sew # ")USv">;
    }
  }

  // vundefined: produces a poison value of the requested (tuple) vector type.
  let Name = "vundefined", SupportOverloading = false,
      MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
        return llvm::PoisonValue::get(ResultType);
      }] in {
    def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
    def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;

    foreach nf = NFList in {
      let NF = nf in {
        defvar T = "(Tuple:" # nf # ")";
        def : RVVBuiltin<T # "v", T # "v", "csilxfd">;
        def : RVVBuiltin<T # "Uv", T # "Uv", "csil">;
      }
    }

  }

  // LMUL truncation
  // C/C++ Operand: VecTy, IR Operand: VecTy, Index
  let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
      MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{ {
        // Truncation is a vector_extract of the low sub-vector (index 0).
        return Builder.CreateExtractVector(ResultType, Ops[0],
                                           ConstantInt::get(Int64Ty, 0));
      } }] in {
    foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
                        "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
                                              dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
                                                dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
    }
  }

  // LMUL extension
  // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
  let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
      MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
        // Extension inserts the source at index 0 of a poison wider vector.
        return Builder.CreateInsertVector(ResultType,
                                          llvm::PoisonValue::get(ResultType),
                                          Ops[0], ConstantInt::get(Int64Ty, 0));
      }] in {
    // NOTE(review): "(LFixedLog2LMUL:-0)" below is suspicious -- the parallel
    // vlmul_trunc list uses "(SFixedLog2LMUL:0)"; confirm "-0" is parsed as 0.
    foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
                        "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
                                            dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
                                              dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
    }
  }

  let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
      {
        if (isa<StructType>(Ops[0]->getType())) // For tuple type
          // Extract value from index (operand 1) of vtuple (operand 0)
          return Builder.CreateExtractValue(
            Ops[0],
            {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
        auto *VecTy = cast<ScalableVectorType>(ResultType);
        auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
        // Mask to only valid indices.
        unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
        assert(isPowerOf2_32(MaxIndex));
        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
        Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
        // Scale the sub-vector index by the sub-vector element count to get
        // the element offset for vector_extract.
        Ops[1] = Builder.CreateMul(Ops[1],
                                   ConstantInt::get(Ops[1]->getType(),
                                                    VecTy->getMinNumElements()));
        return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
      }
      }] in {
    foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilxfd", dst_lmul # "v">;
      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
    }
    foreach nf = NFList in {
      defvar T = "(Tuple:" # nf # ")";
      def : RVVBuiltin<T # "vv", "v" # T # "vKz", "csilxfd", "v">;
      def : RVVBuiltin<T # "UvUv", "Uv" # T # "UvKz", "csil", "Uv">;
    }
  }

  let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
      ManualCodegen = [{
      {
        if (isa<StructType>(ResultType)) // For tuple type
          // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
          return Builder.CreateInsertValue(
            Ops[0], Ops[2],
            {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
        auto *ResVecTy = cast<ScalableVectorType>(ResultType);
        auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
        // Mask to only valid indices.
        unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
        assert(isPowerOf2_32(MaxIndex));
        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
        Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
        // Scale the sub-vector index by the sub-vector element count to get
        // the element offset for vector_insert.
        Ops[1] = Builder.CreateMul(Ops[1],
                                   ConstantInt::get(Ops[1]->getType(),
                                                    VecTy->getMinNumElements()));
        return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
      }
      }] in {
    foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfd">;
      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul # "UvKzUv", "csil">;
    }
    foreach nf = NFList in {
      defvar T = "(Tuple:" # nf # ")";
      def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "csilxfd">;
      def : RVVBuiltin<"Uv" # T # "Uv", T # "Uv" # T # "UvKzUv", "csil">;
    }
  }

  // vcreate: assemble a tuple (InsertValue per field) or a wide vector
  // (vector_insert of each part at its element offset) from its parts.
  let Name = "vcreate_v",
      UnMaskedPolicyScheme = NonePolicy,
      MaskedPolicyScheme = NonePolicy,
      SupportOverloading = false,
      ManualCodegen = [{
      {
        if (isa<StructType>(ResultType)) {
          unsigned NF = cast<StructType>(ResultType)->getNumElements();
          llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
          for (unsigned I = 0; I < NF; ++I) {
            ReturnTuple = Builder.CreateInsertValue(ReturnTuple, Ops[I], {I});
          }
          return ReturnTuple;
        }
        llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
        auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
        for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
          llvm::Value *Idx =
              ConstantInt::get(Builder.getInt64Ty(),
                               VecTy->getMinNumElements() * I);
          ReturnVector =
              Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
        }
        return ReturnVector;
      }
      }] in {

    defm : RVVNonTupleVCreateBuiltin<1, [0]>;
    defm : RVVNonTupleVCreateBuiltin<2, [0, 1]>;
    defm : RVVNonTupleVCreateBuiltin<3, [0, 1, 2]>;

    foreach nf = NFList in {
      let NF = nf in {
        defvar T = "(Tuple:" # nf # ")";
        defvar V = VString<nf, /*signed=*/true>.S;
        defvar UV = VString<nf, /*signed=*/false>.S;
        def : RVVBuiltin<T # "v", T # "v" # V, "csilxfd">;
        def : RVVBuiltin<T # "Uv", T # "Uv" # UV, "csil">;
      }
    }
  }
}

// Helper: defines both the signed-typed and unsigned-typed unary forms for a
// vector-crypto/bitmanip op over the "csil" integer types.
multiclass RVVOutBuiltinSetZvbb {
  let OverloadedName = NAME in
  defm "" : RVVOutBuiltinSet<NAME, "csil", [["v", "v", "vv"],
                                            ["v", "Uv", "UvUv"]]>;
}

multiclass RVVOutBuiltinSetZvk<bit HasVV = 1, bit HasVS = 1> {
  // vaesz only has 'vs' and vgmul only has 'vv' and they do not have ambiguous
  // prototypes like other zvkned instructions (e.g. vaesdf), so we don't
  // need to encode the operand mnemonics into its intrinsic function name.
  if HasVV then {
    defvar name = NAME # !if(!eq(NAME, "vgmul"), "", "_vv");
    let OverloadedName = name in
    defm "" : RVVOutBuiltinSet<NAME # "_vv", "i",
                               [["vv", "Uv", "UvUvUv"]]>;
  }

  if HasVS then {
    // The 'vs' form fixes vs2 at each of the smaller-or-equal LMULs.
    foreach vs2_lmul = ["(SEFixedLog2LMUL:-1)", "(SEFixedLog2LMUL:0)",
                        "(SEFixedLog2LMUL:1)", "(SEFixedLog2LMUL:2)",
                        "(SEFixedLog2LMUL:3)"] in {
      defvar name = NAME # !if(!eq(NAME, "vaesz"), "", "_vs");
      let OverloadedName = name, IRName = NAME # "_vs", Name = NAME # "_vs",
          IntrinsicTypes = [-1, 1] in
      def NAME # vs2_lmul
          : RVVBuiltin<vs2_lmul # "UvUv", "UvUv" # vs2_lmul # "Uv", "i">;
    }
  }
}

multiclass RVVOutOp2BuiltinSetVVZvk<string type_range = "i">
    : RVVOutOp2BuiltinSet<NAME, type_range, [["vv", "Uv", "UvUvUvUv"]]>;

multiclass RVVOutOp2BuiltinSetVIZvk<string type_range = "i">
    : RVVOutOp2BuiltinSet<NAME, type_range, [["vi", "Uv", "UvUvUvKz"]]>;

multiclass RVVSignedWidenBinBuiltinSetVwsll
    : RVVWidenBuiltinSet<NAME, "csi",
                         [["vv", "Uw", "UwUvUv"],
                          ["vx", "Uw", "UwUvz"]]>;

let UnMaskedPolicyScheme = HasPassthruOperand in {
  // zvkb
  let RequiredFeatures = ["Zvkb", "Experimental"] in {
    defm vandn : RVVUnsignedBinBuiltinSet;
    defm vbrev8 : RVVOutBuiltinSetZvbb;
    defm vrev8 : RVVOutBuiltinSetZvbb;
    defm vrol : RVVUnsignedShiftBuiltinSet;
    defm vror : RVVUnsignedShiftBuiltinSet;
  }

  // zvbb
  let RequiredFeatures = ["Zvbb", "Experimental"] in {
    defm vbrev : RVVOutBuiltinSetZvbb;
    defm vclz : RVVOutBuiltinSetZvbb;
    defm vctz : RVVOutBuiltinSetZvbb;
    defm vcpopv : RVVOutBuiltinSetZvbb;
    let OverloadedName = "vwsll" in
    defm vwsll : RVVSignedWidenBinBuiltinSetVwsll;
  }

  // zvbc
  let RequiredFeatures = ["Zvbc", "Experimental"] in {
    defm vclmul : RVVInt64BinBuiltinSet;
    defm vclmulh : RVVInt64BinBuiltinSet;
  }
}

// Crypto ops below use an explicit policy operand and have no masked forms.
let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
  // zvkg
  let RequiredFeatures = ["Zvkg", "Experimental"] in {
    defm vghsh : RVVOutOp2BuiltinSetVVZvk;
    defm vgmul : RVVOutBuiltinSetZvk<HasVV=1, HasVS=0>;
  }

  // zvkned
  let RequiredFeatures = ["Zvkned", "Experimental"] in {
    defm vaesdf : RVVOutBuiltinSetZvk;
    defm vaesdm : RVVOutBuiltinSetZvk;
    defm vaesef : RVVOutBuiltinSetZvk;
    defm vaesem : RVVOutBuiltinSetZvk;
    let UnMaskedPolicyScheme = HasPassthruOperand in
    defm vaeskf1 : RVVOutOp1BuiltinSet<"vaeskf1", "i", [["vi", "Uv", "UvUvKz"]]>;
    defm vaeskf2 : RVVOutOp2BuiltinSetVIZvk;
    defm vaesz : RVVOutBuiltinSetZvk<HasVV=0>;
  }

  // zvknha
  let RequiredFeatures = ["Zvknha", "Experimental"] in {
    defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"i">;
    defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"i">;
    defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"i">;
  }

  // zvknhb
  let RequiredFeatures = ["Zvknhb", "Experimental"] in {
    defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"il">;
    defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"il">;
    defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"il">;
  }

  // zvksed
  let RequiredFeatures = ["Zvksed", "Experimental"] in {
    let UnMaskedPolicyScheme = HasPassthruOperand in
    defm vsm4k : RVVOutOp1BuiltinSet<"vsm4k", "i", [["vi", "Uv", "UvUvKz"]]>;
    defm vsm4r : RVVOutBuiltinSetZvk;
  }

  // zvksh
  let RequiredFeatures = ["Zvksh", "Experimental"] in {
    defm vsm3c : RVVOutOp2BuiltinSetVIZvk;
    let UnMaskedPolicyScheme = HasPassthruOperand in
    defm vsm3me : RVVOutOp1BuiltinSet<"vsm3me", "i", [["vv", "Uv", "UvUvUv"]]>;
  }
}