//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("State.getTarget().getSubtarget<X86Subtarget>().", F), A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit.  YMM2 and
  // YMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in ST0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
]>;

// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in ST0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention) in which
  // case they use XMM0, otherwise it is the same as the common X86 calling
  // conv.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 sse regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention always returns FP values in XMM0 (and XMM1
  // for a second value).
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;


// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasAVX()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>
]>;

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,


  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX, the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8 , R9 ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8 , R9 ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8 , R9 ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;

// X86-64 Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[
  CCIfType<[i32], CCIfSubtarget<"isTargetWin32()", CCAssignToStack<4, 4>>>,

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8, R9 ]>>>,

  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX]>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCDelegateTo<CC_X86_64_C>
]>;


def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasAVX()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;

def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_ThisCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through EAX
  CCIfSRet<CCAssignToReg<[EAX]>>,

  // The first integer argument is passed in ECX
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;


// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                  R13, R14, R15,
                                                  (sequence "YMM%u", 6, 15))>;

// Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
                                                 (sequence "XMM%u", 8, 15))>;

// Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "YMM%u", 8, 15))>;