X86InstrCompiler.td revision 234353
//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;


// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                         Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[In32BitMode]>;
}

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                         Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[In64BitMode]>;
}



// x86-64 va_start lowering magic.
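// When %al is nonzero, the custom inserter below spills the XMM argument
// registers to the va_list register save area.  Roughly (an illustrative
// sketch only; the exact offsets come from $regsavefi/$offset):
//     testb  %al, %al
//     je     1f
//     movaps %xmm0, <save area>
//     ...    remaining XMM argument registers ...
// 1: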
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4k bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), such as changing the stack
// pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory.  If it does, memory is
// allocated by bumping the stack pointer.  Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[In32BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values.  It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX.  No
// other registers (aside from flags) are touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.
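// A win32 float-to-unsigned conversion is therefore emitted roughly as
// (sketch; exact symbol decoration may differ):
//     fldl   <src>       # push the input onto the x87 stack
//     call   _ftol2      # pops ST(0); 64-bit result returned in EDX:EAX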
let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in {
  def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP32:$src)]>,
                    Requires<[In32BitMode]>;

  def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP64:$src)]>,
                    Requires<[In32BitMode]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)], IIC_RET>;

}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)], IIC_ALU_NONMEM>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
// at isel time interferes with rematerialization in the current register
// allocator.  For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0   : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                  "",
                  [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>;
}

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with
// rematerialization in the current register allocator.  For now, this is
// rewritten when the instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32.  Remove
// when we have a better way to specify isel priority.
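// (Illustration: writing a 32-bit register implicitly zeroes the upper half
// of the corresponding 64-bit register, so "xorl %eax, %eax" (2 bytes)
// zeroes all of RAX, while "xorq %rax, %rax" needs a REX prefix and costs an
// extra byte.)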
let Defs = [EFLAGS], isCodeGenOnly=1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0   : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, 0)], IIC_ALU_NONMEM>;

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)],
                        IIC_ALU_NONMEM>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                 IIC_ALU_NONMEM>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                  IIC_ALU_NONMEM>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                  IIC_ALU_NONMEM>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                   IIC_ALU_NONMEM>;
} // isCodeGenOnly


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
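// (Illustration: "sbb %eax, %eax" computes EAX - EAX - CF, i.e. all-ones when
// the carry flag is set and zero otherwise; a following "and $1, %eax" turns
// that into the same 0/1 value setb would produce.)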
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In32BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
                   Requires<[In32BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
                   Requires<[In32BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                      [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                    Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
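// (For reference: "rep stosl" stores EAX to [EDI], ECX times, advancing EDI
// each iteration -- effectively a memset of ECX doublewords -- which is why
// the definitions below use and clobber the accumulator, count, and
// destination registers.)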
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In32BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
                     Requires<[In32BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP,
                     Requires<[In32BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                        [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                      Requires<[In64BitMode]>;
}

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[In32BitMode]>;

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
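// (Illustration: on Darwin x86-64 an access to a thread-local variable _x is
// emitted roughly as
//     movq   _x@TLVP(%rip), %rdi
//     callq  *(%rdi)
// with the variable's address returned in %rax.)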
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;


//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                     (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                     (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                               EFLAGS))]>;
} // Predicates = [NoCMov]
} // UsesCustomInserter = 1, Uses = [EFLAGS]


//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
                 "#ATOMAND8 PSEUDO!",
                 [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
                "#ATOMOR8 PSEUDO!",
                [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
                 "#ATOMXOR8 PSEUDO!",
                 [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
                  "#ATOMNAND8 PSEUDO!",
                  [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                  "#ATOMAND16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMOR16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                  "#ATOMXOR16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                   "#ATOMNAND16 PSEUDO!",
                   [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                 "#ATOMMIN16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMMAX16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                  "#ATOMUMIN16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                  "#ATOMUMAX16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;


def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                  "#ATOMAND32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMOR32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                  "#ATOMXOR32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                   "#ATOMNAND32 PSEUDO!",
                   [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                 "#ATOMMIN32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMMAX32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                  "#ATOMUMIN32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                  "#ATOMUMAX32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;



def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                  "#ATOMAND64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMOR64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                  "#ATOMXOR64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                   "#ATOMNAND64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMMIN64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMMAX64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                  "#ATOMUMIN64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                  "#ATOMUMAX64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                   (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def #NAME#8mr  : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                   MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                   !strconcat("lock\n\t", mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat("lock\n\t", mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat("lock\n\t", mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                     RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat("lock\n\t", mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_NONMEM>, LOCK;

def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                     ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat("lock\n\t", mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_MEM>, LOCK;

def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat("lock\n\t", mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                        ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat("lock\n\t", mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;

// Optimized codegen when the non-memory output is not used.
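// (Illustration: an atomic increment whose value result is unused can be
// emitted as
//     lock incl (%rdi)
// rather than "lock addl $1, (%rdi)", saving the immediate byte.)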
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
}

// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    isCodeGenOnly = 1 in
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)], IIC_CMPX_LOCK_8B>, TB, LOCK;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    isCodeGenOnly = 1 in
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "lock\n\t"
                     "cmpxchg16b\t$ptr",
                     [(X86cas16 addr:$ptr)], IIC_CMPX_LOCK_16B>, TB, LOCK,
                   Requires<[HasCmpxchg16b]>;

let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)], IIC_CMPX_LOCK_8>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)], IIC_CMPX_LOCK>, TB,
                 OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)], IIC_CMPX_LOCK>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
                    [(X86cas addr:$ptr, GR64:$swap, 8)], IIC_CMPX_LOCK>, TB, LOCK;
}

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))],
                IIC_XADD_LOCK_MEM8>,
              TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))],
                IIC_XADD_LOCK_MEM>,
              TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))],
                IIC_XADD_LOCK_MEM>,
              TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd{q}\t{$val, $ptr|$ptr, $val}",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))],
                 IIC_XADD_LOCK_MEM>,
               TB, LOCK;
}

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
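// (Illustration: since there is no conditional move for these register
// classes, the custom inserter expands each CMOV_* pseudo into a branch
// diamond, roughly:
//     jCC   .Lsink        # condition from EFLAGS: the true value wins
//     # fall-through block: the false value wins
// .Lsink:
//     # $dst = PHI of the two incoming values
// )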
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V4F32 PSEUDO!",
                     [(set VR128:$dst,
                        (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2F64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2I64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V8F32 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V8F32 PSEUDO!",
                     [(set VR256:$dst,
                        (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V4F64 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V4F64 PSEUDO!",
                     [(set VR256:$dst,
                        (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V4I64 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V4I64 PSEUDO!",
                     [(set VR256:$dst,
                        (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
}


//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;



// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable references,
// when not in small code model mode, should use 'movabs'.
// FIXME: This is really a hack, the 'movabs' predicate should handle this
// sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// With the small code model and -static, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;



// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
      Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
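// (Illustration: a select "B ? %reg : <load>" cannot fold the load into
// cmovb directly, but inverting the condition lets it:
//     cmovael <mem>, %reg     # move the loaded value in when AE, i.e. !B
// leaving %reg untouched exactly when B holds.)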
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                       (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register.  Truncate can be lowered to EXTRACT_SUBREG.  CopyFromReg
// may be copying from a truncate.  And x86's cmov doesn't do anything if the
// condition is false.  But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time.  ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
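// (The imm8 forms encode the immediate in a single byte: e.g. "addl $7, %eax"
// is 3 bytes as 83 /0 ib but 6 bytes with a full 32-bit immediate, so listing
// the ri8 patterns first saves code size whenever the constant fits.)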
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions: +2^31 needs a 64-bit immediate, but -2^31 fits in 32 bits.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit
// and with implicit zero-extension instead of a 64-bit and if the immediate
// has at least 32 bits of leading zeros.  If in addition the last 32 bits can
// be represented with a sign extension of an 8-bit constant, use that.
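// (Illustration: for "x & 0x7f" on a 64-bit value no movabs is needed:
//     andl $0x7f, %eax      # the 32-bit op implicitly zeroes RAX's high half
// and because 0x7f is its own 8-bit sign extension, the short AND32ri8
// encoding (83 /4 ib) applies.)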
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;


// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
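
// Illustration: with $src in %ax, (srl GR16:$src, (i8 8)) selects to
//   movzbl %ah, %eax   # high byte read directly; result used as %ax
// instead of an actual shift. The srl_su/and_su fragments only match nodes
// whose result has a single use, so it is safe to replace the shift/mask
// without also materializing its value for other users.
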
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;


// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
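
// E.g. "shll $1, %eax" is emitted as "addl %eax, %eax" instead; on typical
// x86 implementations the add form is at least as fast as the shift-by-one.
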
// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, immShift32)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, immShift32)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, immShift32)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, immShift32)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, immShift32)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, immShift32)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, immShift32)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, immShift32)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, immShift32)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, immShift64)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, immShift64)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, immShift64)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
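
// These rewrites are safe because the hardware shift instructions already
// mask the count register: %cl is taken modulo 32 for 8/16/32-bit shifts and
// modulo 64 for 64-bit shifts. Any mask whose low 5 (resp. 6) bits are all
// ones (checked by immShift32/immShift64) is therefore a no-op, e.g.:
//   andb $31, %cl
//   shll %cl, %eax     ==>     shll %cl, %eax
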
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;


//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
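
// Note that the i16immSExt8/i32immSExt8 variants above pick the shorter
// ri8/rmi8 encodings when the immediate fits in a sign-extended byte; e.g.
// "addl $4, %eax" is encoded as 83 C0 04 (imm8 form) rather than the
// 4-byte-immediate 81 /0 form.
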
// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
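
// The separate INC64_16r/INC64_32r and DEC64_16r/DEC64_32r opcodes exist
// because the one-byte inc/dec encodings (0x40-0x4F) were repurposed as REX
// prefixes in 64-bit mode; there inc/dec must use the longer FF /0 and FF /1
// forms instead.
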
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
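
// There are deliberately no reg/imm patterns with a full 64-bit immediate:
// x86-64 ALU instructions encode at most a sign-extended 32-bit immediate,
// hence only the i64immSExt8/i64immSExt32 forms here. Wider constants must
// be materialized into a register first, or handled as 32-bit operations
// like the i64immZExt32 AND patterns earlier in this section.
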
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
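
// bsf leaves its destination undefined when the source is zero, so only the
// cttz_zero_undef form (where a zero input is explicitly allowed to produce
// an undefined result) may be selected to a bare bsf, e.g.
// "bsfl %ecx, %eax". A plain cttz has to be lowered with an extra zero test.
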