X86InstrInfo.cpp revision 341825
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);
static cl::opt<unsigned>
PartialRegUpdateClearance("partial-reg-update-clearance",
                          cl::desc("Clearance between two register writes "
                                   "for inserting XOR to avoid partial "
                                   "register update"),
                          cl::init(64), cl::Hidden);
static cl::opt<unsigned>
UndefRegClearance("undef-reg-clearance",
                  cl::desc("How many idle instructions we would like before "
                           "certain undef register reads"),
                  cl::init(128), cl::Hidden);


// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET,
                      (STI.is64Bit() ? X86::RETQ : X86::RETL)),
      Subtarget(STI), RI(STI.getTargetTriple()) {
}

bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
    LLVM_FALLTHROUGH;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    unsigned StackAlign = TFI->getStackAlignment();
    int SPAdj = alignTo(getFrameSize(MI), StackAlign);
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() ||
          I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently handle only PUSHes we can reasonably expect to see
  // in call sequences.
  switch (MI.getOpcode()) {
  default:
    return 0;
  case X86::PUSH32i8:
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSHi32:
    return 4;
  case X86::PUSH64i8:
  case X86::PUSH64r:
  case X86::PUSH64rmm:
  case X86::PUSH64rmr:
  case X86::PUSH64i32:
    return 8;
  }
}

/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
      MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
      MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
      MI.getOperand(Op + X86::AddrDisp).isImm() &&
      MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
      MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
      MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::KMOVBkm:
    MemBytes = 1;
    return true;
  case X86::MOV16rm:
  case X86::KMOVWkm:
    MemBytes = 2;
    return true;
  case X86::MOV32rm:
  case X86::MOVSSrm:
  case X86::VMOVSSZrm:
  case X86::VMOVSSrm:
  case X86::KMOVDkm:
    MemBytes = 4;
    return true;
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSDrm:
  case X86::VMOVSDrm:
  case X86::VMOVSDZrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm:
    MemBytes = 8;
    return true;
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
    MemBytes = 16;
    return true;
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
    MemBytes = 32;
    return true;
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
    MemBytes = 64;
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8mr:
  case X86::KMOVBmk:
    MemBytes = 1;
    return true;
  case X86::MOV16mr:
  case X86::KMOVWmk:
    MemBytes = 2;
    return true;
  case X86::MOV32mr:
  case X86::MOVSSmr:
  case X86::VMOVSSmr:
  case X86::VMOVSSZmr:
  case X86::KMOVDmk:
    MemBytes = 4;
    return true;
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSDmr:
  case X86::VMOVSDmr:
  case X86::VMOVSDZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk:
    MemBytes = 8;
    return true;
  case X86::MOVAPSmr:
  case X86::MOVUPSmr:
  case X86::MOVAPDmr:
  case X86::MOVUPDmr:
  case X86::MOVDQAmr:
  case X86::MOVDQUmr:
  case X86::VMOVAPSmr:
  case X86::VMOVUPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVUPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVDQUmr:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
    MemBytes = 16;
    return true;
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
    MemBytes = 32;
    return true;
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
    MemBytes = 64;
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           unsigned &MemBytes) const {
  if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI.getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          unsigned &MemBytes) const {
  if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI.getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

/// Return true if register is PIC base; i.e., defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
    return false;
  bool isPICBase = false;
  for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
         E = MRI.def_instr_end(); I != E; ++I) {
    MachineInstr *DefMI = &*I;
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                     AliasAnalysis *AA) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  // AVX-512
  case X86::VMOVSSZrm:
  case X86::VMOVSDZrm:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad(AA)) {
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
        return false;
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        !MI.getOperand(1 + X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
        return true;
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I) const {
  MachineBasicBlock::iterator E = MBB.end();

  // For compile time consideration, if we are not able to determine the
  // safety after visiting 4 instructions in each direction, we will assume
  // it's not safe.
  MachineBasicBlock::iterator Iter = I;
  for (unsigned i = 0; Iter != E && i < 4; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
        SeenDef = true;
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++Iter;
    // Skip over debug instructions.
    while (Iter != E && Iter->isDebugInstr())
      ++Iter;
  }

  // It is safe to clobber EFLAGS at the end of a block if no successor has it
  // live in.
  if (Iter == E) {
    for (MachineBasicBlock *S : MBB.successors())
      if (S->isLiveIn(X86::EFLAGS))
        return false;
    return true;
  }

  MachineBasicBlock::iterator B = MBB.begin();
  Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    // If we make it to the beginning of the block, it's safe to clobber
    // EFLAGS iff EFLAGS is not live-in.
    if (Iter == B)
      return !MBB.isLiveIn(X86::EFLAGS);

    --Iter;
    // Skip over debug instructions.
    while (Iter != B && Iter->isDebugInstr())
      --Iter;

    bool SawKill = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      // A register mask may clobber EFLAGS, but we should still look for a
      // live EFLAGS def.
      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
        SawKill = true;
      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
        if (MO.isDef()) return MO.isDead();
        if (MO.isKill()) SawKill = true;
      }
    }

    if (SawKill)
      // This instruction kills EFLAGS and doesn't redefine it, so
      // there's no need to look further.
      return true;
  }

  // Conservative answer.
  return false;
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = false;
  for (const MachineOperand &MO : Orig.operands()) {
    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
      ClobbersEFLAGS = true;
      break;
    }
  }

  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig.getOpcode()) {
    case X86::MOV32r0:  Value = 0; break;
    case X86::MOV32r1:  Value = 1; break;
    case X86::MOV32r_1: Value = -1; break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    const DebugLoc &DL = Orig.getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri))
        .add(Orig.getOperand(0))
        .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MBB.insert(I, MI);
  }

  MachineInstr &NewMI = *std::prev(I);
  NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Check whether the shift count for a machine operand is non-zero.
inline static unsigned getTruncatedShiftCount(MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}

/// Check whether the given shift count can be represented by a LEA
/// instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}

bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, unsigned &NewSrc,
                                  bool &isKill, bool &isUndef,
                                  MachineOperand &ImplicitOp,
                                  LiveVariables *LV) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ?
      &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  unsigned SrcReg = Src.getReg();

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    isKill = Src.isKill();
    isUndef = Src.isUndef();

    if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
  // another we need to add 64-bit registers to the final MI.
  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
    isKill = Src.isKill();
    isUndef = Src.isUndef();
  } else {
    // Virtual register of the wrong class, we have to create a temporary
    // 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    MachineInstr *Copy =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
            .add(Src);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;
    isUndef = false;

    if (LV)
      LV->replaceKillInstruction(SrcReg, MI, *Copy);
  }

  // We've set all the parameters without issue.
  return true;
}

/// Helper for convertToThreeAddress when 16-bit LEA is disabled, use 32-bit
/// LEA to form 3-address code by promoting to a 32-bit superregister and then
/// truncating back down to a 16-bit subregister.
MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
    unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
    LiveVariables *LV) const {
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  bool isDead = MI.getOperand(0).isDead();
  bool isKill = MI.getOperand(1).isKill();

  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
  unsigned Opc, leaInReg;
  if (Subtarget.is64Bit()) {
    Opc = X86::LEA64_32r;
    leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  } else {
    Opc = X86::LEA32r;
    leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  }

  // Build and insert into an implicit UNDEF value. This is OK because
  // we'll be shifting and then extracting the lower 16 bits.
  // This has the potential to cause partial register stall. e.g.
  //   movw    (%rbp,%rcx,2), %dx
  //   leal    -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
  MachineInstr *InsMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(leaInReg, RegState::Define, X86::sub_16bit)
          .addReg(Src, getKillRegState(isKill));

  MachineInstrBuilder MIB =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opc), leaOutReg);
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL16ri: {
    unsigned ShAmt = MI.getOperand(2).getImm();
    MIB.addReg(0).addImm(1ULL << ShAmt)
       .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
    break;
  }
  case X86::INC16r:
    addRegOffset(MIB, leaInReg, true, 1);
    break;
  case X86::DEC16r:
    addRegOffset(MIB, leaInReg, true, -1);
    break;
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    addRegOffset(MIB, leaInReg, true, MI.getOperand(2).getImm());
    break;
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
    unsigned Src2 = MI.getOperand(2).getReg();
    bool isKill2 = MI.getOperand(2).isKill();
    unsigned leaInReg2 = 0;
    MachineInstr *InsMI2 = nullptr;
    if (Src == Src2) {
      // ADD16rr killed %reg1028, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, leaInReg, true, leaInReg, false);
    } else {
      if (Subtarget.is64Bit())
        leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
      else
        leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16 bits.
      BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
      InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
                   .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
                   .addReg(Src2, getKillRegState(isKill2));
      addRegReg(MIB, leaInReg, true, leaInReg2, true);
    }
    if (LV && isKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, *InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(Dest, RegState::Define | getDeadRegState(isDead))
          .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);

  if (LV) {
    // Update live variables
    LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
    LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
    if (isKill)
      LV->replaceKillInstruction(Src, MI, *InsMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, *ExtMI);
  }

  return ExtMI;
}

/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineInstr &MI, LiveVariables *LV) const {
  // The following opcodes also set the condition code register(s). Only
  // convert them to equivalent LEA instructions if the condition code
  // register defs are dead!
  if (hasLiveCondCodeDef(MI))
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  // All instructions input are two-addr instructions.  Get the known operands.
  const MachineOperand &Dest = MI.getOperand(0);
  const MachineOperand &Src = MI.getOperand(1);

  MachineInstr *NewMI = nullptr;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's.  When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  // 16-bit LEA is also slow on Core2.
  bool DisableLEA16 = true;
  bool is64Bit = Subtarget.is64Bit();

  unsigned MIOpc = MI.getOpcode();
  switch (MIOpc) {
  default: return nullptr;
  case X86::SHL64ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    // LEA can't handle RSP.
    if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
        !MF.getRegInfo().constrainRegClass(Src.getReg(),
                                           &X86::GR64_NOSPRegClass))
      return nullptr;

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
                .add(Dest)
                .addReg(0)
                .addImm(1ULL << ShAmt)
                .add(Src)
                .addImm(0)
                .addReg(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;

    // LEA can't handle ESP.
    bool isKill, isUndef;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
                        SrcReg, isKill, isUndef, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(0)
            .addImm(1ULL << ShAmt)
            .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
            .addImm(0)
            .addReg(0);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    NewMI = MIB;

    break;
  }
  case X86::SHL16ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
                .add(Dest)
                .addReg(0)
                .addImm(1ULL << ShAmt)
                .add(Src)
                .addImm(0)
                .addReg(0);
    break;
  }
  case X86::INC64r:
  case X86::INC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
    bool isKill, isUndef;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
                        SrcReg, isKill, isUndef, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(SrcReg,
                    getKillRegState(isKill) | getUndefRegState(isUndef));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, 1);
    break;
  }
  case X86::INC16r:
    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1);
    break;
  case X86::DEC64r:
  case X86::DEC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);

    bool isKill, isUndef;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
                        SrcReg, isKill, isUndef, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getUndefRegState(isUndef) |
                                                  getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -1);

    break;
  }
  case X86::DEC16r:
    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1);
    break;
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD32rr:
  case X86::ADD32rr_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc;
    if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
      Opc = X86::LEA64r;
    else
      Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill, isUndef;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, isUndef, ImplicitOp, LV))
      return nullptr;

    const MachineOperand &Src2 = MI.getOperand(2);
    bool isKill2, isUndef2;
    unsigned SrcReg2;
    MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
                        SrcReg2, isKill2, isUndef2, ImplicitOp2, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    if (ImplicitOp2.getReg() != 0)
      MIB.add(ImplicitOp2);

    NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);

    // Preserve undefness of the operands.
    NewMI->getOperand(1).setIsUndef(isUndef);
    NewMI->getOperand(3).setIsUndef(isUndef2);

    if (LV && Src2.isKill())
      LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
    break;
  }
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Src2 = MI.getOperand(2).getReg();
    bool isKill2 = MI.getOperand(2).isKill();
    NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
                      Src.getReg(), Src.isKill(), Src2, isKill2);

    // Preserve undefness of the operands.
    bool isUndef = MI.getOperand(1).isUndef();
    bool isUndef2 = MI.getOperand(2).isUndef();
    NewMI->getOperand(1).setIsUndef(isUndef);
    NewMI->getOperand(3).setIsUndef(isUndef2);

    if (LV && isKill2)
      LV->replaceKillInstruction(Src2, MI, *NewMI);
    break;
  }
  case X86::ADD64ri32:
  case X86::ADD64ri8:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8_DB:
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri_DB:
  case X86::ADD32ri8_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill, isUndef;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, isUndef, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getUndefRegState(isUndef) |
                                                  getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, MI.getOperand(2));
    break;
  }
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;

  case X86::VMOVDQU8Z128rmk:
  case X86::VMOVDQU8Z256rmk:
  case X86::VMOVDQU8Zrmk:
  case X86::VMOVDQU16Z128rmk:
  case X86::VMOVDQU16Z256rmk:
  case X86::VMOVDQU16Zrmk:
  case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
  case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
  case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
  case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
  case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
  case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
  case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
  case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
  case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
  case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
  case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
  case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rmk:  Opc = X86::VPBLENDMBZ128rmk; break;
    case X86::VMOVDQU8Z256rmk:  Opc = X86::VPBLENDMBZ256rmk; break;
    case X86::VMOVDQU8Zrmk:     Opc = X86::VPBLENDMBZrmk;    break;
    case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
    case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
    case X86::VMOVDQU16Zrmk:    Opc = X86::VPBLENDMWZrmk;    break;
    case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQU32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQU64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVUPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVUPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVUPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVUPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVUPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVUPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQA32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQA64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVAPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVAPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVAPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVAPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVAPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVAPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3))
                .add(MI.getOperand(4))
                .add(MI.getOperand(5))
                .add(MI.getOperand(6))
                .add(MI.getOperand(7));
    break;
  }
  case X86::VMOVDQU8Z128rrk:
  case X86::VMOVDQU8Z256rrk:
  case X86::VMOVDQU8Zrrk:
  case X86::VMOVDQU16Z128rrk:
  case X86::VMOVDQU16Z256rrk:
  case X86::VMOVDQU16Zrrk:
  case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
  case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
  case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
  case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
  case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
  case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
  case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
  case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
  case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
  case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
  case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
  case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rrk:  Opc = X86::VPBLENDMBZ128rrk; break;
    case X86::VMOVDQU8Z256rrk:  Opc = X86::VPBLENDMBZ256rrk; break;
    case X86::VMOVDQU8Zrrk:     Opc = X86::VPBLENDMBZrrk;    break;
    case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
    case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
    case X86::VMOVDQU16Zrrk:    Opc = X86::VPBLENDMWZrrk;    break;
    case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQU32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQU64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVUPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVUPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVUPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVUPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVUPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVUPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQA32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQA64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVAPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVAPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVAPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVAPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVAPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVAPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3));
    break;
  }
  }

  if (!NewMI) return nullptr;

  if (LV) {  // Update live variables
    if (Src.isKill())
      LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
    if (Dest.isDead())
      LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
  }

  MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
  return NewMI;
}

/// This determines which of three possible cases of a three source commute
/// the source indexes correspond to taking into account any mask operands.
/// All prevents commuting a passthru operand. Returns -1 if the commute isn't
/// possible.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {
  // Put the lowest index to SrcOpIdx1 to simplify the checks below.
  if (SrcOpIdx1 > SrcOpIdx2)
    std::swap(SrcOpIdx1, SrcOpIdx2);

  unsigned Op1 = 1, Op2 = 2, Op3 = 3;
  if (X86II::isKMasked(TSFlags)) {
    Op2++;
    Op3++;
  }

  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
    return 0;
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
    return 1;
  if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
    return 2;
  llvm_unreachable("Unknown three src commute case.");
}

unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
    const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
    const X86InstrFMA3Group &FMA3Group) const {

  unsigned Opc = MI.getOpcode();

  // TODO: Commuting the 1st operand of FMA*_Int requires some additional
  // analysis. The commute optimization is legal only if all users of FMA*_Int
  // use only the lowest element of the FMA*_Int instruction. Such analysis are
  // not implemented yet. So, just return 0 in that case.
  // When such analysis are available this place will be the right place for
  // calling it.
  assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
         "Intrinsic instructions can't commute operand 1");

  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case number!");

  // Define the FMA forms mapping array that helps to map input FMA form
  // to output FMA form to preserve the operation semantics after
  // commuting the operands.
  const unsigned Form132Index = 0;
  const unsigned Form213Index = 1;
  const unsigned Form231Index = 2;
  static const unsigned FormMapping[][3] = {
    // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
    //   FMA132 A, C, b; ==> FMA231 C, A, b;
    //   FMA213 B, A, c; ==> FMA213 A, B, c;
    //   FMA231 C, A, b; ==> FMA132 A, C, b;
    { Form231Index, Form213Index, Form132Index },
    // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
    //   FMA132 A, c, B; ==> FMA132 B, c, A;
    //   FMA213 B, a, C; ==> FMA231 C, a, B;
    //   FMA231 C, a, B; ==> FMA213 B, a, C;
    { Form132Index, Form231Index, Form213Index },
    // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
    //   FMA132 a, C, B; ==> FMA213 a, B, C;
    //   FMA213 b, A, C; ==> FMA132 b, C, A;
    //   FMA231 c, A, B; ==> FMA231 c, B, A;
    { Form213Index, Form132Index, Form231Index }
  };

  unsigned FMAForms[3];
  FMAForms[0] = FMA3Group.get132Opcode();
  FMAForms[1] = FMA3Group.get213Opcode();
  FMAForms[2] = FMA3Group.get231Opcode();
  unsigned FormIndex;
  for (FormIndex = 0; FormIndex < 3; FormIndex++)
    if (Opc == FMAForms[FormIndex])
      break;

  // Everything is ready, just adjust the FMA opcode and return it.
  FormIndex = FormMapping[Case][FormIndex];
  return FMAForms[FormIndex];
}

static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
                             unsigned SrcOpIdx2) {
  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case value!");

  // For each case we need to swap two pairs of bits in the final immediate.
  static const uint8_t SwapMasks[3][4] = {
    { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
    { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
    { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
  };

  uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
  // Clear out the bits we are swapping.
  uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
                           SwapMasks[Case][2] | SwapMasks[Case][3]);
  // If the immediate had a bit of the pair set, then set the opposite bit.
  if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
  if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
  if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
  if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
  MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
}

// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
#define VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rr:    case X86::VPERMT2##Suffix##128rr:    \
  case X86::VPERMI2##Suffix##256rr:    case X86::VPERMT2##Suffix##256rr:    \
  case X86::VPERMI2##Suffix##rr:       case X86::VPERMT2##Suffix##rr:       \
  case X86::VPERMI2##Suffix##128rm:    case X86::VPERMT2##Suffix##128rm:    \
  case X86::VPERMI2##Suffix##256rm:    case X86::VPERMT2##Suffix##256rm:    \
  case X86::VPERMI2##Suffix##rm:       case X86::VPERMT2##Suffix##rm:       \
  case X86::VPERMI2##Suffix##128rrkz:  case X86::VPERMT2##Suffix##128rrkz:  \
  case X86::VPERMI2##Suffix##256rrkz:  case X86::VPERMT2##Suffix##256rrkz:  \
  case X86::VPERMI2##Suffix##rrkz:     case X86::VPERMT2##Suffix##rrkz:     \
  case X86::VPERMI2##Suffix##128rmkz:  case X86::VPERMT2##Suffix##128rmkz:  \
  case X86::VPERMI2##Suffix##256rmkz:  case X86::VPERMT2##Suffix##256rmkz:  \
  case X86::VPERMI2##Suffix##rmkz:     case X86::VPERMT2##Suffix##rmkz:

#define VPERM_CASES_BROADCAST(Suffix) \
  VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rmb:   case X86::VPERMT2##Suffix##128rmb:   \
  case X86::VPERMI2##Suffix##256rmb:   case X86::VPERMT2##Suffix##256rmb:   \
  case X86::VPERMI2##Suffix##rmb:      case X86::VPERMT2##Suffix##rmb:      \
  case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
  case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
  case X86::VPERMI2##Suffix##rmbkz:    case X86::VPERMT2##Suffix##rmbkz:

  switch (Opcode) {
  default: return false;
  VPERM_CASES(B)
  VPERM_CASES_BROADCAST(D)
  VPERM_CASES_BROADCAST(PD)
  VPERM_CASES_BROADCAST(PS)
  VPERM_CASES_BROADCAST(Q)
  VPERM_CASES(W)
    return true;
  }
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
#define VPERM_CASES(Orig, New) \
  case X86::Orig##128rr:    return X86::New##128rr;   \
  case X86::Orig##128rrkz:  return X86::New##128rrkz; \
  case X86::Orig##128rm:    return X86::New##128rm;   \
  case X86::Orig##128rmkz:  return X86::New##128rmkz; \
  case X86::Orig##256rr:    return X86::New##256rr;   \
  case X86::Orig##256rrkz:  return X86::New##256rrkz; \
  case X86::Orig##256rm:    return X86::New##256rm;   \
  case X86::Orig##256rmkz:  return X86::New##256rmkz; \
  case X86::Orig##rr:       return X86::New##rr;      \
  case X86::Orig##rrkz:     return X86::New##rrkz;    \
  case X86::Orig##rm:       return X86::New##rm;      \
  case X86::Orig##rmkz:     return X86::New##rmkz;

#define VPERM_CASES_BROADCAST(Orig, New) \
  VPERM_CASES(Orig, New) \
  case X86::Orig##128rmb:   return X86::New##128rmb;   \
  case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
  case X86::Orig##256rmb:   return X86::New##256rmb;   \
  case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
  case X86::Orig##rmb:      return X86::New##rmb;      \
  case X86::Orig##rmbkz:    return X86::New##rmbkz;

  switch (Opcode) {
  VPERM_CASES(VPERMI2B, VPERMT2B)
  VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
  VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
  VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
  VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
  VPERM_CASES(VPERMI2W, VPERMT2W)
  VPERM_CASES(VPERMT2B, VPERMI2B)
  VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
  VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
  VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
  VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
  VPERM_CASES(VPERMT2W, VPERMI2W)
  }

  llvm_unreachable("Unreachable!");
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI.getOperand(3).getImm();
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    WorkingMI.getOperand(3).setImm(Size - Amt);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::PFSUBrr:
  case X86::PFSUBRrr: {
1540 // PFSUB x, y: x = x - y 1541 // PFSUBR x, y: x = y - x 1542 unsigned Opc = 1543 (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr); 1544 auto &WorkingMI = cloneIfNew(MI); 1545 WorkingMI.setDesc(get(Opc)); 1546 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1547 OpIdx1, OpIdx2); 1548 } 1549 case X86::BLENDPDrri: 1550 case X86::BLENDPSrri: 1551 case X86::VBLENDPDrri: 1552 case X86::VBLENDPSrri: 1553 // If we're optimizing for size, try to use MOVSD/MOVSS. 1554 if (MI.getParent()->getParent()->getFunction().optForSize()) { 1555 unsigned Mask, Opc; 1556 switch (MI.getOpcode()) { 1557 default: llvm_unreachable("Unreachable!"); 1558 case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break; 1559 case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break; 1560 case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break; 1561 case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break; 1562 } 1563 if ((MI.getOperand(3).getImm() ^ Mask) == 1) { 1564 auto &WorkingMI = cloneIfNew(MI); 1565 WorkingMI.setDesc(get(Opc)); 1566 WorkingMI.RemoveOperand(3); 1567 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, 1568 /*NewMI=*/false, 1569 OpIdx1, OpIdx2); 1570 } 1571 } 1572 LLVM_FALLTHROUGH; 1573 case X86::PBLENDWrri: 1574 case X86::VBLENDPDYrri: 1575 case X86::VBLENDPSYrri: 1576 case X86::VPBLENDDrri: 1577 case X86::VPBLENDWrri: 1578 case X86::VPBLENDDYrri: 1579 case X86::VPBLENDWYrri:{ 1580 unsigned Mask; 1581 switch (MI.getOpcode()) { 1582 default: llvm_unreachable("Unreachable!"); 1583 case X86::BLENDPDrri: Mask = 0x03; break; 1584 case X86::BLENDPSrri: Mask = 0x0F; break; 1585 case X86::PBLENDWrri: Mask = 0xFF; break; 1586 case X86::VBLENDPDrri: Mask = 0x03; break; 1587 case X86::VBLENDPSrri: Mask = 0x0F; break; 1588 case X86::VBLENDPDYrri: Mask = 0x0F; break; 1589 case X86::VBLENDPSYrri: Mask = 0xFF; break; 1590 case X86::VPBLENDDrri: Mask = 0x0F; break; 1591 case X86::VPBLENDWrri: Mask = 0xFF; break; 1592 case X86::VPBLENDDYrri: Mask = 0xFF; break; 1593 case X86::VPBLENDWYrri: Mask = 0xFF; break; 1594 } 1595 // Only the least significant bits of Imm are used. 1596 unsigned Imm = MI.getOperand(3).getImm() & Mask; 1597 auto &WorkingMI = cloneIfNew(MI); 1598 WorkingMI.getOperand(3).setImm(Mask ^ Imm); 1599 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1600 OpIdx1, OpIdx2); 1601 } 1602 case X86::MOVSDrr: 1603 case X86::MOVSSrr: 1604 case X86::VMOVSDrr: 1605 case X86::VMOVSSrr:{ 1606 // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD. 1607 assert(Subtarget.hasSSE41() && "Commuting MOVSD/MOVSS requires SSE41!"); 1608 1609 unsigned Mask, Opc; 1610 switch (MI.getOpcode()) { 1611 default: llvm_unreachable("Unreachable!"); 1612 case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break; 1613 case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break; 1614 case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break; 1615 case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break; 1616 } 1617 1618 auto &WorkingMI = cloneIfNew(MI); 1619 WorkingMI.setDesc(get(Opc)); 1620 WorkingMI.addOperand(MachineOperand::CreateImm(Mask)); 1621 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1622 OpIdx1, OpIdx2); 1623 } 1624 case X86::PCLMULQDQrr: 1625 case X86::VPCLMULQDQrr: 1626 case X86::VPCLMULQDQYrr: 1627 case X86::VPCLMULQDQZrr: 1628 case X86::VPCLMULQDQZ128rr: 1629 case X86::VPCLMULQDQZ256rr: { 1630 // SRC1 64bits = Imm[0] ? 
SRC1[127:64] : SRC1[63:0] 1631 // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0] 1632 unsigned Imm = MI.getOperand(3).getImm(); 1633 unsigned Src1Hi = Imm & 0x01; 1634 unsigned Src2Hi = Imm & 0x10; 1635 auto &WorkingMI = cloneIfNew(MI); 1636 WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); 1637 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1638 OpIdx1, OpIdx2); 1639 } 1640 case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri: 1641 case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri: 1642 case X86::VPCMPBZrri: case X86::VPCMPUBZrri: 1643 case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri: 1644 case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri: 1645 case X86::VPCMPDZrri: case X86::VPCMPUDZrri: 1646 case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri: 1647 case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri: 1648 case X86::VPCMPQZrri: case X86::VPCMPUQZrri: 1649 case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri: 1650 case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri: 1651 case X86::VPCMPWZrri: case X86::VPCMPUWZrri: 1652 case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik: 1653 case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik: 1654 case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik: 1655 case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik: 1656 case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik: 1657 case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik: 1658 case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik: 1659 case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik: 1660 case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik: 1661 case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik: 1662 case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik: 1663 case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: { 1664 // Flip comparison mode immediate (if necessary). 1665 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7; 1666 Imm = X86::getSwappedVPCMPImm(Imm); 1667 auto &WorkingMI = cloneIfNew(MI); 1668 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm); 1669 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1670 OpIdx1, OpIdx2); 1671 } 1672 case X86::VPCOMBri: case X86::VPCOMUBri: 1673 case X86::VPCOMDri: case X86::VPCOMUDri: 1674 case X86::VPCOMQri: case X86::VPCOMUQri: 1675 case X86::VPCOMWri: case X86::VPCOMUWri: { 1676 // Flip comparison mode immediate (if necessary). 1677 unsigned Imm = MI.getOperand(3).getImm() & 0x7; 1678 Imm = X86::getSwappedVPCOMImm(Imm); 1679 auto &WorkingMI = cloneIfNew(MI); 1680 WorkingMI.getOperand(3).setImm(Imm); 1681 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1682 OpIdx1, OpIdx2); 1683 } 1684 case X86::VPERM2F128rr: 1685 case X86::VPERM2I128rr: { 1686 // Flip permute source immediate. 1687 // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi. 1688 // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi. 
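// --- Illustrative sketch (editorial addition): the two comment lines above
// describe the VPERM2F128/VPERM2I128 immediate. Bits [1:0] pick the data for
// the low 128-bit result lane and bits [5:4] pick it for the high lane; within
// each field, bit 1 (resp. bit 5) chooses between the first and the second
// source. Exchanging the two sources therefore just toggles those two bits,
// which is the Imm ^ 0x22 rewrite applied below.
static unsigned swapVPERM2X128SourcesImm(unsigned Imm) {
  return (Imm & 0xFF) ^ 0x22;
}
// Example: Imm = 0x31 selects {hi <- Src2.hi, lo <- Src1.hi}. After commuting
// the sources, swapVPERM2X128SourcesImm(0x31) == 0x13 selects the same data
// from the exchanged operands.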
1689 unsigned Imm = MI.getOperand(3).getImm() & 0xFF; 1690 auto &WorkingMI = cloneIfNew(MI); 1691 WorkingMI.getOperand(3).setImm(Imm ^ 0x22); 1692 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1693 OpIdx1, OpIdx2); 1694 } 1695 case X86::MOVHLPSrr: 1696 case X86::UNPCKHPDrr: 1697 case X86::VMOVHLPSrr: 1698 case X86::VUNPCKHPDrr: 1699 case X86::VMOVHLPSZrr: 1700 case X86::VUNPCKHPDZ128rr: { 1701 assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!"); 1702 1703 unsigned Opc = MI.getOpcode(); 1704 switch (Opc) { 1705 default: llvm_unreachable("Unreachable!"); 1706 case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break; 1707 case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break; 1708 case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break; 1709 case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break; 1710 case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break; 1711 case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break; 1712 } 1713 auto &WorkingMI = cloneIfNew(MI); 1714 WorkingMI.setDesc(get(Opc)); 1715 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1716 OpIdx1, OpIdx2); 1717 } 1718 case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr: 1719 case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr: 1720 case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: 1721 case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr: 1722 case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr: 1723 case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr: 1724 case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr: 1725 case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr: 1726 case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr: 1727 case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr: 1728 case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr: 1729 case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr: 1730 case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr: 1731 case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: 1732 case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr: 1733 case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: { 1734 unsigned Opc; 1735 switch (MI.getOpcode()) { 1736 default: llvm_unreachable("Unreachable!"); 1737 case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break; 1738 case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break; 1739 case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break; 1740 case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break; 1741 case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break; 1742 case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break; 1743 case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break; 1744 case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break; 1745 case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break; 1746 case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break; 1747 case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break; 1748 case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break; 1749 case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break; 1750 case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break; 1751 case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break; 1752 case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break; 1753 case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break; 1754 case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break; 1755 case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break; 1756 case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break; 1757 case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break; 1758 case 
X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break; 1759 case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break; 1760 case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break; 1761 case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break; 1762 case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break; 1763 case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break; 1764 case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break; 1765 case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break; 1766 case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break; 1767 case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break; 1768 case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break; 1769 case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break; 1770 case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break; 1771 case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break; 1772 case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break; 1773 case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break; 1774 case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break; 1775 case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break; 1776 case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break; 1777 case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break; 1778 case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break; 1779 case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break; 1780 case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break; 1781 case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break; 1782 case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break; 1783 case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break; 1784 case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break; 1785 } 1786 auto &WorkingMI = cloneIfNew(MI); 1787 WorkingMI.setDesc(get(Opc)); 1788 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1789 OpIdx1, OpIdx2); 1790 } 1791 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: 1792 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: 1793 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: 1794 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: 1795 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: 1796 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: 1797 case X86::VPTERNLOGDZrrik: 1798 case X86::VPTERNLOGDZ128rrik: 1799 case X86::VPTERNLOGDZ256rrik: 1800 case X86::VPTERNLOGQZrrik: 1801 case X86::VPTERNLOGQZ128rrik: 1802 case X86::VPTERNLOGQZ256rrik: 1803 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: 1804 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: 1805 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: 1806 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: 1807 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: 1808 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: 1809 case X86::VPTERNLOGDZ128rmbi: 1810 case X86::VPTERNLOGDZ256rmbi: 1811 case X86::VPTERNLOGDZrmbi: 1812 case X86::VPTERNLOGQZ128rmbi: 1813 case X86::VPTERNLOGQZ256rmbi: 1814 case X86::VPTERNLOGQZrmbi: 1815 case X86::VPTERNLOGDZ128rmbikz: 1816 case X86::VPTERNLOGDZ256rmbikz: 1817 case X86::VPTERNLOGDZrmbikz: 1818 case X86::VPTERNLOGQZ128rmbikz: 1819 case X86::VPTERNLOGQZ256rmbikz: 1820 case X86::VPTERNLOGQZrmbikz: { 1821 auto &WorkingMI = cloneIfNew(MI); 1822 commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2); 1823 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1824 OpIdx1, OpIdx2); 1825 } 1826 default: { 1827 if (isCommutableVPERMV3Instruction(MI.getOpcode())) { 1828 unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode()); 1829 auto &WorkingMI = cloneIfNew(MI); 1830 WorkingMI.setDesc(get(Opc)); 1831 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, 
/*NewMI=*/false, 1832 OpIdx1, OpIdx2); 1833 } 1834 1835 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), 1836 MI.getDesc().TSFlags); 1837 if (FMA3Group) { 1838 unsigned Opc = 1839 getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group); 1840 auto &WorkingMI = cloneIfNew(MI); 1841 WorkingMI.setDesc(get(Opc)); 1842 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1843 OpIdx1, OpIdx2); 1844 } 1845 1846 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 1847 } 1848 } 1849} 1850 1851bool 1852X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, 1853 unsigned &SrcOpIdx1, 1854 unsigned &SrcOpIdx2, 1855 bool IsIntrinsic) const { 1856 uint64_t TSFlags = MI.getDesc().TSFlags; 1857 1858 unsigned FirstCommutableVecOp = 1; 1859 unsigned LastCommutableVecOp = 3; 1860 unsigned KMaskOp = -1U; 1861 if (X86II::isKMasked(TSFlags)) { 1862 // For k-zero-masked operations it is Ok to commute the first vector 1863 // operand. 1864 // For regular k-masked operations a conservative choice is done as the 1865 // elements of the first vector operand, for which the corresponding bit 1866 // in the k-mask operand is set to 0, are copied to the result of the 1867 // instruction. 1868 // TODO/FIXME: The commute still may be legal if it is known that the 1869 // k-mask operand is set to either all ones or all zeroes. 1870 // It is also Ok to commute the 1st operand if all users of MI use only 1871 // the elements enabled by the k-mask operand. For example, 1872 // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i] 1873 // : v1[i]; 1874 // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 -> 1875 // // Ok, to commute v1 in FMADD213PSZrk. 1876 1877 // The k-mask operand has index = 2 for masked and zero-masked operations. 1878 KMaskOp = 2; 1879 1880 // The operand with index = 1 is used as a source for those elements for 1881 // which the corresponding bit in the k-mask is set to 0. 1882 if (X86II::isKMergeMasked(TSFlags)) 1883 FirstCommutableVecOp = 3; 1884 1885 LastCommutableVecOp++; 1886 } else if (IsIntrinsic) { 1887 // Commuting the first operand of an intrinsic instruction isn't possible 1888 // unless we can prove that only the lowest element of the result is used. 1889 FirstCommutableVecOp = 2; 1890 } 1891 1892 if (isMem(MI, LastCommutableVecOp)) 1893 LastCommutableVecOp--; 1894 1895 // Only the first RegOpsNum operands are commutable. 1896 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means 1897 // that the operand is not specified/fixed. 1898 if (SrcOpIdx1 != CommuteAnyOperandIndex && 1899 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp || 1900 SrcOpIdx1 == KMaskOp)) 1901 return false; 1902 if (SrcOpIdx2 != CommuteAnyOperandIndex && 1903 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp || 1904 SrcOpIdx2 == KMaskOp)) 1905 return false; 1906 1907 // Look for two different register operands assumed to be commutable 1908 // regardless of the FMA opcode. The FMA opcode is adjusted later. 1909 if (SrcOpIdx1 == CommuteAnyOperandIndex || 1910 SrcOpIdx2 == CommuteAnyOperandIndex) { 1911 unsigned CommutableOpIdx1 = SrcOpIdx1; 1912 unsigned CommutableOpIdx2 = SrcOpIdx2; 1913 1914 // At least one of operands to be commuted is not specified and 1915 // this method is free to choose appropriate commutable operands. 1916 if (SrcOpIdx1 == SrcOpIdx2) 1917 // Both of operands are not fixed. 
By default set one of commutable 1918 // operands to the last register operand of the instruction. 1919 CommutableOpIdx2 = LastCommutableVecOp; 1920 else if (SrcOpIdx2 == CommuteAnyOperandIndex) 1921 // Only one of operands is not fixed. 1922 CommutableOpIdx2 = SrcOpIdx1; 1923 1924 // CommutableOpIdx2 is well defined now. Let's choose another commutable 1925 // operand and assign its index to CommutableOpIdx1. 1926 unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); 1927 for (CommutableOpIdx1 = LastCommutableVecOp; 1928 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) { 1929 // Just ignore and skip the k-mask operand. 1930 if (CommutableOpIdx1 == KMaskOp) 1931 continue; 1932 1933 // The commuted operands must have different registers. 1934 // Otherwise, the commute transformation does not change anything and 1935 // is useless then. 1936 if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg()) 1937 break; 1938 } 1939 1940 // No appropriate commutable operands were found. 1941 if (CommutableOpIdx1 < FirstCommutableVecOp) 1942 return false; 1943 1944 // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpidx2 1945 // to return those values. 1946 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1947 CommutableOpIdx1, CommutableOpIdx2)) 1948 return false; 1949 } 1950 1951 return true; 1952} 1953 1954bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, 1955 unsigned &SrcOpIdx2) const { 1956 const MCInstrDesc &Desc = MI.getDesc(); 1957 if (!Desc.isCommutable()) 1958 return false; 1959 1960 switch (MI.getOpcode()) { 1961 case X86::CMPSDrr: 1962 case X86::CMPSSrr: 1963 case X86::CMPPDrri: 1964 case X86::CMPPSrri: 1965 case X86::VCMPSDrr: 1966 case X86::VCMPSSrr: 1967 case X86::VCMPPDrri: 1968 case X86::VCMPPSrri: 1969 case X86::VCMPPDYrri: 1970 case X86::VCMPPSYrri: 1971 case X86::VCMPSDZrr: 1972 case X86::VCMPSSZrr: 1973 case X86::VCMPPDZrri: 1974 case X86::VCMPPSZrri: 1975 case X86::VCMPPDZ128rri: 1976 case X86::VCMPPSZ128rri: 1977 case X86::VCMPPDZ256rri: 1978 case X86::VCMPPSZ256rri: { 1979 // Float comparison can be safely commuted for 1980 // Ordered/Unordered/Equal/NotEqual tests 1981 unsigned Imm = MI.getOperand(3).getImm() & 0x7; 1982 switch (Imm) { 1983 case 0x00: // EQUAL 1984 case 0x03: // UNORDERED 1985 case 0x04: // NOT EQUAL 1986 case 0x07: // ORDERED 1987 // The indices of the commutable operands are 1 and 2. 1988 // Assign them to the returned operand indices here. 
1989 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2); 1990 } 1991 return false; 1992 } 1993 case X86::MOVSDrr: 1994 case X86::MOVSSrr: 1995 case X86::VMOVSDrr: 1996 case X86::VMOVSSrr: 1997 if (Subtarget.hasSSE41()) 1998 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 1999 return false; 2000 case X86::MOVHLPSrr: 2001 case X86::UNPCKHPDrr: 2002 case X86::VMOVHLPSrr: 2003 case X86::VUNPCKHPDrr: 2004 case X86::VMOVHLPSZrr: 2005 case X86::VUNPCKHPDZ128rr: 2006 if (Subtarget.hasSSE2()) 2007 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 2008 return false; 2009 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: 2010 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: 2011 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: 2012 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: 2013 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: 2014 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: 2015 case X86::VPTERNLOGDZrrik: 2016 case X86::VPTERNLOGDZ128rrik: 2017 case X86::VPTERNLOGDZ256rrik: 2018 case X86::VPTERNLOGQZrrik: 2019 case X86::VPTERNLOGQZ128rrik: 2020 case X86::VPTERNLOGQZ256rrik: 2021 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: 2022 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: 2023 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: 2024 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: 2025 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: 2026 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: 2027 case X86::VPTERNLOGDZ128rmbi: 2028 case X86::VPTERNLOGDZ256rmbi: 2029 case X86::VPTERNLOGDZrmbi: 2030 case X86::VPTERNLOGQZ128rmbi: 2031 case X86::VPTERNLOGQZ256rmbi: 2032 case X86::VPTERNLOGQZrmbi: 2033 case X86::VPTERNLOGDZ128rmbikz: 2034 case X86::VPTERNLOGDZ256rmbikz: 2035 case X86::VPTERNLOGDZrmbikz: 2036 case X86::VPTERNLOGQZ128rmbikz: 2037 case X86::VPTERNLOGQZ256rmbikz: 2038 case X86::VPTERNLOGQZrmbikz: 2039 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 2040 case X86::VPMADD52HUQZ128r: 2041 case X86::VPMADD52HUQZ128rk: 2042 case X86::VPMADD52HUQZ128rkz: 2043 case X86::VPMADD52HUQZ256r: 2044 case X86::VPMADD52HUQZ256rk: 2045 case X86::VPMADD52HUQZ256rkz: 2046 case X86::VPMADD52HUQZr: 2047 case X86::VPMADD52HUQZrk: 2048 case X86::VPMADD52HUQZrkz: 2049 case X86::VPMADD52LUQZ128r: 2050 case X86::VPMADD52LUQZ128rk: 2051 case X86::VPMADD52LUQZ128rkz: 2052 case X86::VPMADD52LUQZ256r: 2053 case X86::VPMADD52LUQZ256rk: 2054 case X86::VPMADD52LUQZ256rkz: 2055 case X86::VPMADD52LUQZr: 2056 case X86::VPMADD52LUQZrk: 2057 case X86::VPMADD52LUQZrkz: { 2058 unsigned CommutableOpIdx1 = 2; 2059 unsigned CommutableOpIdx2 = 3; 2060 if (X86II::isKMasked(Desc.TSFlags)) { 2061 // Skip the mask register. 2062 ++CommutableOpIdx1; 2063 ++CommutableOpIdx2; 2064 } 2065 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2066 CommutableOpIdx1, CommutableOpIdx2)) 2067 return false; 2068 if (!MI.getOperand(SrcOpIdx1).isReg() || 2069 !MI.getOperand(SrcOpIdx2).isReg()) 2070 // No idea. 2071 return false; 2072 return true; 2073 } 2074 2075 default: 2076 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), 2077 MI.getDesc().TSFlags); 2078 if (FMA3Group) 2079 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2, 2080 FMA3Group->isIntrinsic()); 2081 2082 // Handled masked instructions since we need to skip over the mask input 2083 // and the preserved input. 
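// --- Illustrative sketch (editorial addition, hypothetical helper): the
// operand layout assumed by the k-masked handling in this function. For a
// two-source masked operation the commutable sources sit right after the
// mask, and for merge-masking also after the tied pass-through operand:
//   zero-masking (rrkz):  dst, kmask, src1, src2
//   merge-masking (rrk):  dst, passthru (tied to dst), kmask, src1, src2
static void twoSrcMaskedCommutableOps(unsigned NumDefs, bool MergeMasked,
                                      unsigned &Idx1, unsigned &Idx2) {
  Idx1 = NumDefs + 1; // first operand past the defs and (for kz) the mask
  Idx2 = NumDefs + 2;
  if (MergeMasked) {  // also step over the tied pass-through operand
    ++Idx1;
    ++Idx2;
  }
}
// e.g. %d = VPADDDZ128rrk %passthru(tied), %k, %a, %b gives Idx1 = 3, Idx2 = 4.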
2084 if (X86II::isKMasked(Desc.TSFlags)) { 2085 // First assume that the first input is the mask operand and skip past it. 2086 unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1; 2087 unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2; 2088 // Check if the first input is tied. If there isn't one then we only 2089 // need to skip the mask operand which we did above. 2090 if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(), 2091 MCOI::TIED_TO) != -1)) { 2092 // If this is zero masking instruction with a tied operand, we need to 2093 // move the first index back to the first input since this must 2094 // be a 3 input instruction and we want the first two non-mask inputs. 2095 // Otherwise this is a 2 input instruction with a preserved input and 2096 // mask, so we need to move the indices to skip one more input. 2097 if (X86II::isKMergeMasked(Desc.TSFlags)) { 2098 ++CommutableOpIdx1; 2099 ++CommutableOpIdx2; 2100 } else { 2101 --CommutableOpIdx1; 2102 } 2103 } 2104 2105 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2106 CommutableOpIdx1, CommutableOpIdx2)) 2107 return false; 2108 2109 if (!MI.getOperand(SrcOpIdx1).isReg() || 2110 !MI.getOperand(SrcOpIdx2).isReg()) 2111 // No idea. 2112 return false; 2113 return true; 2114 } 2115 2116 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 2117 } 2118 return false; 2119} 2120 2121X86::CondCode X86::getCondFromBranchOpc(unsigned BrOpc) { 2122 switch (BrOpc) { 2123 default: return X86::COND_INVALID; 2124 case X86::JE_1: return X86::COND_E; 2125 case X86::JNE_1: return X86::COND_NE; 2126 case X86::JL_1: return X86::COND_L; 2127 case X86::JLE_1: return X86::COND_LE; 2128 case X86::JG_1: return X86::COND_G; 2129 case X86::JGE_1: return X86::COND_GE; 2130 case X86::JB_1: return X86::COND_B; 2131 case X86::JBE_1: return X86::COND_BE; 2132 case X86::JA_1: return X86::COND_A; 2133 case X86::JAE_1: return X86::COND_AE; 2134 case X86::JS_1: return X86::COND_S; 2135 case X86::JNS_1: return X86::COND_NS; 2136 case X86::JP_1: return X86::COND_P; 2137 case X86::JNP_1: return X86::COND_NP; 2138 case X86::JO_1: return X86::COND_O; 2139 case X86::JNO_1: return X86::COND_NO; 2140 } 2141} 2142 2143/// Return condition code of a SET opcode. 2144X86::CondCode X86::getCondFromSETOpc(unsigned Opc) { 2145 switch (Opc) { 2146 default: return X86::COND_INVALID; 2147 case X86::SETAr: case X86::SETAm: return X86::COND_A; 2148 case X86::SETAEr: case X86::SETAEm: return X86::COND_AE; 2149 case X86::SETBr: case X86::SETBm: return X86::COND_B; 2150 case X86::SETBEr: case X86::SETBEm: return X86::COND_BE; 2151 case X86::SETEr: case X86::SETEm: return X86::COND_E; 2152 case X86::SETGr: case X86::SETGm: return X86::COND_G; 2153 case X86::SETGEr: case X86::SETGEm: return X86::COND_GE; 2154 case X86::SETLr: case X86::SETLm: return X86::COND_L; 2155 case X86::SETLEr: case X86::SETLEm: return X86::COND_LE; 2156 case X86::SETNEr: case X86::SETNEm: return X86::COND_NE; 2157 case X86::SETNOr: case X86::SETNOm: return X86::COND_NO; 2158 case X86::SETNPr: case X86::SETNPm: return X86::COND_NP; 2159 case X86::SETNSr: case X86::SETNSm: return X86::COND_NS; 2160 case X86::SETOr: case X86::SETOm: return X86::COND_O; 2161 case X86::SETPr: case X86::SETPm: return X86::COND_P; 2162 case X86::SETSr: case X86::SETSm: return X86::COND_S; 2163 } 2164} 2165 2166/// Return condition code of a CMov opcode. 
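// --- Illustrative sketch (editorial addition): the condition-code tables in
// this section compose into simple table-driven rewrites. For example,
// inverting a conditional-branch opcode only takes the helpers defined in this
// file (getCondFromBranchOpc, GetOppositeBranchCondition,
// GetCondBranchFromCond); anything that is not a JCC falls out as COND_INVALID.
static unsigned invertBranchOpcodeSketch(unsigned BrOpc) {
  X86::CondCode CC = X86::getCondFromBranchOpc(BrOpc);
  if (CC == X86::COND_INVALID)
    return 0; // not a recognized conditional branch
  return X86::GetCondBranchFromCond(X86::GetOppositeBranchCondition(CC));
}
// e.g. X86::JE_1 -> COND_E -> COND_NE -> X86::JNE_1.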
2167X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) { 2168 switch (Opc) { 2169 default: return X86::COND_INVALID; 2170 case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm: 2171 case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr: 2172 return X86::COND_A; 2173 case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm: 2174 case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr: 2175 return X86::COND_AE; 2176 case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm: 2177 case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr: 2178 return X86::COND_B; 2179 case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm: 2180 case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr: 2181 return X86::COND_BE; 2182 case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm: 2183 case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr: 2184 return X86::COND_E; 2185 case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm: 2186 case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr: 2187 return X86::COND_G; 2188 case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm: 2189 case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr: 2190 return X86::COND_GE; 2191 case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm: 2192 case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr: 2193 return X86::COND_L; 2194 case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm: 2195 case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr: 2196 return X86::COND_LE; 2197 case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm: 2198 case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr: 2199 return X86::COND_NE; 2200 case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm: 2201 case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr: 2202 return X86::COND_NO; 2203 case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm: 2204 case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr: 2205 return X86::COND_NP; 2206 case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm: 2207 case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr: 2208 return X86::COND_NS; 2209 case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm: 2210 case X86::CMOVO32rr: case X86::CMOVO64rm: case X86::CMOVO64rr: 2211 return X86::COND_O; 2212 case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm: 2213 case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr: 2214 return X86::COND_P; 2215 case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm: 2216 case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr: 2217 return X86::COND_S; 2218 } 2219} 2220 2221unsigned X86::GetCondBranchFromCond(X86::CondCode CC) { 2222 switch (CC) { 2223 default: llvm_unreachable("Illegal condition code!"); 2224 case X86::COND_E: return X86::JE_1; 2225 case X86::COND_NE: return X86::JNE_1; 2226 case X86::COND_L: return X86::JL_1; 2227 case X86::COND_LE: return X86::JLE_1; 2228 case X86::COND_G: return X86::JG_1; 2229 case X86::COND_GE: return X86::JGE_1; 2230 case X86::COND_B: return X86::JB_1; 2231 case X86::COND_BE: return X86::JBE_1; 2232 case X86::COND_A: return X86::JA_1; 2233 case X86::COND_AE: return X86::JAE_1; 2234 case X86::COND_S: return X86::JS_1; 2235 case X86::COND_NS: return X86::JNS_1; 2236 case X86::COND_P: return X86::JP_1; 2237 case X86::COND_NP: return X86::JNP_1; 2238 case X86::COND_O: return X86::JO_1; 2239 case 
X86::COND_NO: return X86::JNO_1; 2240 } 2241} 2242 2243/// Return the inverse of the specified condition, 2244/// e.g. turning COND_E to COND_NE. 2245X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { 2246 switch (CC) { 2247 default: llvm_unreachable("Illegal condition code!"); 2248 case X86::COND_E: return X86::COND_NE; 2249 case X86::COND_NE: return X86::COND_E; 2250 case X86::COND_L: return X86::COND_GE; 2251 case X86::COND_LE: return X86::COND_G; 2252 case X86::COND_G: return X86::COND_LE; 2253 case X86::COND_GE: return X86::COND_L; 2254 case X86::COND_B: return X86::COND_AE; 2255 case X86::COND_BE: return X86::COND_A; 2256 case X86::COND_A: return X86::COND_BE; 2257 case X86::COND_AE: return X86::COND_B; 2258 case X86::COND_S: return X86::COND_NS; 2259 case X86::COND_NS: return X86::COND_S; 2260 case X86::COND_P: return X86::COND_NP; 2261 case X86::COND_NP: return X86::COND_P; 2262 case X86::COND_O: return X86::COND_NO; 2263 case X86::COND_NO: return X86::COND_O; 2264 case X86::COND_NE_OR_P: return X86::COND_E_AND_NP; 2265 case X86::COND_E_AND_NP: return X86::COND_NE_OR_P; 2266 } 2267} 2268 2269/// Assuming the flags are set by MI(a,b), return the condition code if we 2270/// modify the instructions such that flags are set by MI(b,a). 2271static X86::CondCode getSwappedCondition(X86::CondCode CC) { 2272 switch (CC) { 2273 default: return X86::COND_INVALID; 2274 case X86::COND_E: return X86::COND_E; 2275 case X86::COND_NE: return X86::COND_NE; 2276 case X86::COND_L: return X86::COND_G; 2277 case X86::COND_LE: return X86::COND_GE; 2278 case X86::COND_G: return X86::COND_L; 2279 case X86::COND_GE: return X86::COND_LE; 2280 case X86::COND_B: return X86::COND_A; 2281 case X86::COND_BE: return X86::COND_AE; 2282 case X86::COND_A: return X86::COND_B; 2283 case X86::COND_AE: return X86::COND_BE; 2284 } 2285} 2286 2287std::pair<X86::CondCode, bool> 2288X86::getX86ConditionCode(CmpInst::Predicate Predicate) { 2289 X86::CondCode CC = X86::COND_INVALID; 2290 bool NeedSwap = false; 2291 switch (Predicate) { 2292 default: break; 2293 // Floating-point Predicates 2294 case CmpInst::FCMP_UEQ: CC = X86::COND_E; break; 2295 case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH; 2296 case CmpInst::FCMP_OGT: CC = X86::COND_A; break; 2297 case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH; 2298 case CmpInst::FCMP_OGE: CC = X86::COND_AE; break; 2299 case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH; 2300 case CmpInst::FCMP_ULT: CC = X86::COND_B; break; 2301 case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH; 2302 case CmpInst::FCMP_ULE: CC = X86::COND_BE; break; 2303 case CmpInst::FCMP_ONE: CC = X86::COND_NE; break; 2304 case CmpInst::FCMP_UNO: CC = X86::COND_P; break; 2305 case CmpInst::FCMP_ORD: CC = X86::COND_NP; break; 2306 case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH; 2307 case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break; 2308 2309 // Integer Predicates 2310 case CmpInst::ICMP_EQ: CC = X86::COND_E; break; 2311 case CmpInst::ICMP_NE: CC = X86::COND_NE; break; 2312 case CmpInst::ICMP_UGT: CC = X86::COND_A; break; 2313 case CmpInst::ICMP_UGE: CC = X86::COND_AE; break; 2314 case CmpInst::ICMP_ULT: CC = X86::COND_B; break; 2315 case CmpInst::ICMP_ULE: CC = X86::COND_BE; break; 2316 case CmpInst::ICMP_SGT: CC = X86::COND_G; break; 2317 case CmpInst::ICMP_SGE: CC = X86::COND_GE; break; 2318 case CmpInst::ICMP_SLT: CC = X86::COND_L; break; 2319 case CmpInst::ICMP_SLE: CC = X86::COND_LE; break; 2320 } 2321 2322 return std::make_pair(CC, NeedSwap); 2323} 2324 2325/// 
Return a set opcode for the given condition and 2326/// whether it has memory operand. 2327unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) { 2328 static const uint16_t Opc[16][2] = { 2329 { X86::SETAr, X86::SETAm }, 2330 { X86::SETAEr, X86::SETAEm }, 2331 { X86::SETBr, X86::SETBm }, 2332 { X86::SETBEr, X86::SETBEm }, 2333 { X86::SETEr, X86::SETEm }, 2334 { X86::SETGr, X86::SETGm }, 2335 { X86::SETGEr, X86::SETGEm }, 2336 { X86::SETLr, X86::SETLm }, 2337 { X86::SETLEr, X86::SETLEm }, 2338 { X86::SETNEr, X86::SETNEm }, 2339 { X86::SETNOr, X86::SETNOm }, 2340 { X86::SETNPr, X86::SETNPm }, 2341 { X86::SETNSr, X86::SETNSm }, 2342 { X86::SETOr, X86::SETOm }, 2343 { X86::SETPr, X86::SETPm }, 2344 { X86::SETSr, X86::SETSm } 2345 }; 2346 2347 assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes"); 2348 return Opc[CC][HasMemoryOperand ? 1 : 0]; 2349} 2350 2351/// Return a cmov opcode for the given condition, 2352/// register size in bytes, and operand type. 2353unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes, 2354 bool HasMemoryOperand) { 2355 static const uint16_t Opc[32][3] = { 2356 { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr }, 2357 { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr }, 2358 { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr }, 2359 { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr }, 2360 { X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr }, 2361 { X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr }, 2362 { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr }, 2363 { X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr }, 2364 { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr }, 2365 { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr }, 2366 { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr }, 2367 { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr }, 2368 { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr }, 2369 { X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr }, 2370 { X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr }, 2371 { X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr }, 2372 { X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm }, 2373 { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm }, 2374 { X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm }, 2375 { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm }, 2376 { X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm }, 2377 { X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm }, 2378 { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm }, 2379 { X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm }, 2380 { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm }, 2381 { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm }, 2382 { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm }, 2383 { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm }, 2384 { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm }, 2385 { X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm }, 2386 { X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm }, 2387 { X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm } 2388 }; 2389 2390 assert(CC < 16 && "Can only handle standard cond codes"); 2391 unsigned Idx = HasMemoryOperand ? 16+CC : CC; 2392 switch(RegBytes) { 2393 default: llvm_unreachable("Illegal register size!"); 2394 case 2: return Opc[Idx][0]; 2395 case 4: return Opc[Idx][1]; 2396 case 8: return Opc[Idx][2]; 2397 } 2398} 2399 2400/// Get the VPCMP immediate for the given condition. 
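// --- Illustrative sketch (editorial addition, assumes the surrounding file's
// includes): how the helpers above fit together when lowering an IR
// `fcmp olt %a, %b`. getX86ConditionCode reports COND_A plus NeedSwap because
// "strictly above" (with the compare operands swapped) stays false on an
// unordered result, unlike "below".
static void fcmpOltLoweringSketch() {
  std::pair<X86::CondCode, bool> P =
      X86::getX86ConditionCode(CmpInst::FCMP_OLT); // {X86::COND_A, true}
  unsigned SetOpc = X86::getSETFromCond(P.first, /*HasMemoryOperand=*/false);
  unsigned CMovOpc = X86::getCMovFromCond(P.first, /*RegBytes=*/4,
                                          /*HasMemoryOperand=*/false);
  (void)SetOpc;  // X86::SETAr - materializes the i1 result
  (void)CMovOpc; // X86::CMOVA32rr - the matching select opcode
}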
2401unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) { 2402 switch (CC) { 2403 default: llvm_unreachable("Unexpected SETCC condition"); 2404 case ISD::SETNE: return 4; 2405 case ISD::SETEQ: return 0; 2406 case ISD::SETULT: 2407 case ISD::SETLT: return 1; 2408 case ISD::SETUGT: 2409 case ISD::SETGT: return 6; 2410 case ISD::SETUGE: 2411 case ISD::SETGE: return 5; 2412 case ISD::SETULE: 2413 case ISD::SETLE: return 2; 2414 } 2415} 2416 2417/// Get the VPCMP immediate if the opcodes are swapped. 2418unsigned X86::getSwappedVPCMPImm(unsigned Imm) { 2419 switch (Imm) { 2420 default: llvm_unreachable("Unreachable!"); 2421 case 0x01: Imm = 0x06; break; // LT -> NLE 2422 case 0x02: Imm = 0x05; break; // LE -> NLT 2423 case 0x05: Imm = 0x02; break; // NLT -> LE 2424 case 0x06: Imm = 0x01; break; // NLE -> LT 2425 case 0x00: // EQ 2426 case 0x03: // FALSE 2427 case 0x04: // NE 2428 case 0x07: // TRUE 2429 break; 2430 } 2431 2432 return Imm; 2433} 2434 2435/// Get the VPCOM immediate if the opcodes are swapped. 2436unsigned X86::getSwappedVPCOMImm(unsigned Imm) { 2437 switch (Imm) { 2438 default: llvm_unreachable("Unreachable!"); 2439 case 0x00: Imm = 0x02; break; // LT -> GT 2440 case 0x01: Imm = 0x03; break; // LE -> GE 2441 case 0x02: Imm = 0x00; break; // GT -> LT 2442 case 0x03: Imm = 0x01; break; // GE -> LE 2443 case 0x04: // EQ 2444 case 0x05: // NE 2445 case 0x06: // FALSE 2446 case 0x07: // TRUE 2447 break; 2448 } 2449 2450 return Imm; 2451} 2452 2453bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const { 2454 if (!MI.isTerminator()) return false; 2455 2456 // Conditional branch is a special case. 2457 if (MI.isBranch() && !MI.isBarrier()) 2458 return true; 2459 if (!MI.isPredicable()) 2460 return true; 2461 return !isPredicated(MI); 2462} 2463 2464bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { 2465 switch (MI.getOpcode()) { 2466 case X86::TCRETURNdi: 2467 case X86::TCRETURNri: 2468 case X86::TCRETURNmi: 2469 case X86::TCRETURNdi64: 2470 case X86::TCRETURNri64: 2471 case X86::TCRETURNmi64: 2472 return true; 2473 default: 2474 return false; 2475 } 2476} 2477 2478bool X86InstrInfo::canMakeTailCallConditional( 2479 SmallVectorImpl<MachineOperand> &BranchCond, 2480 const MachineInstr &TailCall) const { 2481 if (TailCall.getOpcode() != X86::TCRETURNdi && 2482 TailCall.getOpcode() != X86::TCRETURNdi64) { 2483 // Only direct calls can be done with a conditional branch. 2484 return false; 2485 } 2486 2487 const MachineFunction *MF = TailCall.getParent()->getParent(); 2488 if (Subtarget.isTargetWin64() && MF->hasWinCFI()) { 2489 // Conditional tail calls confuse the Win64 unwinder. 2490 return false; 2491 } 2492 2493 assert(BranchCond.size() == 1); 2494 if (BranchCond[0].getImm() > X86::LAST_VALID_COND) { 2495 // Can't make a conditional tail call with this condition. 2496 return false; 2497 } 2498 2499 const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); 2500 if (X86FI->getTCReturnAddrDelta() != 0 || 2501 TailCall.getOperand(1).getImm() != 0) { 2502 // A conditional tail call cannot do any stack adjustment. 
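// --- Illustrative sketch (editorial addition, standalone): a scalar model of
// the AVX-512 VPCMP predicate encoding (0=EQ, 1=LT, 2=LE, 3=FALSE, 4=NE,
// 5=NLT, 6=NLE, 7=TRUE) showing the property getSwappedVPCMPImm above encodes:
// comparing swapped operands under the swapped immediate gives the same mask.
#include <cassert>
static bool vpcmpScalar(int A, int B, unsigned Imm) {
  switch (Imm & 0x7) {
  case 0: return A == B;  // EQ
  case 1: return A <  B;  // LT
  case 2: return A <= B;  // LE
  case 3: return false;   // FALSE
  case 4: return A != B;  // NE
  case 5: return A >= B;  // NLT
  case 6: return A >  B;  // NLE
  default: return true;   // TRUE
  }
}
static void checkSwappedVPCMPImm(int A, int B) {
  for (unsigned Imm = 0; Imm < 8; ++Imm)
    assert(vpcmpScalar(A, B, Imm) ==
           vpcmpScalar(B, A, X86::getSwappedVPCMPImm(Imm)));
}
// getSwappedVPCOMImm plays the same role for the XOP VPCOM encoding.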
2503 return false; 2504 } 2505 2506 return true; 2507} 2508 2509void X86InstrInfo::replaceBranchWithTailCall( 2510 MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond, 2511 const MachineInstr &TailCall) const { 2512 assert(canMakeTailCallConditional(BranchCond, TailCall)); 2513 2514 MachineBasicBlock::iterator I = MBB.end(); 2515 while (I != MBB.begin()) { 2516 --I; 2517 if (I->isDebugInstr()) 2518 continue; 2519 if (!I->isBranch()) 2520 assert(0 && "Can't find the branch to replace!"); 2521 2522 X86::CondCode CC = X86::getCondFromBranchOpc(I->getOpcode()); 2523 assert(BranchCond.size() == 1); 2524 if (CC != BranchCond[0].getImm()) 2525 continue; 2526 2527 break; 2528 } 2529 2530 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc 2531 : X86::TCRETURNdi64cc; 2532 2533 auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc)); 2534 MIB->addOperand(TailCall.getOperand(0)); // Destination. 2535 MIB.addImm(0); // Stack offset (not used). 2536 MIB->addOperand(BranchCond[0]); // Condition. 2537 MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters. 2538 2539 // Add implicit uses and defs of all live regs potentially clobbered by the 2540 // call. This way they still appear live across the call. 2541 LivePhysRegs LiveRegs(getRegisterInfo()); 2542 LiveRegs.addLiveOuts(MBB); 2543 SmallVector<std::pair<unsigned, const MachineOperand *>, 8> Clobbers; 2544 LiveRegs.stepForward(*MIB, Clobbers); 2545 for (const auto &C : Clobbers) { 2546 MIB.addReg(C.first, RegState::Implicit); 2547 MIB.addReg(C.first, RegState::Implicit | RegState::Define); 2548 } 2549 2550 I->eraseFromParent(); 2551} 2552 2553// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may 2554// not be a fallthrough MBB now due to layout changes). Return nullptr if the 2555// fallthrough MBB cannot be identified. 2556static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB, 2557 MachineBasicBlock *TBB) { 2558 // Look for non-EHPad successors other than TBB. If we find exactly one, it 2559 // is the fallthrough MBB. If we find zero, then TBB is both the target MBB 2560 // and fallthrough MBB. If we find more than one, we cannot identify the 2561 // fallthrough MBB and should return nullptr. 2562 MachineBasicBlock *FallthroughBB = nullptr; 2563 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) { 2564 if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB)) 2565 continue; 2566 // Return a nullptr if we found more than one fallthrough successor. 2567 if (FallthroughBB && FallthroughBB != TBB) 2568 return nullptr; 2569 FallthroughBB = *SI; 2570 } 2571 return FallthroughBB; 2572} 2573 2574bool X86InstrInfo::AnalyzeBranchImpl( 2575 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, 2576 SmallVectorImpl<MachineOperand> &Cond, 2577 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { 2578 2579 // Start from the bottom of the block and work up, examining the 2580 // terminator instructions. 2581 MachineBasicBlock::iterator I = MBB.end(); 2582 MachineBasicBlock::iterator UnCondBrIter = MBB.end(); 2583 while (I != MBB.begin()) { 2584 --I; 2585 if (I->isDebugInstr()) 2586 continue; 2587 2588 // Working from the bottom, when we see a non-terminator instruction, we're 2589 // done. 2590 if (!isUnpredicatedTerminator(*I)) 2591 break; 2592 2593 // A terminator that isn't a branch can't easily be handled by this 2594 // analysis. 2595 if (!I->isBranch()) 2596 return true; 2597 2598 // Handle unconditional branches. 
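// --- Illustrative sketch (editorial addition, hypothetical caller): the two
// tail-call hooks above are meant to be used as a pair, e.g. by a
// branch-folding style pass that wants to turn `jCC <block ending in a direct
// TCRETURN>` into one conditional tail call (TCRETURNdicc/TCRETURNdi64cc).
static bool tryConditionalTailCallSketch(const X86InstrInfo &TII,
                                         MachineBasicBlock &Pred,
                                         SmallVectorImpl<MachineOperand> &Cond,
                                         const MachineInstr &TailCall) {
  if (!TII.canMakeTailCallConditional(Cond, TailCall))
    return false;
  TII.replaceBranchWithTailCall(Pred, Cond, TailCall); // rewrites Pred's JCC
  return true;
}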
2599 if (I->getOpcode() == X86::JMP_1) { 2600 UnCondBrIter = I; 2601 2602 if (!AllowModify) { 2603 TBB = I->getOperand(0).getMBB(); 2604 continue; 2605 } 2606 2607 // If the block has any instructions after a JMP, delete them. 2608 while (std::next(I) != MBB.end()) 2609 std::next(I)->eraseFromParent(); 2610 2611 Cond.clear(); 2612 FBB = nullptr; 2613 2614 // Delete the JMP if it's equivalent to a fall-through. 2615 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { 2616 TBB = nullptr; 2617 I->eraseFromParent(); 2618 I = MBB.end(); 2619 UnCondBrIter = MBB.end(); 2620 continue; 2621 } 2622 2623 // TBB is used to indicate the unconditional destination. 2624 TBB = I->getOperand(0).getMBB(); 2625 continue; 2626 } 2627 2628 // Handle conditional branches. 2629 X86::CondCode BranchCode = X86::getCondFromBranchOpc(I->getOpcode()); 2630 if (BranchCode == X86::COND_INVALID) 2631 return true; // Can't handle indirect branch. 2632 2633 // Working from the bottom, handle the first conditional branch. 2634 if (Cond.empty()) { 2635 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); 2636 if (AllowModify && UnCondBrIter != MBB.end() && 2637 MBB.isLayoutSuccessor(TargetBB)) { 2638 // If we can modify the code and it ends in something like: 2639 // 2640 // jCC L1 2641 // jmp L2 2642 // L1: 2643 // ... 2644 // L2: 2645 // 2646 // Then we can change this to: 2647 // 2648 // jnCC L2 2649 // L1: 2650 // ... 2651 // L2: 2652 // 2653 // Which is a bit more efficient. 2654 // We conditionally jump to the fall-through block. 2655 BranchCode = GetOppositeBranchCondition(BranchCode); 2656 unsigned JNCC = GetCondBranchFromCond(BranchCode); 2657 MachineBasicBlock::iterator OldInst = I; 2658 2659 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC)) 2660 .addMBB(UnCondBrIter->getOperand(0).getMBB()); 2661 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) 2662 .addMBB(TargetBB); 2663 2664 OldInst->eraseFromParent(); 2665 UnCondBrIter->eraseFromParent(); 2666 2667 // Restart the analysis. 2668 UnCondBrIter = MBB.end(); 2669 I = MBB.end(); 2670 continue; 2671 } 2672 2673 FBB = TBB; 2674 TBB = I->getOperand(0).getMBB(); 2675 Cond.push_back(MachineOperand::CreateImm(BranchCode)); 2676 CondBranches.push_back(&*I); 2677 continue; 2678 } 2679 2680 // Handle subsequent conditional branches. Only handle the case where all 2681 // conditional branches branch to the same destination and their condition 2682 // opcodes fit one of the special multi-branch idioms. 2683 assert(Cond.size() == 1); 2684 assert(TBB); 2685 2686 // If the conditions are the same, we can leave them alone. 2687 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); 2688 auto NewTBB = I->getOperand(0).getMBB(); 2689 if (OldBranchCode == BranchCode && TBB == NewTBB) 2690 continue; 2691 2692 // If they differ, see if they fit one of the known patterns. Theoretically, 2693 // we could handle more patterns here, but we shouldn't expect to see them 2694 // if instruction selection has done a reasonable job. 2695 if (TBB == NewTBB && 2696 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) || 2697 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) { 2698 BranchCode = X86::COND_NE_OR_P; 2699 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) || 2700 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) { 2701 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB))) 2702 return true; 2703 2704 // X86::COND_E_AND_NP usually has two different branch destinations. 
2705 // 2706 // JP B1 2707 // JE B2 2708 // JMP B1 2709 // B1: 2710 // B2: 2711 // 2712 // Here this condition branches to B2 only if NP && E. It has another 2713 // equivalent form: 2714 // 2715 // JNE B1 2716 // JNP B2 2717 // JMP B1 2718 // B1: 2719 // B2: 2720 // 2721 // Similarly it branches to B2 only if E && NP. That is why this condition 2722 // is named with COND_E_AND_NP. 2723 BranchCode = X86::COND_E_AND_NP; 2724 } else 2725 return true; 2726 2727 // Update the MachineOperand. 2728 Cond[0].setImm(BranchCode); 2729 CondBranches.push_back(&*I); 2730 } 2731 2732 return false; 2733} 2734 2735bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB, 2736 MachineBasicBlock *&TBB, 2737 MachineBasicBlock *&FBB, 2738 SmallVectorImpl<MachineOperand> &Cond, 2739 bool AllowModify) const { 2740 SmallVector<MachineInstr *, 4> CondBranches; 2741 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); 2742} 2743 2744bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, 2745 MachineBranchPredicate &MBP, 2746 bool AllowModify) const { 2747 using namespace std::placeholders; 2748 2749 SmallVector<MachineOperand, 4> Cond; 2750 SmallVector<MachineInstr *, 4> CondBranches; 2751 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, 2752 AllowModify)) 2753 return true; 2754 2755 if (Cond.size() != 1) 2756 return true; 2757 2758 assert(MBP.TrueDest && "expected!"); 2759 2760 if (!MBP.FalseDest) 2761 MBP.FalseDest = MBB.getNextNode(); 2762 2763 const TargetRegisterInfo *TRI = &getRegisterInfo(); 2764 2765 MachineInstr *ConditionDef = nullptr; 2766 bool SingleUseCondition = true; 2767 2768 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) { 2769 if (I->modifiesRegister(X86::EFLAGS, TRI)) { 2770 ConditionDef = &*I; 2771 break; 2772 } 2773 2774 if (I->readsRegister(X86::EFLAGS, TRI)) 2775 SingleUseCondition = false; 2776 } 2777 2778 if (!ConditionDef) 2779 return true; 2780 2781 if (SingleUseCondition) { 2782 for (auto *Succ : MBB.successors()) 2783 if (Succ->isLiveIn(X86::EFLAGS)) 2784 SingleUseCondition = false; 2785 } 2786 2787 MBP.ConditionDef = ConditionDef; 2788 MBP.SingleUseCondition = SingleUseCondition; 2789 2790 // Currently we only recognize the simple pattern: 2791 // 2792 // test %reg, %reg 2793 // je %label 2794 // 2795 const unsigned TestOpcode = 2796 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; 2797 2798 if (ConditionDef->getOpcode() == TestOpcode && 2799 ConditionDef->getNumOperands() == 3 && 2800 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && 2801 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { 2802 MBP.LHS = ConditionDef->getOperand(0); 2803 MBP.RHS = MachineOperand::CreateImm(0); 2804 MBP.Predicate = Cond[0].getImm() == X86::COND_NE 2805 ? MachineBranchPredicate::PRED_NE 2806 : MachineBranchPredicate::PRED_EQ; 2807 return false; 2808 } 2809 2810 return true; 2811} 2812 2813unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB, 2814 int *BytesRemoved) const { 2815 assert(!BytesRemoved && "code size not handled"); 2816 2817 MachineBasicBlock::iterator I = MBB.end(); 2818 unsigned Count = 0; 2819 2820 while (I != MBB.begin()) { 2821 --I; 2822 if (I->isDebugInstr()) 2823 continue; 2824 if (I->getOpcode() != X86::JMP_1 && 2825 X86::getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID) 2826 break; 2827 // Remove the branch. 
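// --- Illustrative sketch (editorial addition, standalone): why the composite
// conditions handled above need two JCCs. After UCOMISS/UCOMISD, "ordered and
// equal" is ZF=1 && PF=0, and its negation ("unordered or not equal") is
// ZF=0 || PF=1; neither maps onto a single condition code.
struct UComFlagsSketch { bool ZF, PF; };
static UComFlagsSketch ucomFlagsSketch(double A, double B) {
  if (A != A || B != B)   // unordered (a NaN input)
    return {true, true};  // ZF=1, PF=1 (CF is also 1, not modeled here)
  return {A == B, false}; // ordered: ZF tracks equality, PF is 0
}
static bool condEAndNP(UComFlagsSketch F) { return F.ZF && !F.PF; } // COND_E_AND_NP
static bool condNEOrP(UComFlagsSketch F) { return !F.ZF || F.PF; }  // COND_NE_OR_P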
2828 I->eraseFromParent(); 2829 I = MBB.end(); 2830 ++Count; 2831 } 2832 2833 return Count; 2834} 2835 2836unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB, 2837 MachineBasicBlock *TBB, 2838 MachineBasicBlock *FBB, 2839 ArrayRef<MachineOperand> Cond, 2840 const DebugLoc &DL, 2841 int *BytesAdded) const { 2842 // Shouldn't be a fall through. 2843 assert(TBB && "insertBranch must not be told to insert a fallthrough"); 2844 assert((Cond.size() == 1 || Cond.size() == 0) && 2845 "X86 branch conditions have one component!"); 2846 assert(!BytesAdded && "code size not handled"); 2847 2848 if (Cond.empty()) { 2849 // Unconditional branch? 2850 assert(!FBB && "Unconditional branch with multiple successors!"); 2851 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); 2852 return 1; 2853 } 2854 2855 // If FBB is null, it is implied to be a fall-through block. 2856 bool FallThru = FBB == nullptr; 2857 2858 // Conditional branch. 2859 unsigned Count = 0; 2860 X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); 2861 switch (CC) { 2862 case X86::COND_NE_OR_P: 2863 // Synthesize NE_OR_P with two branches. 2864 BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB); 2865 ++Count; 2866 BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB); 2867 ++Count; 2868 break; 2869 case X86::COND_E_AND_NP: 2870 // Use the next block of MBB as FBB if it is null. 2871 if (FBB == nullptr) { 2872 FBB = getFallThroughMBB(&MBB, TBB); 2873 assert(FBB && "MBB cannot be the last block in function when the false " 2874 "body is a fall-through."); 2875 } 2876 // Synthesize COND_E_AND_NP with two branches. 2877 BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(FBB); 2878 ++Count; 2879 BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB); 2880 ++Count; 2881 break; 2882 default: { 2883 unsigned Opc = GetCondBranchFromCond(CC); 2884 BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); 2885 ++Count; 2886 } 2887 } 2888 if (!FallThru) { 2889 // Two-way Conditional branch. Insert the second branch. 2890 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); 2891 ++Count; 2892 } 2893 return Count; 2894} 2895 2896bool X86InstrInfo:: 2897canInsertSelect(const MachineBasicBlock &MBB, 2898 ArrayRef<MachineOperand> Cond, 2899 unsigned TrueReg, unsigned FalseReg, 2900 int &CondCycles, int &TrueCycles, int &FalseCycles) const { 2901 // Not all subtargets have cmov instructions. 2902 if (!Subtarget.hasCMov()) 2903 return false; 2904 if (Cond.size() != 1) 2905 return false; 2906 // We cannot do the composite conditions, at least not in SSA form. 2907 if ((X86::CondCode)Cond[0].getImm() > X86::COND_S) 2908 return false; 2909 2910 // Check register classes. 2911 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2912 const TargetRegisterClass *RC = 2913 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); 2914 if (!RC) 2915 return false; 2916 2917 // We have cmov instructions for 16, 32, and 64 bit general purpose registers. 2918 if (X86::GR16RegClass.hasSubClassEq(RC) || 2919 X86::GR32RegClass.hasSubClassEq(RC) || 2920 X86::GR64RegClass.hasSubClassEq(RC)) { 2921 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy 2922 // Bridge. Probably Ivy Bridge as well. 2923 CondCycles = 2; 2924 TrueCycles = 2; 2925 FalseCycles = 2; 2926 return true; 2927 } 2928 2929 // Can't do vectors. 
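// --- Illustrative sketch (editorial addition, hypothetical caller): the
// remove/insert pairing that removeBranch and insertBranch above are written
// for, e.g. when a pass retargets a block's terminators after layout changes.
// NewTBB/NewFBB stand for whatever destinations the caller computed.
static void retargetBranchSketch(const X86InstrInfo &TII,
                                 MachineBasicBlock &MBB,
                                 MachineBasicBlock *NewTBB,
                                 MachineBasicBlock *NewFBB,
                                 ArrayRef<MachineOperand> Cond) {
  TII.removeBranch(MBB);                                   // drop old terminators
  TII.insertBranch(MBB, NewTBB, NewFBB, Cond, DebugLoc()); // emits 1-3 JCC/JMP_1
}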
2930 return false; 2931} 2932 2933void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, 2934 MachineBasicBlock::iterator I, 2935 const DebugLoc &DL, unsigned DstReg, 2936 ArrayRef<MachineOperand> Cond, unsigned TrueReg, 2937 unsigned FalseReg) const { 2938 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2939 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); 2940 const TargetRegisterClass &RC = *MRI.getRegClass(DstReg); 2941 assert(Cond.size() == 1 && "Invalid Cond array"); 2942 unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(), 2943 TRI.getRegSizeInBits(RC) / 8, 2944 false /*HasMemoryOperand*/); 2945 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg); 2946} 2947 2948/// Test if the given register is a physical h register. 2949static bool isHReg(unsigned Reg) { 2950 return X86::GR8_ABCD_HRegClass.contains(Reg); 2951} 2952 2953// Try and copy between VR128/VR64 and GR64 registers. 2954static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, 2955 const X86Subtarget &Subtarget) { 2956 bool HasAVX = Subtarget.hasAVX(); 2957 bool HasAVX512 = Subtarget.hasAVX512(); 2958 2959 // SrcReg(MaskReg) -> DestReg(GR64) 2960 // SrcReg(MaskReg) -> DestReg(GR32) 2961 2962 // All KMASK RegClasses hold the same k registers, can be tested against anyone. 2963 if (X86::VK16RegClass.contains(SrcReg)) { 2964 if (X86::GR64RegClass.contains(DestReg)) { 2965 assert(Subtarget.hasBWI()); 2966 return X86::KMOVQrk; 2967 } 2968 if (X86::GR32RegClass.contains(DestReg)) 2969 return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk; 2970 } 2971 2972 // SrcReg(GR64) -> DestReg(MaskReg) 2973 // SrcReg(GR32) -> DestReg(MaskReg) 2974 2975 // All KMASK RegClasses hold the same k registers, can be tested against anyone. 2976 if (X86::VK16RegClass.contains(DestReg)) { 2977 if (X86::GR64RegClass.contains(SrcReg)) { 2978 assert(Subtarget.hasBWI()); 2979 return X86::KMOVQkr; 2980 } 2981 if (X86::GR32RegClass.contains(SrcReg)) 2982 return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr; 2983 } 2984 2985 2986 // SrcReg(VR128) -> DestReg(GR64) 2987 // SrcReg(VR64) -> DestReg(GR64) 2988 // SrcReg(GR64) -> DestReg(VR128) 2989 // SrcReg(GR64) -> DestReg(VR64) 2990 2991 if (X86::GR64RegClass.contains(DestReg)) { 2992 if (X86::VR128XRegClass.contains(SrcReg)) 2993 // Copy from a VR128 register to a GR64 register. 2994 return HasAVX512 ? X86::VMOVPQIto64Zrr : 2995 HasAVX ? X86::VMOVPQIto64rr : 2996 X86::MOVPQIto64rr; 2997 if (X86::VR64RegClass.contains(SrcReg)) 2998 // Copy from a VR64 register to a GR64 register. 2999 return X86::MMX_MOVD64from64rr; 3000 } else if (X86::GR64RegClass.contains(SrcReg)) { 3001 // Copy from a GR64 register to a VR128 register. 3002 if (X86::VR128XRegClass.contains(DestReg)) 3003 return HasAVX512 ? X86::VMOV64toPQIZrr : 3004 HasAVX ? X86::VMOV64toPQIrr : 3005 X86::MOV64toPQIrr; 3006 // Copy from a GR64 register to a VR64 register. 3007 if (X86::VR64RegClass.contains(DestReg)) 3008 return X86::MMX_MOVD64to64rr; 3009 } 3010 3011 // SrcReg(FR32) -> DestReg(GR32) 3012 // SrcReg(GR32) -> DestReg(FR32) 3013 3014 if (X86::GR32RegClass.contains(DestReg) && 3015 X86::FR32XRegClass.contains(SrcReg)) 3016 // Copy from a FR32 register to a GR32 register. 3017 return HasAVX512 ? X86::VMOVSS2DIZrr : 3018 HasAVX ? X86::VMOVSS2DIrr : 3019 X86::MOVSS2DIrr; 3020 3021 if (X86::FR32XRegClass.contains(DestReg) && 3022 X86::GR32RegClass.contains(SrcReg)) 3023 // Copy from a GR32 register to a FR32 register. 3024 return HasAVX512 ? 
           HasAVX    ? X86::VMOVDI2SSrr  :
                       X86::MOVDI2SSrr;
  return 0;
}

void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, unsigned DestReg,
                               unsigned SrcReg, bool KillSrc) const {
  // First deal with the normal symmetric copies.
  bool HasAVX = Subtarget.hasAVX();
  bool HasVLX = Subtarget.hasVLX();
  unsigned Opc = 0;
  if (X86::GR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV64rr;
  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV32rr;
  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV16rr;
  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
    // Copying to or from a physical H register on x86-64 requires a NOREX
    // move. Otherwise use a normal move.
    if ((isHReg(DestReg) || isHReg(SrcReg)) &&
        Subtarget.is64Bit()) {
      Opc = X86::MOV8rr_NOREX;
      // Both operands must be encodable without a REX prefix.
      assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
             "8-bit H register can not be copied outside GR8_NOREX");
    } else
      Opc = X86::MOV8rr;
  }
  else if (X86::VR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MMX_MOVQ64rr;
  else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ128rr;
    else if (X86::VR128RegClass.contains(DestReg, SrcReg))
      Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
    else {
      // If this is an extended register and we don't have VLX we need to use a
      // 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
                                         &X86::VR512RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
                                        &X86::VR512RegClass);
    }
  } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ256rr;
    else if (X86::VR256RegClass.contains(DestReg, SrcReg))
      Opc = X86::VMOVAPSYrr;
    else {
      // If this is an extended register and we don't have VLX we need to use a
      // 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
                                         &X86::VR512RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
                                        &X86::VR512RegClass);
    }
  } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
    Opc = X86::VMOVAPSZrr;
  // All KMASK RegClasses hold the same k registers, so they can be tested
  // against any one of them.
  else if (X86::VK16RegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
  if (!Opc)
    Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);

  if (Opc) {
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
    // FIXME: We use a fatal error here because historically LLVM has tried to
    // lower some of these physreg copies and we want to ensure we get
    // reasonable bug reports if someone encounters a case no other testing
    // found. This path should be removed after the LLVM 7 release.
3107 report_fatal_error("Unable to copy EFLAGS physical register!"); 3108 } 3109 3110 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to " 3111 << RI.getName(DestReg) << '\n'); 3112 report_fatal_error("Cannot emit physreg copy instruction"); 3113} 3114 3115bool X86InstrInfo::isCopyInstr(const MachineInstr &MI, 3116 const MachineOperand *&Src, 3117 const MachineOperand *&Dest) const { 3118 if (MI.isMoveReg()) { 3119 Dest = &MI.getOperand(0); 3120 Src = &MI.getOperand(1); 3121 return true; 3122 } 3123 return false; 3124} 3125 3126static unsigned getLoadStoreRegOpcode(unsigned Reg, 3127 const TargetRegisterClass *RC, 3128 bool isStackAligned, 3129 const X86Subtarget &STI, 3130 bool load) { 3131 bool HasAVX = STI.hasAVX(); 3132 bool HasAVX512 = STI.hasAVX512(); 3133 bool HasVLX = STI.hasVLX(); 3134 3135 switch (STI.getRegisterInfo()->getSpillSize(*RC)) { 3136 default: 3137 llvm_unreachable("Unknown spill size"); 3138 case 1: 3139 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); 3140 if (STI.is64Bit()) 3141 // Copying to or from a physical H register on x86-64 requires a NOREX 3142 // move. Otherwise use a normal move. 3143 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) 3144 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; 3145 return load ? X86::MOV8rm : X86::MOV8mr; 3146 case 2: 3147 if (X86::VK16RegClass.hasSubClassEq(RC)) 3148 return load ? X86::KMOVWkm : X86::KMOVWmk; 3149 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); 3150 return load ? X86::MOV16rm : X86::MOV16mr; 3151 case 4: 3152 if (X86::GR32RegClass.hasSubClassEq(RC)) 3153 return load ? X86::MOV32rm : X86::MOV32mr; 3154 if (X86::FR32XRegClass.hasSubClassEq(RC)) 3155 return load ? 3156 (HasAVX512 ? X86::VMOVSSZrm : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) : 3157 (HasAVX512 ? X86::VMOVSSZmr : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr); 3158 if (X86::RFP32RegClass.hasSubClassEq(RC)) 3159 return load ? X86::LD_Fp32m : X86::ST_Fp32m; 3160 if (X86::VK32RegClass.hasSubClassEq(RC)) { 3161 assert(STI.hasBWI() && "KMOVD requires BWI"); 3162 return load ? X86::KMOVDkm : X86::KMOVDmk; 3163 } 3164 llvm_unreachable("Unknown 4-byte regclass"); 3165 case 8: 3166 if (X86::GR64RegClass.hasSubClassEq(RC)) 3167 return load ? X86::MOV64rm : X86::MOV64mr; 3168 if (X86::FR64XRegClass.hasSubClassEq(RC)) 3169 return load ? 3170 (HasAVX512 ? X86::VMOVSDZrm : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) : 3171 (HasAVX512 ? X86::VMOVSDZmr : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr); 3172 if (X86::VR64RegClass.hasSubClassEq(RC)) 3173 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; 3174 if (X86::RFP64RegClass.hasSubClassEq(RC)) 3175 return load ? X86::LD_Fp64m : X86::ST_Fp64m; 3176 if (X86::VK64RegClass.hasSubClassEq(RC)) { 3177 assert(STI.hasBWI() && "KMOVQ requires BWI"); 3178 return load ? X86::KMOVQkm : X86::KMOVQmk; 3179 } 3180 llvm_unreachable("Unknown 8-byte regclass"); 3181 case 10: 3182 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); 3183 return load ? X86::LD_Fp80m : X86::ST_FpP80m; 3184 case 16: { 3185 if (X86::VR128XRegClass.hasSubClassEq(RC)) { 3186 // If stack is realigned we can use aligned stores. 3187 if (isStackAligned) 3188 return load ? 3189 (HasVLX ? X86::VMOVAPSZ128rm : 3190 HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX : 3191 HasAVX ? X86::VMOVAPSrm : 3192 X86::MOVAPSrm): 3193 (HasVLX ? X86::VMOVAPSZ128mr : 3194 HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX : 3195 HasAVX ? X86::VMOVAPSmr : 3196 X86::MOVAPSmr); 3197 else 3198 return load ? 3199 (HasVLX ? 
X86::VMOVUPSZ128rm : 3200 HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX : 3201 HasAVX ? X86::VMOVUPSrm : 3202 X86::MOVUPSrm): 3203 (HasVLX ? X86::VMOVUPSZ128mr : 3204 HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX : 3205 HasAVX ? X86::VMOVUPSmr : 3206 X86::MOVUPSmr); 3207 } 3208 if (X86::BNDRRegClass.hasSubClassEq(RC)) { 3209 if (STI.is64Bit()) 3210 return load ? X86::BNDMOV64rm : X86::BNDMOV64mr; 3211 else 3212 return load ? X86::BNDMOV32rm : X86::BNDMOV32mr; 3213 } 3214 llvm_unreachable("Unknown 16-byte regclass"); 3215 } 3216 case 32: 3217 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass"); 3218 // If stack is realigned we can use aligned stores. 3219 if (isStackAligned) 3220 return load ? 3221 (HasVLX ? X86::VMOVAPSZ256rm : 3222 HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX : 3223 X86::VMOVAPSYrm) : 3224 (HasVLX ? X86::VMOVAPSZ256mr : 3225 HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX : 3226 X86::VMOVAPSYmr); 3227 else 3228 return load ? 3229 (HasVLX ? X86::VMOVUPSZ256rm : 3230 HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX : 3231 X86::VMOVUPSYrm) : 3232 (HasVLX ? X86::VMOVUPSZ256mr : 3233 HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX : 3234 X86::VMOVUPSYmr); 3235 case 64: 3236 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); 3237 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512"); 3238 if (isStackAligned) 3239 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; 3240 else 3241 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; 3242 } 3243} 3244 3245bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg, 3246 int64_t &Offset, 3247 const TargetRegisterInfo *TRI) const { 3248 const MCInstrDesc &Desc = MemOp.getDesc(); 3249 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); 3250 if (MemRefBegin < 0) 3251 return false; 3252 3253 MemRefBegin += X86II::getOperandBias(Desc); 3254 3255 MachineOperand &BaseMO = MemOp.getOperand(MemRefBegin + X86::AddrBaseReg); 3256 if (!BaseMO.isReg()) // Can be an MO_FrameIndex 3257 return false; 3258 3259 BaseReg = BaseMO.getReg(); 3260 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) 3261 return false; 3262 3263 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != 3264 X86::NoRegister) 3265 return false; 3266 3267 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp); 3268 3269 // Displacement can be symbolic 3270 if (!DispMO.isImm()) 3271 return false; 3272 3273 Offset = DispMO.getImm(); 3274 3275 return true; 3276} 3277 3278static unsigned getStoreRegOpcode(unsigned SrcReg, 3279 const TargetRegisterClass *RC, 3280 bool isStackAligned, 3281 const X86Subtarget &STI) { 3282 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false); 3283} 3284 3285 3286static unsigned getLoadRegOpcode(unsigned DestReg, 3287 const TargetRegisterClass *RC, 3288 bool isStackAligned, 3289 const X86Subtarget &STI) { 3290 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true); 3291} 3292 3293void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 3294 MachineBasicBlock::iterator MI, 3295 unsigned SrcReg, bool isKill, int FrameIdx, 3296 const TargetRegisterClass *RC, 3297 const TargetRegisterInfo *TRI) const { 3298 const MachineFunction &MF = *MBB.getParent(); 3299 assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) && 3300 "Stack slot too small for store"); 3301 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); 3302 bool isAligned = 3303 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 3304 
RI.canRealignStack(MF); 3305 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 3306 DebugLoc DL = MBB.findDebugLoc(MI); 3307 addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx) 3308 .addReg(SrcReg, getKillRegState(isKill)); 3309} 3310 3311void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 3312 bool isKill, 3313 SmallVectorImpl<MachineOperand> &Addr, 3314 const TargetRegisterClass *RC, 3315 MachineInstr::mmo_iterator MMOBegin, 3316 MachineInstr::mmo_iterator MMOEnd, 3317 SmallVectorImpl<MachineInstr*> &NewMIs) const { 3318 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 3319 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 3320 bool isAligned = MMOBegin != MMOEnd && 3321 (*MMOBegin)->getAlignment() >= Alignment; 3322 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 3323 DebugLoc DL; 3324 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); 3325 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 3326 MIB.add(Addr[i]); 3327 MIB.addReg(SrcReg, getKillRegState(isKill)); 3328 (*MIB).setMemRefs(MMOBegin, MMOEnd); 3329 NewMIs.push_back(MIB); 3330} 3331 3332 3333void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 3334 MachineBasicBlock::iterator MI, 3335 unsigned DestReg, int FrameIdx, 3336 const TargetRegisterClass *RC, 3337 const TargetRegisterInfo *TRI) const { 3338 const MachineFunction &MF = *MBB.getParent(); 3339 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); 3340 bool isAligned = 3341 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 3342 RI.canRealignStack(MF); 3343 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 3344 DebugLoc DL = MBB.findDebugLoc(MI); 3345 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); 3346} 3347 3348void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 3349 SmallVectorImpl<MachineOperand> &Addr, 3350 const TargetRegisterClass *RC, 3351 MachineInstr::mmo_iterator MMOBegin, 3352 MachineInstr::mmo_iterator MMOEnd, 3353 SmallVectorImpl<MachineInstr*> &NewMIs) const { 3354 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 3355 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 3356 bool isAligned = MMOBegin != MMOEnd && 3357 (*MMOBegin)->getAlignment() >= Alignment; 3358 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 3359 DebugLoc DL; 3360 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 3361 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 3362 MIB.add(Addr[i]); 3363 (*MIB).setMemRefs(MMOBegin, MMOEnd); 3364 NewMIs.push_back(MIB); 3365} 3366 3367bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, 3368 unsigned &SrcReg2, int &CmpMask, 3369 int &CmpValue) const { 3370 switch (MI.getOpcode()) { 3371 default: break; 3372 case X86::CMP64ri32: 3373 case X86::CMP64ri8: 3374 case X86::CMP32ri: 3375 case X86::CMP32ri8: 3376 case X86::CMP16ri: 3377 case X86::CMP16ri8: 3378 case X86::CMP8ri: 3379 SrcReg = MI.getOperand(0).getReg(); 3380 SrcReg2 = 0; 3381 if (MI.getOperand(1).isImm()) { 3382 CmpMask = ~0; 3383 CmpValue = MI.getOperand(1).getImm(); 3384 } else { 3385 CmpMask = CmpValue = 0; 3386 } 3387 return true; 3388 // A SUB can be used to perform comparison. 
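  // For example, in
  //   subl %esi, %edi
  //   jne  .LBB0_2
  // the SUB sets EFLAGS exactly as "cmpl %esi, %edi" would, so its flags can
  // serve as the comparison result (registers and label are illustrative).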
  case X86::SUB64rm:
  case X86::SUB32rm:
  case X86::SUB16rm:
  case X86::SUB8rm:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = 0;
    CmpValue = 0;
    return true;
  case X86::SUB64rr:
  case X86::SUB32rr:
  case X86::SUB16rr:
  case X86::SUB8rr:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = 0;
    CmpValue = 0;
    return true;
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB8ri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    if (MI.getOperand(2).isImm()) {
      CmpMask = ~0;
      CmpValue = MI.getOperand(2).getImm();
    } else {
      CmpMask = CmpValue = 0;
    }
    return true;
  case X86::CMP64rr:
  case X86::CMP32rr:
  case X86::CMP16rr:
  case X86::CMP8rr:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();
    CmpMask = 0;
    CmpValue = 0;
    return true;
  case X86::TEST8rr:
  case X86::TEST16rr:
  case X86::TEST32rr:
  case X86::TEST64rr:
    SrcReg = MI.getOperand(0).getReg();
    if (MI.getOperand(1).getReg() != SrcReg)
      return false;
    // Compare against zero.
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  }
  return false;
}

/// Check whether the first instruction, whose only purpose is to update
/// flags, can be made redundant. CMPrr can be made redundant by SUBrr if the
/// operands are the same. This function can be extended later on.
/// SrcReg, SrcReg2: register operands for FlagI.
/// ImmValue: immediate for FlagI if it takes an immediate.
inline static bool isRedundantFlagInstr(MachineInstr &FlagI, unsigned SrcReg,
                                        unsigned SrcReg2, int ImmMask,
                                        int ImmValue, MachineInstr &OI) {
  if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
       (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
       (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
       (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
      ((OI.getOperand(1).getReg() == SrcReg &&
        OI.getOperand(2).getReg() == SrcReg2) ||
       (OI.getOperand(1).getReg() == SrcReg2 &&
        OI.getOperand(2).getReg() == SrcReg)))
    return true;

  if (ImmMask != 0 &&
      ((FlagI.getOpcode() == X86::CMP64ri32 &&
        OI.getOpcode() == X86::SUB64ri32) ||
       (FlagI.getOpcode() == X86::CMP64ri8 &&
        OI.getOpcode() == X86::SUB64ri8) ||
       (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
       (FlagI.getOpcode() == X86::CMP32ri8 &&
        OI.getOpcode() == X86::SUB32ri8) ||
       (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
       (FlagI.getOpcode() == X86::CMP16ri8 &&
        OI.getOpcode() == X86::SUB16ri8) ||
       (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
      OI.getOperand(1).getReg() == SrcReg &&
      OI.getOperand(2).getImm() == ImmValue)
    return true;
  return false;
}

/// Check whether the definition can be converted
/// to remove a comparison against zero.
inline static bool isDefConvertible(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return false;

  // The shift instructions only modify ZF if their shift count is non-zero.
3492 // N.B.: The processor truncates the shift count depending on the encoding. 3493 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: 3494 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: 3495 return getTruncatedShiftCount(MI, 2) != 0; 3496 3497 // Some left shift instructions can be turned into LEA instructions but only 3498 // if their flags aren't used. Avoid transforming such instructions. 3499 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ 3500 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 3501 if (isTruncatedShiftCountForLEA(ShAmt)) return false; 3502 return ShAmt != 0; 3503 } 3504 3505 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: 3506 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: 3507 return getTruncatedShiftCount(MI, 3) != 0; 3508 3509 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: 3510 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: 3511 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: 3512 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: 3513 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: 3514 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: 3515 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: 3516 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: 3517 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: 3518 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: 3519 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: 3520 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: 3521 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: 3522 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: 3523 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: 3524 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: 3525 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: 3526 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: 3527 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: 3528 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: 3529 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: 3530 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: 3531 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: 3532 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: 3533 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: 3534 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: 3535 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: 3536 case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri: 3537 case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8: 3538 case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr: 3539 case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm: 3540 case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm: 3541 case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri: 3542 case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8: 3543 case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr: 3544 case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm: 3545 case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm: 3546 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: 3547 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: 3548 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: 3549 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: 3550 case X86::ANDN32rr: case X86::ANDN32rm: 3551 
case X86::ANDN64rr: case X86::ANDN64rm: 3552 case X86::BEXTR32rr: case X86::BEXTR64rr: 3553 case X86::BEXTR32rm: case X86::BEXTR64rm: 3554 case X86::BLSI32rr: case X86::BLSI32rm: 3555 case X86::BLSI64rr: case X86::BLSI64rm: 3556 case X86::BLSMSK32rr:case X86::BLSMSK32rm: 3557 case X86::BLSMSK64rr:case X86::BLSMSK64rm: 3558 case X86::BLSR32rr: case X86::BLSR32rm: 3559 case X86::BLSR64rr: case X86::BLSR64rm: 3560 case X86::BZHI32rr: case X86::BZHI32rm: 3561 case X86::BZHI64rr: case X86::BZHI64rm: 3562 case X86::LZCNT16rr: case X86::LZCNT16rm: 3563 case X86::LZCNT32rr: case X86::LZCNT32rm: 3564 case X86::LZCNT64rr: case X86::LZCNT64rm: 3565 case X86::POPCNT16rr:case X86::POPCNT16rm: 3566 case X86::POPCNT32rr:case X86::POPCNT32rm: 3567 case X86::POPCNT64rr:case X86::POPCNT64rm: 3568 case X86::TZCNT16rr: case X86::TZCNT16rm: 3569 case X86::TZCNT32rr: case X86::TZCNT32rm: 3570 case X86::TZCNT64rr: case X86::TZCNT64rm: 3571 case X86::BEXTRI32ri: case X86::BEXTRI32mi: 3572 case X86::BEXTRI64ri: case X86::BEXTRI64mi: 3573 case X86::BLCFILL32rr: case X86::BLCFILL32rm: 3574 case X86::BLCFILL64rr: case X86::BLCFILL64rm: 3575 case X86::BLCI32rr: case X86::BLCI32rm: 3576 case X86::BLCI64rr: case X86::BLCI64rm: 3577 case X86::BLCIC32rr: case X86::BLCIC32rm: 3578 case X86::BLCIC64rr: case X86::BLCIC64rm: 3579 case X86::BLCMSK32rr: case X86::BLCMSK32rm: 3580 case X86::BLCMSK64rr: case X86::BLCMSK64rm: 3581 case X86::BLCS32rr: case X86::BLCS32rm: 3582 case X86::BLCS64rr: case X86::BLCS64rm: 3583 case X86::BLSFILL32rr: case X86::BLSFILL32rm: 3584 case X86::BLSFILL64rr: case X86::BLSFILL64rm: 3585 case X86::BLSIC32rr: case X86::BLSIC32rm: 3586 case X86::BLSIC64rr: case X86::BLSIC64rm: 3587 return true; 3588 } 3589} 3590 3591/// Check whether the use can be converted to remove a comparison against zero. 3592static X86::CondCode isUseDefConvertible(MachineInstr &MI) { 3593 switch (MI.getOpcode()) { 3594 default: return X86::COND_INVALID; 3595 case X86::LZCNT16rr: case X86::LZCNT16rm: 3596 case X86::LZCNT32rr: case X86::LZCNT32rm: 3597 case X86::LZCNT64rr: case X86::LZCNT64rm: 3598 return X86::COND_B; 3599 case X86::POPCNT16rr:case X86::POPCNT16rm: 3600 case X86::POPCNT32rr:case X86::POPCNT32rm: 3601 case X86::POPCNT64rr:case X86::POPCNT64rm: 3602 return X86::COND_E; 3603 case X86::TZCNT16rr: case X86::TZCNT16rm: 3604 case X86::TZCNT32rr: case X86::TZCNT32rm: 3605 case X86::TZCNT64rr: case X86::TZCNT64rm: 3606 return X86::COND_B; 3607 case X86::BSF16rr: 3608 case X86::BSF16rm: 3609 case X86::BSF32rr: 3610 case X86::BSF32rm: 3611 case X86::BSF64rr: 3612 case X86::BSF64rm: 3613 return X86::COND_E; 3614 } 3615} 3616 3617/// Check if there exists an earlier instruction that 3618/// operates on the same source operands and sets flags in the same way as 3619/// Compare; remove Compare if possible. 3620bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, 3621 unsigned SrcReg2, int CmpMask, 3622 int CmpValue, 3623 const MachineRegisterInfo *MRI) const { 3624 // Check whether we can replace SUB with CMP. 
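  // For example, a "subl %esi, %eax" whose integer result is never used only
  // exists for its flags; rewriting it as "cmpl %esi, %eax" computes the same
  // EFLAGS without tying up %eax (registers are illustrative).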
3625 unsigned NewOpcode = 0; 3626 switch (CmpInstr.getOpcode()) { 3627 default: break; 3628 case X86::SUB64ri32: 3629 case X86::SUB64ri8: 3630 case X86::SUB32ri: 3631 case X86::SUB32ri8: 3632 case X86::SUB16ri: 3633 case X86::SUB16ri8: 3634 case X86::SUB8ri: 3635 case X86::SUB64rm: 3636 case X86::SUB32rm: 3637 case X86::SUB16rm: 3638 case X86::SUB8rm: 3639 case X86::SUB64rr: 3640 case X86::SUB32rr: 3641 case X86::SUB16rr: 3642 case X86::SUB8rr: { 3643 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) 3644 return false; 3645 // There is no use of the destination register, we can replace SUB with CMP. 3646 switch (CmpInstr.getOpcode()) { 3647 default: llvm_unreachable("Unreachable!"); 3648 case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; 3649 case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; 3650 case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; 3651 case X86::SUB8rm: NewOpcode = X86::CMP8rm; break; 3652 case X86::SUB64rr: NewOpcode = X86::CMP64rr; break; 3653 case X86::SUB32rr: NewOpcode = X86::CMP32rr; break; 3654 case X86::SUB16rr: NewOpcode = X86::CMP16rr; break; 3655 case X86::SUB8rr: NewOpcode = X86::CMP8rr; break; 3656 case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break; 3657 case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break; 3658 case X86::SUB32ri: NewOpcode = X86::CMP32ri; break; 3659 case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break; 3660 case X86::SUB16ri: NewOpcode = X86::CMP16ri; break; 3661 case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break; 3662 case X86::SUB8ri: NewOpcode = X86::CMP8ri; break; 3663 } 3664 CmpInstr.setDesc(get(NewOpcode)); 3665 CmpInstr.RemoveOperand(0); 3666 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri. 3667 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm || 3668 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm) 3669 return false; 3670 } 3671 } 3672 3673 // Get the unique definition of SrcReg. 3674 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 3675 if (!MI) return false; 3676 3677 // CmpInstr is the first instruction of the BB. 3678 MachineBasicBlock::iterator I = CmpInstr, Def = MI; 3679 3680 // If we are comparing against zero, check whether we can use MI to update 3681 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize. 3682 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0); 3683 if (IsCmpZero && MI->getParent() != CmpInstr.getParent()) 3684 return false; 3685 3686 // If we have a use of the source register between the def and our compare 3687 // instruction we can eliminate the compare iff the use sets EFLAGS in the 3688 // right way. 3689 bool ShouldUpdateCC = false; 3690 X86::CondCode NewCC = X86::COND_INVALID; 3691 if (IsCmpZero && !isDefConvertible(*MI)) { 3692 // Scan forward from the use until we hit the use we're looking for or the 3693 // compare instruction. 3694 for (MachineBasicBlock::iterator J = MI;; ++J) { 3695 // Do we have a convertible instruction? 3696 NewCC = isUseDefConvertible(*J); 3697 if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() && 3698 J->getOperand(1).getReg() == SrcReg) { 3699 assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!"); 3700 ShouldUpdateCC = true; // Update CC later on. 3701 // This is not a def of SrcReg, but still a def of EFLAGS. Keep going 3702 // with the new def. 3703 Def = J; 3704 MI = &*Def; 3705 break; 3706 } 3707 3708 if (J == I) 3709 return false; 3710 } 3711 } 3712 3713 // We are searching for an earlier instruction that can make CmpInstr 3714 // redundant and that instruction will be saved in Sub. 
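  // Illustrative example (AT&T syntax, registers arbitrary):
  //   subl %ebx, %eax      ; sets EFLAGS from %eax - %ebx
  //   ...
  //   cmpl %ebx, %eax      ; CmpInstr, recomputes the same flags
  // If nothing clobbers EFLAGS in between, the CMP is redundant and Sub will
  // point at the SUB.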
3715 MachineInstr *Sub = nullptr; 3716 const TargetRegisterInfo *TRI = &getRegisterInfo(); 3717 3718 // We iterate backward, starting from the instruction before CmpInstr and 3719 // stop when reaching the definition of a source register or done with the BB. 3720 // RI points to the instruction before CmpInstr. 3721 // If the definition is in this basic block, RE points to the definition; 3722 // otherwise, RE is the rend of the basic block. 3723 MachineBasicBlock::reverse_iterator 3724 RI = ++I.getReverse(), 3725 RE = CmpInstr.getParent() == MI->getParent() 3726 ? Def.getReverse() /* points to MI */ 3727 : CmpInstr.getParent()->rend(); 3728 MachineInstr *Movr0Inst = nullptr; 3729 for (; RI != RE; ++RI) { 3730 MachineInstr &Instr = *RI; 3731 // Check whether CmpInstr can be made redundant by the current instruction. 3732 if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, 3733 CmpValue, Instr)) { 3734 Sub = &Instr; 3735 break; 3736 } 3737 3738 if (Instr.modifiesRegister(X86::EFLAGS, TRI) || 3739 Instr.readsRegister(X86::EFLAGS, TRI)) { 3740 // This instruction modifies or uses EFLAGS. 3741 3742 // MOV32r0 etc. are implemented with xor which clobbers condition code. 3743 // They are safe to move up, if the definition to EFLAGS is dead and 3744 // earlier instructions do not read or write EFLAGS. 3745 if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 && 3746 Instr.registerDefIsDead(X86::EFLAGS, TRI)) { 3747 Movr0Inst = &Instr; 3748 continue; 3749 } 3750 3751 // We can't remove CmpInstr. 3752 return false; 3753 } 3754 } 3755 3756 // Return false if no candidates exist. 3757 if (!IsCmpZero && !Sub) 3758 return false; 3759 3760 bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && 3761 Sub->getOperand(2).getReg() == SrcReg); 3762 3763 // Scan forward from the instruction after CmpInstr for uses of EFLAGS. 3764 // It is safe to remove CmpInstr if EFLAGS is redefined or killed. 3765 // If we are done with the basic block, we need to check whether EFLAGS is 3766 // live-out. 3767 bool IsSafe = false; 3768 SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate; 3769 MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); 3770 for (++I; I != E; ++I) { 3771 const MachineInstr &Instr = *I; 3772 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); 3773 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); 3774 // We should check the usage if this instruction uses and updates EFLAGS. 3775 if (!UseEFLAGS && ModifyEFLAGS) { 3776 // It is safe to remove CmpInstr if EFLAGS is updated again. 3777 IsSafe = true; 3778 break; 3779 } 3780 if (!UseEFLAGS && !ModifyEFLAGS) 3781 continue; 3782 3783 // EFLAGS is used by this instruction. 3784 X86::CondCode OldCC = X86::COND_INVALID; 3785 bool OpcIsSET = false; 3786 if (IsCmpZero || IsSwapped) { 3787 // We decode the condition code from opcode. 
3788 if (Instr.isBranch()) 3789 OldCC = X86::getCondFromBranchOpc(Instr.getOpcode()); 3790 else { 3791 OldCC = X86::getCondFromSETOpc(Instr.getOpcode()); 3792 if (OldCC != X86::COND_INVALID) 3793 OpcIsSET = true; 3794 else 3795 OldCC = X86::getCondFromCMovOpc(Instr.getOpcode()); 3796 } 3797 if (OldCC == X86::COND_INVALID) return false; 3798 } 3799 X86::CondCode ReplacementCC = X86::COND_INVALID; 3800 if (IsCmpZero) { 3801 switch (OldCC) { 3802 default: break; 3803 case X86::COND_A: case X86::COND_AE: 3804 case X86::COND_B: case X86::COND_BE: 3805 case X86::COND_G: case X86::COND_GE: 3806 case X86::COND_L: case X86::COND_LE: 3807 case X86::COND_O: case X86::COND_NO: 3808 // CF and OF are used, we can't perform this optimization. 3809 return false; 3810 } 3811 3812 // If we're updating the condition code check if we have to reverse the 3813 // condition. 3814 if (ShouldUpdateCC) 3815 switch (OldCC) { 3816 default: 3817 return false; 3818 case X86::COND_E: 3819 ReplacementCC = NewCC; 3820 break; 3821 case X86::COND_NE: 3822 ReplacementCC = GetOppositeBranchCondition(NewCC); 3823 break; 3824 } 3825 } else if (IsSwapped) { 3826 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs 3827 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 3828 // We swap the condition code and synthesize the new opcode. 3829 ReplacementCC = getSwappedCondition(OldCC); 3830 if (ReplacementCC == X86::COND_INVALID) return false; 3831 } 3832 3833 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) { 3834 // Synthesize the new opcode. 3835 bool HasMemoryOperand = Instr.hasOneMemOperand(); 3836 unsigned NewOpc; 3837 if (Instr.isBranch()) 3838 NewOpc = GetCondBranchFromCond(ReplacementCC); 3839 else if(OpcIsSET) 3840 NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand); 3841 else { 3842 unsigned DstReg = Instr.getOperand(0).getReg(); 3843 const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); 3844 NewOpc = getCMovFromCond(ReplacementCC, TRI->getRegSizeInBits(*DstRC)/8, 3845 HasMemoryOperand); 3846 } 3847 3848 // Push the MachineInstr to OpsToUpdate. 3849 // If it is safe to remove CmpInstr, the condition code of these 3850 // instructions will be modified. 3851 OpsToUpdate.push_back(std::make_pair(&*I, NewOpc)); 3852 } 3853 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { 3854 // It is safe to remove CmpInstr if EFLAGS is updated again or killed. 3855 IsSafe = true; 3856 break; 3857 } 3858 } 3859 3860 // If EFLAGS is not killed nor re-defined, we should check whether it is 3861 // live-out. If it is live-out, do not optimize. 3862 if ((IsCmpZero || IsSwapped) && !IsSafe) { 3863 MachineBasicBlock *MBB = CmpInstr.getParent(); 3864 for (MachineBasicBlock *Successor : MBB->successors()) 3865 if (Successor->isLiveIn(X86::EFLAGS)) 3866 return false; 3867 } 3868 3869 // The instruction to be updated is either Sub or MI. 3870 Sub = IsCmpZero ? MI : Sub; 3871 // Move Movr0Inst to the appropriate place before Sub. 3872 if (Movr0Inst) { 3873 // Look backwards until we find a def that doesn't use the current EFLAGS. 
3874 Def = Sub; 3875 MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(), 3876 InsertE = Sub->getParent()->rend(); 3877 for (; InsertI != InsertE; ++InsertI) { 3878 MachineInstr *Instr = &*InsertI; 3879 if (!Instr->readsRegister(X86::EFLAGS, TRI) && 3880 Instr->modifiesRegister(X86::EFLAGS, TRI)) { 3881 Sub->getParent()->remove(Movr0Inst); 3882 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), 3883 Movr0Inst); 3884 break; 3885 } 3886 } 3887 if (InsertI == InsertE) 3888 return false; 3889 } 3890 3891 // Make sure Sub instruction defines EFLAGS and mark the def live. 3892 unsigned i = 0, e = Sub->getNumOperands(); 3893 for (; i != e; ++i) { 3894 MachineOperand &MO = Sub->getOperand(i); 3895 if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) { 3896 MO.setIsDead(false); 3897 break; 3898 } 3899 } 3900 assert(i != e && "Unable to locate a def EFLAGS operand"); 3901 3902 CmpInstr.eraseFromParent(); 3903 3904 // Modify the condition code of instructions in OpsToUpdate. 3905 for (auto &Op : OpsToUpdate) 3906 Op.first->setDesc(get(Op.second)); 3907 return true; 3908} 3909 3910/// Try to remove the load by folding it to a register 3911/// operand at the use. We fold the load instructions if load defines a virtual 3912/// register, the virtual register is used once in the same BB, and the 3913/// instructions in-between do not load or store, and have no side effects. 3914MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, 3915 const MachineRegisterInfo *MRI, 3916 unsigned &FoldAsLoadDefReg, 3917 MachineInstr *&DefMI) const { 3918 // Check whether we can move DefMI here. 3919 DefMI = MRI->getVRegDef(FoldAsLoadDefReg); 3920 assert(DefMI); 3921 bool SawStore = false; 3922 if (!DefMI->isSafeToMove(nullptr, SawStore)) 3923 return nullptr; 3924 3925 // Collect information about virtual register operands of MI. 3926 SmallVector<unsigned, 1> SrcOperandIds; 3927 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 3928 MachineOperand &MO = MI.getOperand(i); 3929 if (!MO.isReg()) 3930 continue; 3931 unsigned Reg = MO.getReg(); 3932 if (Reg != FoldAsLoadDefReg) 3933 continue; 3934 // Do not fold if we have a subreg use or a def. 3935 if (MO.getSubReg() || MO.isDef()) 3936 return nullptr; 3937 SrcOperandIds.push_back(i); 3938 } 3939 if (SrcOperandIds.empty()) 3940 return nullptr; 3941 3942 // Check whether we can fold the def into SrcOperandId. 3943 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) { 3944 FoldAsLoadDefReg = 0; 3945 return FoldMI; 3946 } 3947 3948 return nullptr; 3949} 3950 3951/// Expand a single-def pseudo instruction to a two-addr 3952/// instruction with two undef reads of the register being defined. 3953/// This is used for mapping: 3954/// %xmm4 = V_SET0 3955/// to: 3956/// %xmm4 = PXORrr undef %xmm4, undef %xmm4 3957/// 3958static bool Expand2AddrUndef(MachineInstrBuilder &MIB, 3959 const MCInstrDesc &Desc) { 3960 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); 3961 unsigned Reg = MIB->getOperand(0).getReg(); 3962 MIB->setDesc(Desc); 3963 3964 // MachineInstr::addOperand() will insert explicit operands before any 3965 // implicit operands. 3966 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 3967 // But we don't trust that. 3968 assert(MIB->getOperand(1).getReg() == Reg && 3969 MIB->getOperand(2).getReg() == Reg && "Misplaced operand"); 3970 return true; 3971} 3972 3973/// Expand a single-def pseudo instruction to a two-addr 3974/// instruction with two %k0 reads. 
3975/// This is used for mapping: 3976/// %k4 = K_SET1 3977/// to: 3978/// %k4 = KXNORrr %k0, %k0 3979static bool Expand2AddrKreg(MachineInstrBuilder &MIB, 3980 const MCInstrDesc &Desc, unsigned Reg) { 3981 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); 3982 MIB->setDesc(Desc); 3983 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 3984 return true; 3985} 3986 3987static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, 3988 bool MinusOne) { 3989 MachineBasicBlock &MBB = *MIB->getParent(); 3990 DebugLoc DL = MIB->getDebugLoc(); 3991 unsigned Reg = MIB->getOperand(0).getReg(); 3992 3993 // Insert the XOR. 3994 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) 3995 .addReg(Reg, RegState::Undef) 3996 .addReg(Reg, RegState::Undef); 3997 3998 // Turn the pseudo into an INC or DEC. 3999 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r)); 4000 MIB.addReg(Reg); 4001 4002 return true; 4003} 4004 4005static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, 4006 const TargetInstrInfo &TII, 4007 const X86Subtarget &Subtarget) { 4008 MachineBasicBlock &MBB = *MIB->getParent(); 4009 DebugLoc DL = MIB->getDebugLoc(); 4010 int64_t Imm = MIB->getOperand(1).getImm(); 4011 assert(Imm != 0 && "Using push/pop for 0 is not efficient."); 4012 MachineBasicBlock::iterator I = MIB.getInstr(); 4013 4014 int StackAdjustment; 4015 4016 if (Subtarget.is64Bit()) { 4017 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 || 4018 MIB->getOpcode() == X86::MOV32ImmSExti8); 4019 4020 // Can't use push/pop lowering if the function might write to the red zone. 4021 X86MachineFunctionInfo *X86FI = 4022 MBB.getParent()->getInfo<X86MachineFunctionInfo>(); 4023 if (X86FI->getUsesRedZone()) { 4024 MIB->setDesc(TII.get(MIB->getOpcode() == 4025 X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri)); 4026 return true; 4027 } 4028 4029 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and 4030 // widen the register if necessary. 4031 StackAdjustment = 8; 4032 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm); 4033 MIB->setDesc(TII.get(X86::POP64r)); 4034 MIB->getOperand(0) 4035 .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64)); 4036 } else { 4037 assert(MIB->getOpcode() == X86::MOV32ImmSExti8); 4038 StackAdjustment = 4; 4039 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); 4040 MIB->setDesc(TII.get(X86::POP32r)); 4041 } 4042 4043 // Build CFI if necessary. 4044 MachineFunction &MF = *MBB.getParent(); 4045 const X86FrameLowering *TFL = Subtarget.getFrameLowering(); 4046 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 4047 bool NeedsDwarfCFI = 4048 !IsWin64Prologue && 4049 (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry()); 4050 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI; 4051 if (EmitCFI) { 4052 TFL->BuildCFI(MBB, I, DL, 4053 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment)); 4054 TFL->BuildCFI(MBB, std::next(I), DL, 4055 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment)); 4056 } 4057 4058 return true; 4059} 4060 4061// LoadStackGuard has so far only been implemented for 64-bit MachO. Different 4062// code sequence is needed for other targets. 
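// Roughly, on 64-bit MachO the pseudo expands to (stack-guard symbol name and
// %reg are illustrative):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg   ; load the guard's address
//   movq (%reg), %reg                              ; load the guard value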
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
                                 const TargetInstrInfo &TII) {
  MachineBasicBlock &MBB = *MIB->getParent();
  DebugLoc DL = MIB->getDebugLoc();
  unsigned Reg = MIB->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
  auto Flags = MachineMemOperand::MOLoad |
               MachineMemOperand::MODereferenceable |
               MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
      MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
  MachineBasicBlock::iterator I = MIB.getInstr();

  BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
      .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
      .addMemOperand(MMO);
  MIB->setDebugLoc(DL);
  MIB->setDesc(TII.get(X86::MOV64rm));
  MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}

static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
  MachineBasicBlock &MBB = *MIB->getParent();
  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  unsigned XorOp =
      MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
  MIB->setDesc(TII.get(XorOp));
  MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
  return true;
}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that loads the lower 128/256 bits, but is available with only AVX512F.
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
                            const TargetRegisterInfo *TRI,
                            const MCInstrDesc &LoadDesc,
                            const MCInstrDesc &BroadcastDesc,
                            unsigned SubIdx) {
  unsigned DestReg = MIB->getOperand(0).getReg();
  // Check if DestReg is XMM16-31 or YMM16-31.
  if (TRI->getEncodingValue(DestReg) < 16) {
    // We can use a normal VEX encoded load.
    MIB->setDesc(LoadDesc);
  } else {
    // Use a 128/256-bit VBROADCAST instruction.
    MIB->setDesc(BroadcastDesc);
    // Change the destination to a 512-bit register.
    DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
    MIB->getOperand(0).setReg(DestReg);
  }
  return true;
}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that stores the lower 128/256 bits, but is available with only AVX512F.
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
                             const TargetRegisterInfo *TRI,
                             const MCInstrDesc &StoreDesc,
                             const MCInstrDesc &ExtractDesc,
                             unsigned SubIdx) {
  unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg();
  // Check if SrcReg is XMM16-31 or YMM16-31.
  if (TRI->getEncodingValue(SrcReg) < 16) {
    // We can use a normal VEX encoded store.
    MIB->setDesc(StoreDesc);
  } else {
    // Use a VEXTRACTF instruction.
    MIB->setDesc(ExtractDesc);
    // Change the source to a 512-bit register.
    SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
    MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
    MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4140 } 4141 4142 return true; 4143} 4144bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 4145 bool HasAVX = Subtarget.hasAVX(); 4146 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 4147 switch (MI.getOpcode()) { 4148 case X86::MOV32r0: 4149 return Expand2AddrUndef(MIB, get(X86::XOR32rr)); 4150 case X86::MOV32r1: 4151 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false); 4152 case X86::MOV32r_1: 4153 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true); 4154 case X86::MOV32ImmSExti8: 4155 case X86::MOV64ImmSExti8: 4156 return ExpandMOVImmSExti8(MIB, *this, Subtarget); 4157 case X86::SETB_C8r: 4158 return Expand2AddrUndef(MIB, get(X86::SBB8rr)); 4159 case X86::SETB_C16r: 4160 return Expand2AddrUndef(MIB, get(X86::SBB16rr)); 4161 case X86::SETB_C32r: 4162 return Expand2AddrUndef(MIB, get(X86::SBB32rr)); 4163 case X86::SETB_C64r: 4164 return Expand2AddrUndef(MIB, get(X86::SBB64rr)); 4165 case X86::MMX_SET0: 4166 return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr)); 4167 case X86::V_SET0: 4168 case X86::FsFLD0SS: 4169 case X86::FsFLD0SD: 4170 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); 4171 case X86::AVX_SET0: { 4172 assert(HasAVX && "AVX not supported"); 4173 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4174 unsigned SrcReg = MIB->getOperand(0).getReg(); 4175 unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); 4176 MIB->getOperand(0).setReg(XReg); 4177 Expand2AddrUndef(MIB, get(X86::VXORPSrr)); 4178 MIB.addReg(SrcReg, RegState::ImplicitDefine); 4179 return true; 4180 } 4181 case X86::AVX512_128_SET0: 4182 case X86::AVX512_FsFLD0SS: 4183 case X86::AVX512_FsFLD0SD: { 4184 bool HasVLX = Subtarget.hasVLX(); 4185 unsigned SrcReg = MIB->getOperand(0).getReg(); 4186 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4187 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) 4188 return Expand2AddrUndef(MIB, 4189 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); 4190 // Extended register without VLX. Use a larger XOR. 4191 SrcReg = 4192 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass); 4193 MIB->getOperand(0).setReg(SrcReg); 4194 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); 4195 } 4196 case X86::AVX512_256_SET0: 4197 case X86::AVX512_512_SET0: { 4198 bool HasVLX = Subtarget.hasVLX(); 4199 unsigned SrcReg = MIB->getOperand(0).getReg(); 4200 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4201 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { 4202 unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); 4203 MIB->getOperand(0).setReg(XReg); 4204 Expand2AddrUndef(MIB, 4205 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); 4206 MIB.addReg(SrcReg, RegState::ImplicitDefine); 4207 return true; 4208 } 4209 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); 4210 } 4211 case X86::V_SETALLONES: 4212 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); 4213 case X86::AVX2_SETALLONES: 4214 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); 4215 case X86::AVX1_SETALLONES: { 4216 unsigned Reg = MIB->getOperand(0).getReg(); 4217 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS. 4218 MIB->setDesc(get(X86::VCMPPSYrri)); 4219 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); 4220 return true; 4221 } 4222 case X86::AVX512_512_SETALLONES: { 4223 unsigned Reg = MIB->getOperand(0).getReg(); 4224 MIB->setDesc(get(X86::VPTERNLOGDZrri)); 4225 // VPTERNLOGD needs 3 register inputs and an immediate. 4226 // 0xff will return 1s for any input. 
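    // (The immediate is a 3-input truth table: 0xff maps every combination of
    // the three source bits to 1, so the result is all ones even though the
    // inputs are undef.)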
4227 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef) 4228 .addReg(Reg, RegState::Undef).addImm(0xff); 4229 return true; 4230 } 4231 case X86::AVX512_512_SEXT_MASK_32: 4232 case X86::AVX512_512_SEXT_MASK_64: { 4233 unsigned Reg = MIB->getOperand(0).getReg(); 4234 unsigned MaskReg = MIB->getOperand(1).getReg(); 4235 unsigned MaskState = getRegState(MIB->getOperand(1)); 4236 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? 4237 X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; 4238 MI.RemoveOperand(1); 4239 MIB->setDesc(get(Opc)); 4240 // VPTERNLOG needs 3 register inputs and an immediate. 4241 // 0xff will return 1s for any input. 4242 MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState) 4243 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff); 4244 return true; 4245 } 4246 case X86::VMOVAPSZ128rm_NOVLX: 4247 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm), 4248 get(X86::VBROADCASTF32X4rm), X86::sub_xmm); 4249 case X86::VMOVUPSZ128rm_NOVLX: 4250 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm), 4251 get(X86::VBROADCASTF32X4rm), X86::sub_xmm); 4252 case X86::VMOVAPSZ256rm_NOVLX: 4253 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm), 4254 get(X86::VBROADCASTF64X4rm), X86::sub_ymm); 4255 case X86::VMOVUPSZ256rm_NOVLX: 4256 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm), 4257 get(X86::VBROADCASTF64X4rm), X86::sub_ymm); 4258 case X86::VMOVAPSZ128mr_NOVLX: 4259 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr), 4260 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); 4261 case X86::VMOVUPSZ128mr_NOVLX: 4262 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr), 4263 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); 4264 case X86::VMOVAPSZ256mr_NOVLX: 4265 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr), 4266 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); 4267 case X86::VMOVUPSZ256mr_NOVLX: 4268 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), 4269 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); 4270 case X86::MOV32ri64: 4271 MI.setDesc(get(X86::MOV32ri)); 4272 return true; 4273 4274 // KNL does not recognize dependency-breaking idioms for mask registers, 4275 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1. 4276 // Using %k0 as the undef input register is a performance heuristic based 4277 // on the assumption that %k0 is used less frequently than the other mask 4278 // registers, since it is not usable as a write mask. 4279 // FIXME: A more advanced approach would be to choose the best input mask 4280 // register based on context. 4281 case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0); 4282 case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0); 4283 case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0); 4284 case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0); 4285 case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0); 4286 case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0); 4287 case TargetOpcode::LOAD_STACK_GUARD: 4288 expandLoadStackGuard(MIB, *this); 4289 return true; 4290 case X86::XOR64_FP: 4291 case X86::XOR32_FP: 4292 return expandXorFP(MIB, *this); 4293 } 4294 return false; 4295} 4296 4297/// Return true for all instructions that only update 4298/// the first 32 or 64-bits of the destination register and leave the rest 4299/// unmodified. 
/// This can be used to avoid folding loads if the instructions only update
/// part of the destination register, and the non-updated part is not needed,
/// e.g. cvtss2sd, sqrtss. Unfolding the load from these instructions breaks
/// the partial register dependency and it can improve performance, e.g.:
///
///   movss (%rdi), %xmm0
///   cvtss2sd %xmm0, %xmm0
///
/// Instead of
///   cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags.
///
static bool hasPartialRegUpdate(unsigned Opcode,
                                const X86Subtarget &Subtarget) {
  switch (Opcode) {
  case X86::CVTSI2SSrr:
  case X86::CVTSI2SSrm:
  case X86::CVTSI642SSrr:
  case X86::CVTSI642SSrm:
  case X86::CVTSI2SDrr:
  case X86::CVTSI2SDrm:
  case X86::CVTSI642SDrr:
  case X86::CVTSI642SDrm:
  case X86::CVTSD2SSrr:
  case X86::CVTSD2SSrm:
  case X86::CVTSS2SDrr:
  case X86::CVTSS2SDrm:
  case X86::MOVHPDrm:
  case X86::MOVHPSrm:
  case X86::MOVLPDrm:
  case X86::MOVLPSrm:
  case X86::RCPSSr:
  case X86::RCPSSm:
  case X86::RCPSSr_Int:
  case X86::RCPSSm_Int:
  case X86::ROUNDSDr:
  case X86::ROUNDSDm:
  case X86::ROUNDSSr:
  case X86::ROUNDSSm:
  case X86::RSQRTSSr:
  case X86::RSQRTSSm:
  case X86::RSQRTSSr_Int:
  case X86::RSQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSm:
  case X86::SQRTSSr_Int:
  case X86::SQRTSSm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDm:
  case X86::SQRTSDr_Int:
  case X86::SQRTSDm_Int:
    return true;
  // GPR
  case X86::POPCNT32rm:
  case X86::POPCNT32rr:
  case X86::POPCNT64rm:
  case X86::POPCNT64rr:
    return Subtarget.hasPOPCNTFalseDeps();
  case X86::LZCNT32rm:
  case X86::LZCNT32rr:
  case X86::LZCNT64rm:
  case X86::LZCNT64rr:
  case X86::TZCNT32rm:
  case X86::TZCNT32rr:
  case X86::TZCNT64rm:
  case X86::TZCNT64rr:
    return Subtarget.hasLZCNTFalseDeps();
  }

  return false;
}

/// Inform the BreakFalseDeps pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {
  if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
    return 0;

  // If MI is marked as reading Reg, the partial register update is wanted.
  const MachineOperand &MO = MI.getOperand(0);
  unsigned Reg = MO.getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    if (MO.readsReg() || MI.readsVirtualRegister(Reg))
      return 0;
  } else {
    if (MI.readsRegister(Reg, TRI))
      return 0;
  }

  // If any instructions in the clearance range are reading Reg, insert a
  // dependency breaking instruction, which is inexpensive and is likely to
  // be hidden in other instructions' cycles.
  return PartialRegUpdateClearance;
}

// Return true for any instruction that copies the high bits of the first
// source operand into the unused high bits of the destination operand.
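// For example (AVX scalar convert, registers illustrative):
//   vcvtsi2ss %eax, %xmm1, %xmm0
// writes only the low 32 bits of %xmm0 and copies the remaining bits from
// %xmm1, which is why an undef read of %xmm1 is worth breaking.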
4400static bool hasUndefRegUpdate(unsigned Opcode) { 4401 switch (Opcode) { 4402 case X86::VCVTSI2SSrr: 4403 case X86::VCVTSI2SSrm: 4404 case X86::VCVTSI2SSrr_Int: 4405 case X86::VCVTSI2SSrm_Int: 4406 case X86::VCVTSI642SSrr: 4407 case X86::VCVTSI642SSrm: 4408 case X86::VCVTSI642SSrr_Int: 4409 case X86::VCVTSI642SSrm_Int: 4410 case X86::VCVTSI2SDrr: 4411 case X86::VCVTSI2SDrm: 4412 case X86::VCVTSI2SDrr_Int: 4413 case X86::VCVTSI2SDrm_Int: 4414 case X86::VCVTSI642SDrr: 4415 case X86::VCVTSI642SDrm: 4416 case X86::VCVTSI642SDrr_Int: 4417 case X86::VCVTSI642SDrm_Int: 4418 case X86::VCVTSD2SSrr: 4419 case X86::VCVTSD2SSrm: 4420 case X86::VCVTSD2SSrr_Int: 4421 case X86::VCVTSD2SSrm_Int: 4422 case X86::VCVTSS2SDrr: 4423 case X86::VCVTSS2SDrm: 4424 case X86::VCVTSS2SDrr_Int: 4425 case X86::VCVTSS2SDrm_Int: 4426 case X86::VRCPSSr: 4427 case X86::VRCPSSr_Int: 4428 case X86::VRCPSSm: 4429 case X86::VRCPSSm_Int: 4430 case X86::VROUNDSDr: 4431 case X86::VROUNDSDm: 4432 case X86::VROUNDSDr_Int: 4433 case X86::VROUNDSDm_Int: 4434 case X86::VROUNDSSr: 4435 case X86::VROUNDSSm: 4436 case X86::VROUNDSSr_Int: 4437 case X86::VROUNDSSm_Int: 4438 case X86::VRSQRTSSr: 4439 case X86::VRSQRTSSr_Int: 4440 case X86::VRSQRTSSm: 4441 case X86::VRSQRTSSm_Int: 4442 case X86::VSQRTSSr: 4443 case X86::VSQRTSSr_Int: 4444 case X86::VSQRTSSm: 4445 case X86::VSQRTSSm_Int: 4446 case X86::VSQRTSDr: 4447 case X86::VSQRTSDr_Int: 4448 case X86::VSQRTSDm: 4449 case X86::VSQRTSDm_Int: 4450 // AVX-512 4451 case X86::VCVTSI2SSZrr: 4452 case X86::VCVTSI2SSZrm: 4453 case X86::VCVTSI2SSZrr_Int: 4454 case X86::VCVTSI2SSZrrb_Int: 4455 case X86::VCVTSI2SSZrm_Int: 4456 case X86::VCVTSI642SSZrr: 4457 case X86::VCVTSI642SSZrm: 4458 case X86::VCVTSI642SSZrr_Int: 4459 case X86::VCVTSI642SSZrrb_Int: 4460 case X86::VCVTSI642SSZrm_Int: 4461 case X86::VCVTSI2SDZrr: 4462 case X86::VCVTSI2SDZrm: 4463 case X86::VCVTSI2SDZrr_Int: 4464 case X86::VCVTSI2SDZrrb_Int: 4465 case X86::VCVTSI2SDZrm_Int: 4466 case X86::VCVTSI642SDZrr: 4467 case X86::VCVTSI642SDZrm: 4468 case X86::VCVTSI642SDZrr_Int: 4469 case X86::VCVTSI642SDZrrb_Int: 4470 case X86::VCVTSI642SDZrm_Int: 4471 case X86::VCVTUSI2SSZrr: 4472 case X86::VCVTUSI2SSZrm: 4473 case X86::VCVTUSI2SSZrr_Int: 4474 case X86::VCVTUSI2SSZrrb_Int: 4475 case X86::VCVTUSI2SSZrm_Int: 4476 case X86::VCVTUSI642SSZrr: 4477 case X86::VCVTUSI642SSZrm: 4478 case X86::VCVTUSI642SSZrr_Int: 4479 case X86::VCVTUSI642SSZrrb_Int: 4480 case X86::VCVTUSI642SSZrm_Int: 4481 case X86::VCVTUSI2SDZrr: 4482 case X86::VCVTUSI2SDZrm: 4483 case X86::VCVTUSI2SDZrr_Int: 4484 case X86::VCVTUSI2SDZrm_Int: 4485 case X86::VCVTUSI642SDZrr: 4486 case X86::VCVTUSI642SDZrm: 4487 case X86::VCVTUSI642SDZrr_Int: 4488 case X86::VCVTUSI642SDZrrb_Int: 4489 case X86::VCVTUSI642SDZrm_Int: 4490 case X86::VCVTSD2SSZrr: 4491 case X86::VCVTSD2SSZrr_Int: 4492 case X86::VCVTSD2SSZrrb_Int: 4493 case X86::VCVTSD2SSZrm: 4494 case X86::VCVTSD2SSZrm_Int: 4495 case X86::VCVTSS2SDZrr: 4496 case X86::VCVTSS2SDZrr_Int: 4497 case X86::VCVTSS2SDZrrb_Int: 4498 case X86::VCVTSS2SDZrm: 4499 case X86::VCVTSS2SDZrm_Int: 4500 case X86::VGETEXPSDZr: 4501 case X86::VGETEXPSDZrb: 4502 case X86::VGETEXPSDZm: 4503 case X86::VGETEXPSSZr: 4504 case X86::VGETEXPSSZrb: 4505 case X86::VGETEXPSSZm: 4506 case X86::VGETMANTSDZrri: 4507 case X86::VGETMANTSDZrrib: 4508 case X86::VGETMANTSDZrmi: 4509 case X86::VGETMANTSSZrri: 4510 case X86::VGETMANTSSZrrib: 4511 case X86::VGETMANTSSZrmi: 4512 case X86::VRNDSCALESDZr: 4513 case X86::VRNDSCALESDZr_Int: 4514 case X86::VRNDSCALESDZrb_Int: 4515 
case X86::VRNDSCALESDZm:
4516 case X86::VRNDSCALESDZm_Int:
4517 case X86::VRNDSCALESSZr:
4518 case X86::VRNDSCALESSZr_Int:
4519 case X86::VRNDSCALESSZrb_Int:
4520 case X86::VRNDSCALESSZm:
4521 case X86::VRNDSCALESSZm_Int:
4522 case X86::VRCP14SDZrr:
4523 case X86::VRCP14SDZrm:
4524 case X86::VRCP14SSZrr:
4525 case X86::VRCP14SSZrm:
4526 case X86::VRCP28SDZr:
4527 case X86::VRCP28SDZrb:
4528 case X86::VRCP28SDZm:
4529 case X86::VRCP28SSZr:
4530 case X86::VRCP28SSZrb:
4531 case X86::VRCP28SSZm:
4532 case X86::VREDUCESSZrmi:
4533 case X86::VREDUCESSZrri:
4534 case X86::VREDUCESSZrrib:
4535 case X86::VRSQRT14SDZrr:
4536 case X86::VRSQRT14SDZrm:
4537 case X86::VRSQRT14SSZrr:
4538 case X86::VRSQRT14SSZrm:
4539 case X86::VRSQRT28SDZr:
4540 case X86::VRSQRT28SDZrb:
4541 case X86::VRSQRT28SDZm:
4542 case X86::VRSQRT28SSZr:
4543 case X86::VRSQRT28SSZrb:
4544 case X86::VRSQRT28SSZm:
4545 case X86::VSQRTSSZr:
4546 case X86::VSQRTSSZr_Int:
4547 case X86::VSQRTSSZrb_Int:
4548 case X86::VSQRTSSZm:
4549 case X86::VSQRTSSZm_Int:
4550 case X86::VSQRTSDZr:
4551 case X86::VSQRTSDZr_Int:
4552 case X86::VSQRTSDZrb_Int:
4553 case X86::VSQRTSDZm:
4554 case X86::VSQRTSDZm_Int:
4555 return true;
4556 }
4557
4558 return false;
4559}
4560
4561/// Inform the BreakFalseDeps pass how many idle instructions we would like
4562/// before certain undef register reads.
4563///
4564/// This catches the VCVTSI2SD family of instructions:
4565///
4566/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
4567///
4568/// We should be careful *not* to catch VXOR idioms which are presumably
4569/// handled specially in the pipeline:
4570///
4571/// vxorps undef %xmm1, undef %xmm1, %xmm1
4572///
4573/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
4574/// high bits that are passed-through are not live.
4575unsigned
4576X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
4577 const TargetRegisterInfo *TRI) const {
4578 if (!hasUndefRegUpdate(MI.getOpcode()))
4579 return 0;
4580
4581 // Set the OpNum parameter to the first source operand.
4582 OpNum = 1;
4583
4584 const MachineOperand &MO = MI.getOperand(OpNum);
4585 if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
4586 return UndefRegClearance;
4587 }
4588 return 0;
4589}
4590
4591void X86InstrInfo::breakPartialRegDependency(
4592 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
4593 unsigned Reg = MI.getOperand(OpNum).getReg();
4594 // If MI kills this register, the false dependence is already broken.
4595 if (MI.killsRegister(Reg, TRI))
4596 return;
4597
4598 if (X86::VR128RegClass.contains(Reg)) {
4599 // These instructions are all floating point domain, so xorps is the best
4600 // choice.
4601 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
4602 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
4603 .addReg(Reg, RegState::Undef)
4604 .addReg(Reg, RegState::Undef);
4605 MI.addRegisterKilled(Reg, TRI, true);
4606 } else if (X86::VR256RegClass.contains(Reg)) {
4607 // Use vxorps to clear the full ymm register.
4608 // It wants to read and write the xmm sub-register.
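// A sketch of the idiom emitted below for, say, %ymm2 (register name
// illustrative):
//   %xmm2 = VXORPSrr undef %xmm2, undef %xmm2, implicit-def %ymm2
// A VEX-encoded write to the xmm sub-register zeroes the upper half of the
// ymm register as well, and marking both inputs undef keeps the xor itself
// free of dependencies.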
4609 unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm); 4610 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg) 4611 .addReg(XReg, RegState::Undef) 4612 .addReg(XReg, RegState::Undef) 4613 .addReg(Reg, RegState::ImplicitDefine); 4614 MI.addRegisterKilled(Reg, TRI, true); 4615 } else if (X86::GR64RegClass.contains(Reg)) { 4616 // Using XOR32rr because it has shorter encoding and zeros up the upper bits 4617 // as well. 4618 unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit); 4619 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg) 4620 .addReg(XReg, RegState::Undef) 4621 .addReg(XReg, RegState::Undef) 4622 .addReg(Reg, RegState::ImplicitDefine); 4623 MI.addRegisterKilled(Reg, TRI, true); 4624 } else if (X86::GR32RegClass.contains(Reg)) { 4625 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg) 4626 .addReg(Reg, RegState::Undef) 4627 .addReg(Reg, RegState::Undef); 4628 MI.addRegisterKilled(Reg, TRI, true); 4629 } 4630} 4631 4632static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs, 4633 int PtrOffset = 0) { 4634 unsigned NumAddrOps = MOs.size(); 4635 4636 if (NumAddrOps < 4) { 4637 // FrameIndex only - add an immediate offset (whether its zero or not). 4638 for (unsigned i = 0; i != NumAddrOps; ++i) 4639 MIB.add(MOs[i]); 4640 addOffset(MIB, PtrOffset); 4641 } else { 4642 // General Memory Addressing - we need to add any offset to an existing 4643 // offset. 4644 assert(MOs.size() == 5 && "Unexpected memory operand list length"); 4645 for (unsigned i = 0; i != NumAddrOps; ++i) { 4646 const MachineOperand &MO = MOs[i]; 4647 if (i == 3 && PtrOffset != 0) { 4648 MIB.addDisp(MO, PtrOffset); 4649 } else { 4650 MIB.add(MO); 4651 } 4652 } 4653 } 4654} 4655 4656static void updateOperandRegConstraints(MachineFunction &MF, 4657 MachineInstr &NewMI, 4658 const TargetInstrInfo &TII) { 4659 MachineRegisterInfo &MRI = MF.getRegInfo(); 4660 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); 4661 4662 for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) { 4663 MachineOperand &MO = NewMI.getOperand(Idx); 4664 // We only need to update constraints on virtual register operands. 4665 if (!MO.isReg()) 4666 continue; 4667 unsigned Reg = MO.getReg(); 4668 if (!TRI.isVirtualRegister(Reg)) 4669 continue; 4670 4671 auto *NewRC = MRI.constrainRegClass( 4672 Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF)); 4673 if (!NewRC) { 4674 LLVM_DEBUG( 4675 dbgs() << "WARNING: Unable to update register constraint for operand " 4676 << Idx << " of instruction:\n"; 4677 NewMI.dump(); dbgs() << "\n"); 4678 } 4679 } 4680} 4681 4682static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, 4683 ArrayRef<MachineOperand> MOs, 4684 MachineBasicBlock::iterator InsertPt, 4685 MachineInstr &MI, 4686 const TargetInstrInfo &TII) { 4687 // Create the base instruction with the memory operand as the first part. 4688 // Omit the implicit operands, something BuildMI can't do. 4689 MachineInstr *NewMI = 4690 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); 4691 MachineInstrBuilder MIB(MF, NewMI); 4692 addOperands(MIB, MOs); 4693 4694 // Loop over the rest of the ri operands, converting them over. 
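// Hedged example of the two-address fold this helper performs (rough MIR,
// register and slot names made up): spilling the tied def/use of
//   %eax = ADD32rr %eax(tied), %ebx
// replaces *both* tied operands with the five address operands appended by
// addOperands() above, giving approximately
//   ADD32mr %stack.0, 1, $noreg, 0, $noreg, %ebx
// which is why the copy loop below starts at explicit operand 2 of MI.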
4695 unsigned NumOps = MI.getDesc().getNumOperands() - 2; 4696 for (unsigned i = 0; i != NumOps; ++i) { 4697 MachineOperand &MO = MI.getOperand(i + 2); 4698 MIB.add(MO); 4699 } 4700 for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) { 4701 MachineOperand &MO = MI.getOperand(i); 4702 MIB.add(MO); 4703 } 4704 4705 updateOperandRegConstraints(MF, *NewMI, TII); 4706 4707 MachineBasicBlock *MBB = InsertPt->getParent(); 4708 MBB->insert(InsertPt, NewMI); 4709 4710 return MIB; 4711} 4712 4713static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, 4714 unsigned OpNo, ArrayRef<MachineOperand> MOs, 4715 MachineBasicBlock::iterator InsertPt, 4716 MachineInstr &MI, const TargetInstrInfo &TII, 4717 int PtrOffset = 0) { 4718 // Omit the implicit operands, something BuildMI can't do. 4719 MachineInstr *NewMI = 4720 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); 4721 MachineInstrBuilder MIB(MF, NewMI); 4722 4723 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4724 MachineOperand &MO = MI.getOperand(i); 4725 if (i == OpNo) { 4726 assert(MO.isReg() && "Expected to fold into reg operand!"); 4727 addOperands(MIB, MOs, PtrOffset); 4728 } else { 4729 MIB.add(MO); 4730 } 4731 } 4732 4733 updateOperandRegConstraints(MF, *NewMI, TII); 4734 4735 MachineBasicBlock *MBB = InsertPt->getParent(); 4736 MBB->insert(InsertPt, NewMI); 4737 4738 return MIB; 4739} 4740 4741static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, 4742 ArrayRef<MachineOperand> MOs, 4743 MachineBasicBlock::iterator InsertPt, 4744 MachineInstr &MI) { 4745 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, 4746 MI.getDebugLoc(), TII.get(Opcode)); 4747 addOperands(MIB, MOs); 4748 return MIB.addImm(0); 4749} 4750 4751MachineInstr *X86InstrInfo::foldMemoryOperandCustom( 4752 MachineFunction &MF, MachineInstr &MI, unsigned OpNum, 4753 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, 4754 unsigned Size, unsigned Align) const { 4755 switch (MI.getOpcode()) { 4756 case X86::INSERTPSrr: 4757 case X86::VINSERTPSrr: 4758 case X86::VINSERTPSZrr: 4759 // Attempt to convert the load of inserted vector into a fold load 4760 // of a single float. 4761 if (OpNum == 2) { 4762 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); 4763 unsigned ZMask = Imm & 15; 4764 unsigned DstIdx = (Imm >> 4) & 3; 4765 unsigned SrcIdx = (Imm >> 6) & 3; 4766 4767 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 4768 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); 4769 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; 4770 if (Size <= RCSize && 4 <= Align) { 4771 int PtrOffset = SrcIdx * 4; 4772 unsigned NewImm = (DstIdx << 4) | ZMask; 4773 unsigned NewOpCode = 4774 (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm : 4775 (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm : 4776 X86::INSERTPSrm; 4777 MachineInstr *NewMI = 4778 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); 4779 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); 4780 return NewMI; 4781 } 4782 } 4783 break; 4784 case X86::MOVHLPSrr: 4785 case X86::VMOVHLPSrr: 4786 case X86::VMOVHLPSZrr: 4787 // Move the upper 64-bits of the second operand to the lower 64-bits. 4788 // To fold the load, adjust the pointer to the upper and use (V)MOVLPS. 4789 // TODO: In most cases AVX doesn't have a 8-byte alignment requirement. 
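// Sketch of the intended rewrite (registers and addressing illustrative):
//   movaps (%rdi), %xmm1
//   movhlps %xmm1, %xmm0        // xmm0[63:0] = xmm1[127:64]
// becomes
//   movlps 8(%rdi), %xmm0       // read the high 8 bytes directly
// i.e. the vector load is folded by offsetting the pointer by 8 and switching
// to the (V)MOVLPS form, as described above.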
if (OpNum == 2) {
4791 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4792 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
4793 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
4794 if (Size <= RCSize && 8 <= Align) {
4795 unsigned NewOpCode =
4796 (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
4797 (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
4798 X86::MOVLPSrm;
4799 MachineInstr *NewMI =
4800 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
4801 return NewMI;
4802 }
4803 }
4804 break;
4805 };
4806
4807 return nullptr;
4808}
4809
4810static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, MachineInstr &MI) {
4811 if (MF.getFunction().optForSize() || !hasUndefRegUpdate(MI.getOpcode()) ||
4812 !MI.getOperand(1).isReg())
4813 return false;
4814
4815 // There are two cases we need to handle depending on where in the pipeline
4816 // the folding attempt is being made.
4817 // -Register has the undef flag set.
4818 // -Register is produced by the IMPLICIT_DEF instruction.
4819
4820 if (MI.getOperand(1).isUndef())
4821 return true;
4822
4823 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4824 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
4825 return VRegDef && VRegDef->isImplicitDef();
4826}
4827
4828
4829MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
4830 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
4831 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
4832 unsigned Size, unsigned Align, bool AllowCommute) const {
4833 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
4834 bool isTwoAddrFold = false;
4835
4836 // For CPUs that favor the register form of a call or push,
4837 // do not fold loads into calls or pushes, unless optimizing for size
4838 // aggressively.
4839 if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
4840 (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
4841 MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
4842 MI.getOpcode() == X86::PUSH64r))
4843 return nullptr;
4844
4845 // Avoid partial and undef register update stalls unless optimizing for size.
4846 if (!MF.getFunction().optForSize() &&
4847 (hasPartialRegUpdate(MI.getOpcode(), Subtarget) ||
4848 shouldPreventUndefRegUpdateMemFold(MF, MI)))
4849 return nullptr;
4850
4851 unsigned NumOps = MI.getDesc().getNumOperands();
4852 bool isTwoAddr =
4853 NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
4854
4855 // FIXME: AsmPrinter doesn't know how to handle
4856 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
4857 if (MI.getOpcode() == X86::ADD32ri &&
4858 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
4859 return nullptr;
4860
4861 // GOTTPOFF relocation loads can only be folded into add instructions.
4862 // FIXME: Need to exclude other relocations that only support specific
4863 // instructions.
4864 if (MOs.size() == X86::AddrNumOperands &&
4865 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
4866 MI.getOpcode() != X86::ADD64rr)
4867 return nullptr;
4868
4869 MachineInstr *NewMI = nullptr;
4870
4871 // Attempt to fold any custom cases we have.
4872 if (MachineInstr *CustomMI = 4873 foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align)) 4874 return CustomMI; 4875 4876 const X86MemoryFoldTableEntry *I = nullptr; 4877 4878 // Folding a memory location into the two-address part of a two-address 4879 // instruction is different than folding it other places. It requires 4880 // replacing the *two* registers with the memory location. 4881 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() && 4882 MI.getOperand(1).isReg() && 4883 MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { 4884 I = lookupTwoAddrFoldTable(MI.getOpcode()); 4885 isTwoAddrFold = true; 4886 } else { 4887 if (OpNum == 0) { 4888 if (MI.getOpcode() == X86::MOV32r0) { 4889 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); 4890 if (NewMI) 4891 return NewMI; 4892 } 4893 } 4894 4895 I = lookupFoldTable(MI.getOpcode(), OpNum); 4896 } 4897 4898 if (I != nullptr) { 4899 unsigned Opcode = I->DstOp; 4900 unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; 4901 if (Align < MinAlign) 4902 return nullptr; 4903 bool NarrowToMOV32rm = false; 4904 if (Size) { 4905 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 4906 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, 4907 &RI, MF); 4908 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; 4909 if (Size < RCSize) { 4910 // Check if it's safe to fold the load. If the size of the object is 4911 // narrower than the load width, then it's not. 4912 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) 4913 return nullptr; 4914 // If this is a 64-bit load, but the spill slot is 32, then we can do 4915 // a 32-bit load which is implicitly zero-extended. This likely is 4916 // due to live interval analysis remat'ing a load from stack slot. 4917 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) 4918 return nullptr; 4919 Opcode = X86::MOV32rm; 4920 NarrowToMOV32rm = true; 4921 } 4922 } 4923 4924 if (isTwoAddrFold) 4925 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); 4926 else 4927 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); 4928 4929 if (NarrowToMOV32rm) { 4930 // If this is the special case where we use a MOV32rm to load a 32-bit 4931 // value and zero-extend the top bits. Change the destination register 4932 // to a 32-bit one. 4933 unsigned DstReg = NewMI->getOperand(0).getReg(); 4934 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 4935 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); 4936 else 4937 NewMI->getOperand(0).setSubReg(X86::sub_32bit); 4938 } 4939 return NewMI; 4940 } 4941 4942 // If the instruction and target operand are commutable, commute the 4943 // instruction and try again. 4944 if (AllowCommute) { 4945 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; 4946 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { 4947 bool HasDef = MI.getDesc().getNumDefs(); 4948 unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0; 4949 unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); 4950 unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); 4951 bool Tied1 = 4952 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); 4953 bool Tied2 = 4954 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); 4955 4956 // If either of the commutable operands are tied to the destination 4957 // then we can not commute + fold. 
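// Rough example (hedged): folding a reload into the first source of a
// commutable op like MULSDrr has no fold-table entry (that slot is tied to
// the destination), but commuting the two sources and folding into the second
// source does; the check below only rejects commutes that would disturb an
// operand that must stay identical to the def.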
4958 if ((HasDef && Reg0 == Reg1 && Tied1) || 4959 (HasDef && Reg0 == Reg2 && Tied2)) 4960 return nullptr; 4961 4962 MachineInstr *CommutedMI = 4963 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 4964 if (!CommutedMI) { 4965 // Unable to commute. 4966 return nullptr; 4967 } 4968 if (CommutedMI != &MI) { 4969 // New instruction. We can't fold from this. 4970 CommutedMI->eraseFromParent(); 4971 return nullptr; 4972 } 4973 4974 // Attempt to fold with the commuted version of the instruction. 4975 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, 4976 Size, Align, /*AllowCommute=*/false); 4977 if (NewMI) 4978 return NewMI; 4979 4980 // Folding failed again - undo the commute before returning. 4981 MachineInstr *UncommutedMI = 4982 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 4983 if (!UncommutedMI) { 4984 // Unable to commute. 4985 return nullptr; 4986 } 4987 if (UncommutedMI != &MI) { 4988 // New instruction. It doesn't need to be kept. 4989 UncommutedMI->eraseFromParent(); 4990 return nullptr; 4991 } 4992 4993 // Return here to prevent duplicate fuse failure report. 4994 return nullptr; 4995 } 4996 } 4997 4998 // No fusion 4999 if (PrintFailedFusing && !MI.isCopy()) 5000 dbgs() << "We failed to fuse operand " << OpNum << " in " << MI; 5001 return nullptr; 5002} 5003 5004MachineInstr * 5005X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, 5006 ArrayRef<unsigned> Ops, 5007 MachineBasicBlock::iterator InsertPt, 5008 int FrameIndex, LiveIntervals *LIS) const { 5009 // Check switch flag 5010 if (NoFusing) 5011 return nullptr; 5012 5013 // Avoid partial and undef register update stalls unless optimizing for size. 5014 if (!MF.getFunction().optForSize() && 5015 (hasPartialRegUpdate(MI.getOpcode(), Subtarget) || 5016 shouldPreventUndefRegUpdateMemFold(MF, MI))) 5017 return nullptr; 5018 5019 // Don't fold subreg spills, or reloads that use a high subreg. 5020 for (auto Op : Ops) { 5021 MachineOperand &MO = MI.getOperand(Op); 5022 auto SubReg = MO.getSubReg(); 5023 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi)) 5024 return nullptr; 5025 } 5026 5027 const MachineFrameInfo &MFI = MF.getFrameInfo(); 5028 unsigned Size = MFI.getObjectSize(FrameIndex); 5029 unsigned Alignment = MFI.getObjectAlignment(FrameIndex); 5030 // If the function stack isn't realigned we don't want to fold instructions 5031 // that need increased alignment. 5032 if (!RI.needsStackRealignment(MF)) 5033 Alignment = 5034 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment()); 5035 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 5036 unsigned NewOpc = 0; 5037 unsigned RCSize = 0; 5038 switch (MI.getOpcode()) { 5039 default: return nullptr; 5040 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; 5041 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; 5042 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; 5043 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; 5044 } 5045 // Check if it's safe to fold the load. If the size of the object is 5046 // narrower than the load width, then it's not. 5047 if (Size < RCSize) 5048 return nullptr; 5049 // Change to CMPXXri r, 0 first. 
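// Sketch of the rewrite performed just below (operands illustrative):
//   TEST32rr %eax, %eax          // both uses are the register being spilled
// first becomes
//   CMP32ri8 %eax, 0             // same EFLAGS result, only one register use
// and the remaining register operand can then be folded against the stack
// slot by the call at the end of this function, giving roughly
//   CMP32mi8 %stack.N, 1, $noreg, 0, $noreg, 0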
5050 MI.setDesc(get(NewOpc)); 5051 MI.getOperand(1).ChangeToImmediate(0); 5052 } else if (Ops.size() != 1) 5053 return nullptr; 5054 5055 return foldMemoryOperandImpl(MF, MI, Ops[0], 5056 MachineOperand::CreateFI(FrameIndex), InsertPt, 5057 Size, Alignment, /*AllowCommute=*/true); 5058} 5059 5060/// Check if \p LoadMI is a partial register load that we can't fold into \p MI 5061/// because the latter uses contents that wouldn't be defined in the folded 5062/// version. For instance, this transformation isn't legal: 5063/// movss (%rdi), %xmm0 5064/// addps %xmm0, %xmm0 5065/// -> 5066/// addps (%rdi), %xmm0 5067/// 5068/// But this one is: 5069/// movss (%rdi), %xmm0 5070/// addss %xmm0, %xmm0 5071/// -> 5072/// addss (%rdi), %xmm0 5073/// 5074static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, 5075 const MachineInstr &UserMI, 5076 const MachineFunction &MF) { 5077 unsigned Opc = LoadMI.getOpcode(); 5078 unsigned UserOpc = UserMI.getOpcode(); 5079 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 5080 const TargetRegisterClass *RC = 5081 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); 5082 unsigned RegSize = TRI.getRegSizeInBits(*RC); 5083 5084 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) && 5085 RegSize > 32) { 5086 // These instructions only load 32 bits, we can't fold them if the 5087 // destination register is wider than 32 bits (4 bytes), and its user 5088 // instruction isn't scalar (SS). 5089 switch (UserOpc) { 5090 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int: 5091 case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int: 5092 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int: 5093 case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int: 5094 case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int: 5095 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int: 5096 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int: 5097 case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz: 5098 case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz: 5099 case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz: 5100 case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz: 5101 case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz: 5102 case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz: 5103 case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int: 5104 case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int: 5105 case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int: 5106 case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int: 5107 case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int: 5108 case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int: 5109 case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int: 5110 case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int: 5111 case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int: 5112 case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int: 5113 case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int: 5114 case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int: 5115 case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int: 5116 case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int: 5117 case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk: 5118 case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk: 5119 case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk: 5120 case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk: 
5121 case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk: 5122 case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk: 5123 case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz: 5124 case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz: 5125 case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz: 5126 case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz: 5127 case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz: 5128 case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz: 5129 return false; 5130 default: 5131 return true; 5132 } 5133 } 5134 5135 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) && 5136 RegSize > 64) { 5137 // These instructions only load 64 bits, we can't fold them if the 5138 // destination register is wider than 64 bits (8 bytes), and its user 5139 // instruction isn't scalar (SD). 5140 switch (UserOpc) { 5141 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int: 5142 case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int: 5143 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int: 5144 case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int: 5145 case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int: 5146 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int: 5147 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int: 5148 case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz: 5149 case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz: 5150 case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz: 5151 case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz: 5152 case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz: 5153 case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz: 5154 case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int: 5155 case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int: 5156 case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int: 5157 case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int: 5158 case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int: 5159 case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int: 5160 case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int: 5161 case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int: 5162 case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int: 5163 case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int: 5164 case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int: 5165 case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int: 5166 case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int: 5167 case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int: 5168 case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk: 5169 case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk: 5170 case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk: 5171 case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk: 5172 case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk: 5173 case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk: 5174 case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz: 5175 case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz: 5176 case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz: 5177 case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz: 5178 case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz: 5179 case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz: 5180 return false; 5181 default: 5182 return true; 5183 } 5184 
} 5185 5186 return false; 5187} 5188 5189MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 5190 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 5191 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, 5192 LiveIntervals *LIS) const { 5193 5194 // TODO: Support the case where LoadMI loads a wide register, but MI 5195 // only uses a subreg. 5196 for (auto Op : Ops) { 5197 if (MI.getOperand(Op).getSubReg()) 5198 return nullptr; 5199 } 5200 5201 // If loading from a FrameIndex, fold directly from the FrameIndex. 5202 unsigned NumOps = LoadMI.getDesc().getNumOperands(); 5203 int FrameIndex; 5204 if (isLoadFromStackSlot(LoadMI, FrameIndex)) { 5205 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) 5206 return nullptr; 5207 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS); 5208 } 5209 5210 // Check switch flag 5211 if (NoFusing) return nullptr; 5212 5213 // Avoid partial and undef register update stalls unless optimizing for size. 5214 if (!MF.getFunction().optForSize() && 5215 (hasPartialRegUpdate(MI.getOpcode(), Subtarget) || 5216 shouldPreventUndefRegUpdateMemFold(MF, MI))) 5217 return nullptr; 5218 5219 // Determine the alignment of the load. 5220 unsigned Alignment = 0; 5221 if (LoadMI.hasOneMemOperand()) 5222 Alignment = (*LoadMI.memoperands_begin())->getAlignment(); 5223 else 5224 switch (LoadMI.getOpcode()) { 5225 case X86::AVX512_512_SET0: 5226 case X86::AVX512_512_SETALLONES: 5227 Alignment = 64; 5228 break; 5229 case X86::AVX2_SETALLONES: 5230 case X86::AVX1_SETALLONES: 5231 case X86::AVX_SET0: 5232 case X86::AVX512_256_SET0: 5233 Alignment = 32; 5234 break; 5235 case X86::V_SET0: 5236 case X86::V_SETALLONES: 5237 case X86::AVX512_128_SET0: 5238 Alignment = 16; 5239 break; 5240 case X86::MMX_SET0: 5241 case X86::FsFLD0SD: 5242 case X86::AVX512_FsFLD0SD: 5243 Alignment = 8; 5244 break; 5245 case X86::FsFLD0SS: 5246 case X86::AVX512_FsFLD0SS: 5247 Alignment = 4; 5248 break; 5249 default: 5250 return nullptr; 5251 } 5252 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 5253 unsigned NewOpc = 0; 5254 switch (MI.getOpcode()) { 5255 default: return nullptr; 5256 case X86::TEST8rr: NewOpc = X86::CMP8ri; break; 5257 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; 5258 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; 5259 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; 5260 } 5261 // Change to CMPXXri r, 0 first. 5262 MI.setDesc(get(NewOpc)); 5263 MI.getOperand(1).ChangeToImmediate(0); 5264 } else if (Ops.size() != 1) 5265 return nullptr; 5266 5267 // Make sure the subregisters match. 5268 // Otherwise we risk changing the size of the load. 5269 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg()) 5270 return nullptr; 5271 5272 SmallVector<MachineOperand,X86::AddrNumOperands> MOs; 5273 switch (LoadMI.getOpcode()) { 5274 case X86::MMX_SET0: 5275 case X86::V_SET0: 5276 case X86::V_SETALLONES: 5277 case X86::AVX2_SETALLONES: 5278 case X86::AVX1_SETALLONES: 5279 case X86::AVX_SET0: 5280 case X86::AVX512_128_SET0: 5281 case X86::AVX512_256_SET0: 5282 case X86::AVX512_512_SET0: 5283 case X86::AVX512_512_SETALLONES: 5284 case X86::FsFLD0SD: 5285 case X86::AVX512_FsFLD0SD: 5286 case X86::FsFLD0SS: 5287 case X86::AVX512_FsFLD0SS: { 5288 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. 5289 // Create a constant-pool entry and operands to load from it. 5290 5291 // Medium and large mode can't fold loads this way. 
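// Hedged illustration of the fold being set up here (virtual register names
// made up): a zero/ones idiom feeding another instruction, e.g.
//   %1 = V_SET0
//   %2 = ANDNPSrr %2(tied), %1
// can instead read an all-zeros constant-pool entry, roughly
//   %2 = ANDNPSrm %2(tied), $rip, 1, $noreg, %const.0, $noreg
// (with a zero base register instead of $rip when not compiling PIC). The
// code-model check below exists because the constant pool must be addressable
// with a plain 32-bit absolute or RIP-relative displacement.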
5292 if (MF.getTarget().getCodeModel() != CodeModel::Small && 5293 MF.getTarget().getCodeModel() != CodeModel::Kernel) 5294 return nullptr; 5295 5296 // x86-32 PIC requires a PIC base register for constant pools. 5297 unsigned PICBase = 0; 5298 if (MF.getTarget().isPositionIndependent()) { 5299 if (Subtarget.is64Bit()) 5300 PICBase = X86::RIP; 5301 else 5302 // FIXME: PICBase = getGlobalBaseReg(&MF); 5303 // This doesn't work for several reasons. 5304 // 1. GlobalBaseReg may have been spilled. 5305 // 2. It may not be live at MI. 5306 return nullptr; 5307 } 5308 5309 // Create a constant-pool entry. 5310 MachineConstantPool &MCP = *MF.getConstantPool(); 5311 Type *Ty; 5312 unsigned Opc = LoadMI.getOpcode(); 5313 if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS) 5314 Ty = Type::getFloatTy(MF.getFunction().getContext()); 5315 else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD) 5316 Ty = Type::getDoubleTy(MF.getFunction().getContext()); 5317 else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES) 5318 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16); 5319 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 || 5320 Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES) 5321 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8); 5322 else if (Opc == X86::MMX_SET0) 5323 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2); 5324 else 5325 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4); 5326 5327 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES || 5328 Opc == X86::AVX512_512_SETALLONES || 5329 Opc == X86::AVX1_SETALLONES); 5330 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : 5331 Constant::getNullValue(Ty); 5332 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); 5333 5334 // Create operands to load from the constant pool entry. 5335 MOs.push_back(MachineOperand::CreateReg(PICBase, false)); 5336 MOs.push_back(MachineOperand::CreateImm(1)); 5337 MOs.push_back(MachineOperand::CreateReg(0, false)); 5338 MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); 5339 MOs.push_back(MachineOperand::CreateReg(0, false)); 5340 break; 5341 } 5342 default: { 5343 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) 5344 return nullptr; 5345 5346 // Folding a normal load. Just copy the load's address operands. 5347 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, 5348 LoadMI.operands_begin() + NumOps); 5349 break; 5350 } 5351 } 5352 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, 5353 /*Size=*/0, Alignment, /*AllowCommute=*/true); 5354} 5355 5356bool X86InstrInfo::unfoldMemoryOperand( 5357 MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, 5358 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { 5359 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode()); 5360 if (I == nullptr) 5361 return false; 5362 unsigned Opc = I->DstOp; 5363 unsigned Index = I->Flags & TB_INDEX_MASK; 5364 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5365 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5366 if (UnfoldLoad && !FoldedLoad) 5367 return false; 5368 UnfoldLoad &= FoldedLoad; 5369 if (UnfoldStore && !FoldedStore) 5370 return false; 5371 UnfoldStore &= FoldedStore; 5372 5373 const MCInstrDesc &MCID = get(Opc); 5374 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 5375 // TODO: Check if 32-byte or greater accesses are slow too? 
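// For orientation, a hedged sketch of what a successful unfold leaves in
// NewMIs (exact opcodes come from the unfold table, names made up): a folded
// instruction such as
//   %eax = ADD32rm %eax(tied), %rsp, 1, $noreg, 16, $noreg
// is split back into a plain reload into Reg followed by the register form,
//   Reg = MOV32rm %rsp, 1, $noreg, 16, $noreg
//   %eax = ADD32rr %eax(tied), Reg
// plus a trailing store when the folded operand was a store. The check just
// below refuses to unfold 128-bit vector accesses whose alignment is unknown
// on subtargets where unaligned 16-byte memory operations are slow.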
5376 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass && 5377 Subtarget.isUnalignedMem16Slow()) 5378 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will 5379 // conservatively assume the address is unaligned. That's bad for 5380 // performance. 5381 return false; 5382 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; 5383 SmallVector<MachineOperand,2> BeforeOps; 5384 SmallVector<MachineOperand,2> AfterOps; 5385 SmallVector<MachineOperand,4> ImpOps; 5386 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 5387 MachineOperand &Op = MI.getOperand(i); 5388 if (i >= Index && i < Index + X86::AddrNumOperands) 5389 AddrOps.push_back(Op); 5390 else if (Op.isReg() && Op.isImplicit()) 5391 ImpOps.push_back(Op); 5392 else if (i < Index) 5393 BeforeOps.push_back(Op); 5394 else if (i > Index) 5395 AfterOps.push_back(Op); 5396 } 5397 5398 // Emit the load instruction. 5399 if (UnfoldLoad) { 5400 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs = 5401 MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 5402 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs); 5403 if (UnfoldStore) { 5404 // Address operands cannot be marked isKill. 5405 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { 5406 MachineOperand &MO = NewMIs[0]->getOperand(i); 5407 if (MO.isReg()) 5408 MO.setIsKill(false); 5409 } 5410 } 5411 } 5412 5413 // Emit the data processing instruction. 5414 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true); 5415 MachineInstrBuilder MIB(MF, DataMI); 5416 5417 if (FoldedStore) 5418 MIB.addReg(Reg, RegState::Define); 5419 for (MachineOperand &BeforeOp : BeforeOps) 5420 MIB.add(BeforeOp); 5421 if (FoldedLoad) 5422 MIB.addReg(Reg); 5423 for (MachineOperand &AfterOp : AfterOps) 5424 MIB.add(AfterOp); 5425 for (MachineOperand &ImpOp : ImpOps) { 5426 MIB.addReg(ImpOp.getReg(), 5427 getDefRegState(ImpOp.isDef()) | 5428 RegState::Implicit | 5429 getKillRegState(ImpOp.isKill()) | 5430 getDeadRegState(ImpOp.isDead()) | 5431 getUndefRegState(ImpOp.isUndef())); 5432 } 5433 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 5434 switch (DataMI->getOpcode()) { 5435 default: break; 5436 case X86::CMP64ri32: 5437 case X86::CMP64ri8: 5438 case X86::CMP32ri: 5439 case X86::CMP32ri8: 5440 case X86::CMP16ri: 5441 case X86::CMP16ri8: 5442 case X86::CMP8ri: { 5443 MachineOperand &MO0 = DataMI->getOperand(0); 5444 MachineOperand &MO1 = DataMI->getOperand(1); 5445 if (MO1.getImm() == 0) { 5446 unsigned NewOpc; 5447 switch (DataMI->getOpcode()) { 5448 default: llvm_unreachable("Unreachable!"); 5449 case X86::CMP64ri8: 5450 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; 5451 case X86::CMP32ri8: 5452 case X86::CMP32ri: NewOpc = X86::TEST32rr; break; 5453 case X86::CMP16ri8: 5454 case X86::CMP16ri: NewOpc = X86::TEST16rr; break; 5455 case X86::CMP8ri: NewOpc = X86::TEST8rr; break; 5456 } 5457 DataMI->setDesc(get(NewOpc)); 5458 MO1.ChangeToRegister(MO0.getReg(), false); 5459 } 5460 } 5461 } 5462 NewMIs.push_back(DataMI); 5463 5464 // Emit the store instruction. 
5465 if (UnfoldStore) { 5466 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); 5467 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs = 5468 MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 5469 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs); 5470 } 5471 5472 return true; 5473} 5474 5475bool 5476X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 5477 SmallVectorImpl<SDNode*> &NewNodes) const { 5478 if (!N->isMachineOpcode()) 5479 return false; 5480 5481 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode()); 5482 if (I == nullptr) 5483 return false; 5484 unsigned Opc = I->DstOp; 5485 unsigned Index = I->Flags & TB_INDEX_MASK; 5486 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5487 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5488 const MCInstrDesc &MCID = get(Opc); 5489 MachineFunction &MF = DAG.getMachineFunction(); 5490 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 5491 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 5492 unsigned NumDefs = MCID.NumDefs; 5493 std::vector<SDValue> AddrOps; 5494 std::vector<SDValue> BeforeOps; 5495 std::vector<SDValue> AfterOps; 5496 SDLoc dl(N); 5497 unsigned NumOps = N->getNumOperands(); 5498 for (unsigned i = 0; i != NumOps-1; ++i) { 5499 SDValue Op = N->getOperand(i); 5500 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) 5501 AddrOps.push_back(Op); 5502 else if (i < Index-NumDefs) 5503 BeforeOps.push_back(Op); 5504 else if (i > Index-NumDefs) 5505 AfterOps.push_back(Op); 5506 } 5507 SDValue Chain = N->getOperand(NumOps-1); 5508 AddrOps.push_back(Chain); 5509 5510 // Emit the load instruction. 5511 SDNode *Load = nullptr; 5512 if (FoldedLoad) { 5513 EVT VT = *TRI.legalclasstypes_begin(*RC); 5514 std::pair<MachineInstr::mmo_iterator, 5515 MachineInstr::mmo_iterator> MMOs = 5516 MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 5517 cast<MachineSDNode>(N)->memoperands_end()); 5518 if (!(*MMOs.first) && 5519 RC == &X86::VR128RegClass && 5520 Subtarget.isUnalignedMem16Slow()) 5521 // Do not introduce a slow unaligned load. 5522 return false; 5523 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 5524 // memory access is slow above. 5525 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 5526 bool isAligned = (*MMOs.first) && 5527 (*MMOs.first)->getAlignment() >= Alignment; 5528 Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl, 5529 VT, MVT::Other, AddrOps); 5530 NewNodes.push_back(Load); 5531 5532 // Preserve memory reference information. 5533 cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second); 5534 } 5535 5536 // Emit the data processing instruction. 5537 std::vector<EVT> VTs; 5538 const TargetRegisterClass *DstRC = nullptr; 5539 if (MCID.getNumDefs() > 0) { 5540 DstRC = getRegClass(MCID, 0, &RI, MF); 5541 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC)); 5542 } 5543 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { 5544 EVT VT = N->getValueType(i); 5545 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) 5546 VTs.push_back(VT); 5547 } 5548 if (Load) 5549 BeforeOps.push_back(SDValue(Load, 0)); 5550 BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end()); 5551 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 
5552 switch (Opc) { 5553 default: break; 5554 case X86::CMP64ri32: 5555 case X86::CMP64ri8: 5556 case X86::CMP32ri: 5557 case X86::CMP32ri8: 5558 case X86::CMP16ri: 5559 case X86::CMP16ri8: 5560 case X86::CMP8ri: 5561 if (isNullConstant(BeforeOps[1])) { 5562 switch (Opc) { 5563 default: llvm_unreachable("Unreachable!"); 5564 case X86::CMP64ri8: 5565 case X86::CMP64ri32: Opc = X86::TEST64rr; break; 5566 case X86::CMP32ri8: 5567 case X86::CMP32ri: Opc = X86::TEST32rr; break; 5568 case X86::CMP16ri8: 5569 case X86::CMP16ri: Opc = X86::TEST16rr; break; 5570 case X86::CMP8ri: Opc = X86::TEST8rr; break; 5571 } 5572 BeforeOps[1] = BeforeOps[0]; 5573 } 5574 } 5575 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps); 5576 NewNodes.push_back(NewNode); 5577 5578 // Emit the store instruction. 5579 if (FoldedStore) { 5580 AddrOps.pop_back(); 5581 AddrOps.push_back(SDValue(NewNode, 0)); 5582 AddrOps.push_back(Chain); 5583 std::pair<MachineInstr::mmo_iterator, 5584 MachineInstr::mmo_iterator> MMOs = 5585 MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 5586 cast<MachineSDNode>(N)->memoperands_end()); 5587 if (!(*MMOs.first) && 5588 RC == &X86::VR128RegClass && 5589 Subtarget.isUnalignedMem16Slow()) 5590 // Do not introduce a slow unaligned store. 5591 return false; 5592 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 5593 // memory access is slow above. 5594 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 5595 bool isAligned = (*MMOs.first) && 5596 (*MMOs.first)->getAlignment() >= Alignment; 5597 SDNode *Store = 5598 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), 5599 dl, MVT::Other, AddrOps); 5600 NewNodes.push_back(Store); 5601 5602 // Preserve memory reference information. 
5603 cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second); 5604 } 5605 5606 return true; 5607} 5608 5609unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, 5610 bool UnfoldLoad, bool UnfoldStore, 5611 unsigned *LoadRegIndex) const { 5612 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc); 5613 if (I == nullptr) 5614 return 0; 5615 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5616 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5617 if (UnfoldLoad && !FoldedLoad) 5618 return 0; 5619 if (UnfoldStore && !FoldedStore) 5620 return 0; 5621 if (LoadRegIndex) 5622 *LoadRegIndex = I->Flags & TB_INDEX_MASK; 5623 return I->DstOp; 5624} 5625 5626bool 5627X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 5628 int64_t &Offset1, int64_t &Offset2) const { 5629 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 5630 return false; 5631 unsigned Opc1 = Load1->getMachineOpcode(); 5632 unsigned Opc2 = Load2->getMachineOpcode(); 5633 switch (Opc1) { 5634 default: return false; 5635 case X86::MOV8rm: 5636 case X86::MOV16rm: 5637 case X86::MOV32rm: 5638 case X86::MOV64rm: 5639 case X86::LD_Fp32m: 5640 case X86::LD_Fp64m: 5641 case X86::LD_Fp80m: 5642 case X86::MOVSSrm: 5643 case X86::MOVSDrm: 5644 case X86::MMX_MOVD64rm: 5645 case X86::MMX_MOVQ64rm: 5646 case X86::MOVAPSrm: 5647 case X86::MOVUPSrm: 5648 case X86::MOVAPDrm: 5649 case X86::MOVUPDrm: 5650 case X86::MOVDQArm: 5651 case X86::MOVDQUrm: 5652 // AVX load instructions 5653 case X86::VMOVSSrm: 5654 case X86::VMOVSDrm: 5655 case X86::VMOVAPSrm: 5656 case X86::VMOVUPSrm: 5657 case X86::VMOVAPDrm: 5658 case X86::VMOVUPDrm: 5659 case X86::VMOVDQArm: 5660 case X86::VMOVDQUrm: 5661 case X86::VMOVAPSYrm: 5662 case X86::VMOVUPSYrm: 5663 case X86::VMOVAPDYrm: 5664 case X86::VMOVUPDYrm: 5665 case X86::VMOVDQAYrm: 5666 case X86::VMOVDQUYrm: 5667 // AVX512 load instructions 5668 case X86::VMOVSSZrm: 5669 case X86::VMOVSDZrm: 5670 case X86::VMOVAPSZ128rm: 5671 case X86::VMOVUPSZ128rm: 5672 case X86::VMOVAPSZ128rm_NOVLX: 5673 case X86::VMOVUPSZ128rm_NOVLX: 5674 case X86::VMOVAPDZ128rm: 5675 case X86::VMOVUPDZ128rm: 5676 case X86::VMOVDQU8Z128rm: 5677 case X86::VMOVDQU16Z128rm: 5678 case X86::VMOVDQA32Z128rm: 5679 case X86::VMOVDQU32Z128rm: 5680 case X86::VMOVDQA64Z128rm: 5681 case X86::VMOVDQU64Z128rm: 5682 case X86::VMOVAPSZ256rm: 5683 case X86::VMOVUPSZ256rm: 5684 case X86::VMOVAPSZ256rm_NOVLX: 5685 case X86::VMOVUPSZ256rm_NOVLX: 5686 case X86::VMOVAPDZ256rm: 5687 case X86::VMOVUPDZ256rm: 5688 case X86::VMOVDQU8Z256rm: 5689 case X86::VMOVDQU16Z256rm: 5690 case X86::VMOVDQA32Z256rm: 5691 case X86::VMOVDQU32Z256rm: 5692 case X86::VMOVDQA64Z256rm: 5693 case X86::VMOVDQU64Z256rm: 5694 case X86::VMOVAPSZrm: 5695 case X86::VMOVUPSZrm: 5696 case X86::VMOVAPDZrm: 5697 case X86::VMOVUPDZrm: 5698 case X86::VMOVDQU8Zrm: 5699 case X86::VMOVDQU16Zrm: 5700 case X86::VMOVDQA32Zrm: 5701 case X86::VMOVDQU32Zrm: 5702 case X86::VMOVDQA64Zrm: 5703 case X86::VMOVDQU64Zrm: 5704 case X86::KMOVBkm: 5705 case X86::KMOVWkm: 5706 case X86::KMOVDkm: 5707 case X86::KMOVQkm: 5708 break; 5709 } 5710 switch (Opc2) { 5711 default: return false; 5712 case X86::MOV8rm: 5713 case X86::MOV16rm: 5714 case X86::MOV32rm: 5715 case X86::MOV64rm: 5716 case X86::LD_Fp32m: 5717 case X86::LD_Fp64m: 5718 case X86::LD_Fp80m: 5719 case X86::MOVSSrm: 5720 case X86::MOVSDrm: 5721 case X86::MMX_MOVD64rm: 5722 case X86::MMX_MOVQ64rm: 5723 case X86::MOVAPSrm: 5724 case X86::MOVUPSrm: 5725 case X86::MOVAPDrm: 5726 case X86::MOVUPDrm: 5727 case X86::MOVDQArm: 5728 
case X86::MOVDQUrm: 5729 // AVX load instructions 5730 case X86::VMOVSSrm: 5731 case X86::VMOVSDrm: 5732 case X86::VMOVAPSrm: 5733 case X86::VMOVUPSrm: 5734 case X86::VMOVAPDrm: 5735 case X86::VMOVUPDrm: 5736 case X86::VMOVDQArm: 5737 case X86::VMOVDQUrm: 5738 case X86::VMOVAPSYrm: 5739 case X86::VMOVUPSYrm: 5740 case X86::VMOVAPDYrm: 5741 case X86::VMOVUPDYrm: 5742 case X86::VMOVDQAYrm: 5743 case X86::VMOVDQUYrm: 5744 // AVX512 load instructions 5745 case X86::VMOVSSZrm: 5746 case X86::VMOVSDZrm: 5747 case X86::VMOVAPSZ128rm: 5748 case X86::VMOVUPSZ128rm: 5749 case X86::VMOVAPSZ128rm_NOVLX: 5750 case X86::VMOVUPSZ128rm_NOVLX: 5751 case X86::VMOVAPDZ128rm: 5752 case X86::VMOVUPDZ128rm: 5753 case X86::VMOVDQU8Z128rm: 5754 case X86::VMOVDQU16Z128rm: 5755 case X86::VMOVDQA32Z128rm: 5756 case X86::VMOVDQU32Z128rm: 5757 case X86::VMOVDQA64Z128rm: 5758 case X86::VMOVDQU64Z128rm: 5759 case X86::VMOVAPSZ256rm: 5760 case X86::VMOVUPSZ256rm: 5761 case X86::VMOVAPSZ256rm_NOVLX: 5762 case X86::VMOVUPSZ256rm_NOVLX: 5763 case X86::VMOVAPDZ256rm: 5764 case X86::VMOVUPDZ256rm: 5765 case X86::VMOVDQU8Z256rm: 5766 case X86::VMOVDQU16Z256rm: 5767 case X86::VMOVDQA32Z256rm: 5768 case X86::VMOVDQU32Z256rm: 5769 case X86::VMOVDQA64Z256rm: 5770 case X86::VMOVDQU64Z256rm: 5771 case X86::VMOVAPSZrm: 5772 case X86::VMOVUPSZrm: 5773 case X86::VMOVAPDZrm: 5774 case X86::VMOVUPDZrm: 5775 case X86::VMOVDQU8Zrm: 5776 case X86::VMOVDQU16Zrm: 5777 case X86::VMOVDQA32Zrm: 5778 case X86::VMOVDQU32Zrm: 5779 case X86::VMOVDQA64Zrm: 5780 case X86::VMOVDQU64Zrm: 5781 case X86::KMOVBkm: 5782 case X86::KMOVWkm: 5783 case X86::KMOVDkm: 5784 case X86::KMOVQkm: 5785 break; 5786 } 5787 5788 // Lambda to check if both the loads have the same value for an operand index. 5789 auto HasSameOp = [&](int I) { 5790 return Load1->getOperand(I) == Load2->getOperand(I); 5791 }; 5792 5793 // All operands except the displacement should match. 5794 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) || 5795 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg)) 5796 return false; 5797 5798 // Chain Operand must be the same. 5799 if (!HasSameOp(5)) 5800 return false; 5801 5802 // Now let's examine if the displacements are constants. 5803 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp)); 5804 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp)); 5805 if (!Disp1 || !Disp2) 5806 return false; 5807 5808 Offset1 = Disp1->getSExtValue(); 5809 Offset2 = Disp2->getSExtValue(); 5810 return true; 5811} 5812 5813bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 5814 int64_t Offset1, int64_t Offset2, 5815 unsigned NumLoads) const { 5816 assert(Offset2 > Offset1); 5817 if ((Offset2 - Offset1) / 8 > 64) 5818 return false; 5819 5820 unsigned Opc1 = Load1->getMachineOpcode(); 5821 unsigned Opc2 = Load2->getMachineOpcode(); 5822 if (Opc1 != Opc2) 5823 return false; // FIXME: overly conservative? 5824 5825 switch (Opc1) { 5826 default: break; 5827 case X86::LD_Fp32m: 5828 case X86::LD_Fp64m: 5829 case X86::LD_Fp80m: 5830 case X86::MMX_MOVD64rm: 5831 case X86::MMX_MOVQ64rm: 5832 return false; 5833 } 5834 5835 EVT VT = Load1->getValueType(0); 5836 switch (VT.getSimpleVT().SimpleTy) { 5837 default: 5838 // XMM registers. In 64-bit mode we can be a bit more aggressive since we 5839 // have 16 of them to play with. 
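// Illustrative reading of the heuristic below: two XMM loads that
// areLoadsFromSameBasePtr matched at, say, 0(%rdi) and 16(%rdi) may be
// clustered on x86-64 while NumLoads is still below 3; in 32-bit mode, with
// only 8 XMM registers, any nonzero NumLoads blocks further clustering.
// Loads more than 512 bytes apart were already rejected by the offset check
// at the top of this function.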
5840 if (Subtarget.is64Bit()) {
5841 if (NumLoads >= 3)
5842 return false;
5843 } else if (NumLoads) {
5844 return false;
5845 }
5846 break;
5847 case MVT::i8:
5848 case MVT::i16:
5849 case MVT::i32:
5850 case MVT::i64:
5851 case MVT::f32:
5852 case MVT::f64:
5853 if (NumLoads)
5854 return false;
5855 break;
5856 }
5857
5858 return true;
5859}
5860
5861bool X86InstrInfo::
5862reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
5863 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
5864 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
5865 Cond[0].setImm(GetOppositeBranchCondition(CC));
5866 return false;
5867}
5868
5869bool X86InstrInfo::
5870isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
5871 // FIXME: Return false for x87 stack register classes for now. We can't
5872 // allow any loads of these registers before FpGet_ST0_80.
5873 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
5874 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
5875 RC == &X86::RFP80RegClass);
5876}
5877
5878/// Return a virtual register initialized with the
5879/// global base register value. Output instructions required to
5880/// initialize the register in the function entry block, if necessary.
5881///
5882/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
5883///
5884unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
5885 assert((!Subtarget.is64Bit() ||
5886 MF->getTarget().getCodeModel() == CodeModel::Medium ||
5887 MF->getTarget().getCodeModel() == CodeModel::Large) &&
5888 "X86-64 PIC uses RIP relative addressing");
5889
5890 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
5891 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
5892 if (GlobalBaseReg != 0)
5893 return GlobalBaseReg;
5894
5895 // Create the register. The code to initialize it is inserted
5896 // later, by the CGBR pass (below).
5897 MachineRegisterInfo &RegInfo = MF->getRegInfo();
5898 GlobalBaseReg = RegInfo.createVirtualRegister(
5899 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
5900 X86FI->setGlobalBaseReg(GlobalBaseReg);
5901 return GlobalBaseReg;
5902}
5903
5904// These are the replaceable SSE instructions. Some of these have Int variants
5905// that we don't include here. We don't want to replace instructions selected
5906// by intrinsics.
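// Each row names the same operation in the three execution domains, e.g. the
// row { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr } lets the execution domain
// fix pass rewrite a float-domain ANDPS into the integer-domain PAND (or
// back) when neighbouring instructions already live in that domain, avoiding
// domain-crossing penalties on targets that have them.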
5907static const uint16_t ReplaceableInstrs[][3] = { 5908 //PackedSingle PackedDouble PackedInt 5909 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, 5910 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, 5911 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, 5912 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, 5913 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, 5914 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, 5915 { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr }, 5916 { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr }, 5917 { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm }, 5918 { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm }, 5919 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, 5920 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, 5921 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, 5922 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, 5923 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, 5924 { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, 5925 { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, 5926 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, 5927 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, 5928 { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm }, 5929 { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr }, 5930 { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm }, 5931 { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr }, 5932 { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm }, 5933 { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr }, 5934 { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm }, 5935 { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr }, 5936 { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr }, 5937 { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr }, 5938 // AVX 128-bit support 5939 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, 5940 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, 5941 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, 5942 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, 5943 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, 5944 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, 5945 { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr }, 5946 { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr }, 5947 { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm }, 5948 { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm }, 5949 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, 5950 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, 5951 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, 5952 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, 5953 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, 5954 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, 5955 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, 5956 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, 5957 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, 5958 { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm }, 5959 { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr }, 5960 { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm }, 5961 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr }, 5962 { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm }, 5963 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr }, 5964 { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm }, 5965 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr }, 5966 { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr }, 5967 { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr }, 5968 // AVX 256-bit support 5969 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, 5970 { X86::VMOVAPSYrm, 
X86::VMOVAPDYrm, X86::VMOVDQAYrm }, 5971 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, 5972 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, 5973 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, 5974 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }, 5975 { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm }, 5976 { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr }, 5977 { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi }, 5978 { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri }, 5979 // AVX512 support 5980 { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr }, 5981 { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr }, 5982 { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr }, 5983 { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr }, 5984 { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr }, 5985 { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr }, 5986 { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm }, 5987 { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm }, 5988 { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r }, 5989 { X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m }, 5990 { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r }, 5991 { X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m }, 5992 { X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr }, 5993 { X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm }, 5994 { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r }, 5995 { X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m }, 5996 { X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr }, 5997 { X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm }, 5998 { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr }, 5999 { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm }, 6000 { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr }, 6001 { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm }, 6002 { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr }, 6003 { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm }, 6004 { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr }, 6005 { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm }, 6006 { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr }, 6007 { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm }, 6008 { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr }, 6009 { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm }, 6010 { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr }, 6011 { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr }, 6012 { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr }, 6013 { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr }, 6014 { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr }, 6015 { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr }, 6016 { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr }, 6017 { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr }, 6018 { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr }, 6019 { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr }, 
6020 { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr }, 6021 { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr }, 6022 { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi }, 6023 { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri }, 6024 { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi }, 6025 { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri }, 6026 { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi }, 6027 { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri }, 6028 { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi }, 6029 { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri }, 6030 { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm }, 6031 { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr }, 6032 { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi }, 6033 { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri }, 6034 { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm }, 6035 { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr }, 6036 { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm }, 6037 { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr }, 6038 { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi }, 6039 { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri }, 6040 { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm }, 6041 { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr }, 6042 { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm }, 6043 { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr }, 6044 { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm }, 6045 { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr }, 6046 { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm }, 6047 { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr }, 6048 { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm }, 6049 { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr }, 6050 { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm }, 6051 { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr }, 6052 { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm }, 6053 { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr }, 6054 { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm }, 6055 { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr }, 6056 { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm }, 6057 { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr }, 6058 { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm }, 6059 { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr }, 6060 { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm }, 6061 { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr }, 6062 { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm }, 6063 { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr }, 6064 { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm }, 6065 { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr }, 6066 { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr }, 6067 { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr }, 6068}; 6069 6070static const uint16_t ReplaceableInstrsAVX2[][3] = { 6071 //PackedSingle PackedDouble PackedInt 6072 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, 6073 { 
X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, 6074 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, 6075 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, 6076 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, 6077 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, 6078 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, 6079 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, 6080 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, 6081 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, 6082 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, 6083 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, 6084 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, 6085 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, 6086 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, 6087 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}, 6088 { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 }, 6089 { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri }, 6090 { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi }, 6091 { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi }, 6092 { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri }, 6093 { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm }, 6094 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr }, 6095 { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm }, 6096 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr }, 6097 { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm }, 6098 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr }, 6099 { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm }, 6100 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr }, 6101}; 6102 6103static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = { 6104 //PackedSingle PackedDouble PackedInt 6105 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, 6106 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, 6107 { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm }, 6108 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, 6109}; 6110 6111static const uint16_t ReplaceableInstrsAVX512[][4] = { 6112 // Two integer columns for 64-bit and 32-bit elements. 
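  // The two PackedInt columns hold the 64-bit-element (VMOVDQA64/VMOVDQU64)
  // and 32-bit-element (VMOVDQA32/VMOVDQU32) forms of each EVEX move;
  // lookupAVX512() below accepts a match in either integer column, so a
  // domain change never has to alter the element size.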
6113 //PackedSingle PackedDouble PackedInt PackedInt 6114 { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr }, 6115 { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm }, 6116 { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr }, 6117 { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr }, 6118 { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm }, 6119 { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr }, 6120 { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm }, 6121 { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr }, 6122 { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr }, 6123 { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm }, 6124 { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr }, 6125 { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm }, 6126 { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr }, 6127 { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr }, 6128 { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm }, 6129}; 6130 6131static const uint16_t ReplaceableInstrsAVX512DQ[][4] = { 6132 // Two integer columns for 64-bit and 32-bit elements. 6133 //PackedSingle PackedDouble PackedInt PackedInt 6134 { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, 6135 { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, 6136 { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, 6137 { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, 6138 { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm }, 6139 { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr }, 6140 { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, 6141 { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, 6142 { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, 6143 { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, 6144 { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, 6145 { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, 6146 { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm }, 6147 { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr }, 6148 { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, 6149 { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, 6150 { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm }, 6151 { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr }, 6152 { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm }, 6153 { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr }, 6154 { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm }, 6155 { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr }, 6156 { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm }, 6157 { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr }, 6158}; 6159 6160static const 
uint16_t ReplaceableInstrsAVX512DQMasked[][4] = { 6161 // Two integer columns for 64-bit and 32-bit elements. 6162 //PackedSingle PackedDouble 6163 //PackedInt PackedInt 6164 { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk, 6165 X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk }, 6166 { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz, 6167 X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz }, 6168 { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk, 6169 X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk }, 6170 { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz, 6171 X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz }, 6172 { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk, 6173 X86::VPANDQZ128rmk, X86::VPANDDZ128rmk }, 6174 { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz, 6175 X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz }, 6176 { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk, 6177 X86::VPANDQZ128rrk, X86::VPANDDZ128rrk }, 6178 { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz, 6179 X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz }, 6180 { X86::VORPSZ128rmk, X86::VORPDZ128rmk, 6181 X86::VPORQZ128rmk, X86::VPORDZ128rmk }, 6182 { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz, 6183 X86::VPORQZ128rmkz, X86::VPORDZ128rmkz }, 6184 { X86::VORPSZ128rrk, X86::VORPDZ128rrk, 6185 X86::VPORQZ128rrk, X86::VPORDZ128rrk }, 6186 { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz, 6187 X86::VPORQZ128rrkz, X86::VPORDZ128rrkz }, 6188 { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk, 6189 X86::VPXORQZ128rmk, X86::VPXORDZ128rmk }, 6190 { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz, 6191 X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz }, 6192 { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk, 6193 X86::VPXORQZ128rrk, X86::VPXORDZ128rrk }, 6194 { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz, 6195 X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz }, 6196 { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk, 6197 X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk }, 6198 { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz, 6199 X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz }, 6200 { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk, 6201 X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk }, 6202 { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz, 6203 X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz }, 6204 { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk, 6205 X86::VPANDQZ256rmk, X86::VPANDDZ256rmk }, 6206 { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz, 6207 X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz }, 6208 { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk, 6209 X86::VPANDQZ256rrk, X86::VPANDDZ256rrk }, 6210 { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz, 6211 X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz }, 6212 { X86::VORPSZ256rmk, X86::VORPDZ256rmk, 6213 X86::VPORQZ256rmk, X86::VPORDZ256rmk }, 6214 { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz, 6215 X86::VPORQZ256rmkz, X86::VPORDZ256rmkz }, 6216 { X86::VORPSZ256rrk, X86::VORPDZ256rrk, 6217 X86::VPORQZ256rrk, X86::VPORDZ256rrk }, 6218 { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz, 6219 X86::VPORQZ256rrkz, X86::VPORDZ256rrkz }, 6220 { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk, 6221 X86::VPXORQZ256rmk, X86::VPXORDZ256rmk }, 6222 { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz, 6223 X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz }, 6224 { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk, 6225 X86::VPXORQZ256rrk, X86::VPXORDZ256rrk }, 6226 { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz, 6227 X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz }, 6228 { X86::VANDNPSZrmk, X86::VANDNPDZrmk, 6229 X86::VPANDNQZrmk, X86::VPANDNDZrmk }, 6230 { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz, 6231 X86::VPANDNQZrmkz, X86::VPANDNDZrmkz }, 6232 { X86::VANDNPSZrrk, X86::VANDNPDZrrk, 6233 X86::VPANDNQZrrk, X86::VPANDNDZrrk }, 6234 { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz, 6235 
X86::VPANDNQZrrkz, X86::VPANDNDZrrkz }, 6236 { X86::VANDPSZrmk, X86::VANDPDZrmk, 6237 X86::VPANDQZrmk, X86::VPANDDZrmk }, 6238 { X86::VANDPSZrmkz, X86::VANDPDZrmkz, 6239 X86::VPANDQZrmkz, X86::VPANDDZrmkz }, 6240 { X86::VANDPSZrrk, X86::VANDPDZrrk, 6241 X86::VPANDQZrrk, X86::VPANDDZrrk }, 6242 { X86::VANDPSZrrkz, X86::VANDPDZrrkz, 6243 X86::VPANDQZrrkz, X86::VPANDDZrrkz }, 6244 { X86::VORPSZrmk, X86::VORPDZrmk, 6245 X86::VPORQZrmk, X86::VPORDZrmk }, 6246 { X86::VORPSZrmkz, X86::VORPDZrmkz, 6247 X86::VPORQZrmkz, X86::VPORDZrmkz }, 6248 { X86::VORPSZrrk, X86::VORPDZrrk, 6249 X86::VPORQZrrk, X86::VPORDZrrk }, 6250 { X86::VORPSZrrkz, X86::VORPDZrrkz, 6251 X86::VPORQZrrkz, X86::VPORDZrrkz }, 6252 { X86::VXORPSZrmk, X86::VXORPDZrmk, 6253 X86::VPXORQZrmk, X86::VPXORDZrmk }, 6254 { X86::VXORPSZrmkz, X86::VXORPDZrmkz, 6255 X86::VPXORQZrmkz, X86::VPXORDZrmkz }, 6256 { X86::VXORPSZrrk, X86::VXORPDZrrk, 6257 X86::VPXORQZrrk, X86::VPXORDZrrk }, 6258 { X86::VXORPSZrrkz, X86::VXORPDZrrkz, 6259 X86::VPXORQZrrkz, X86::VPXORDZrrkz }, 6260 // Broadcast loads can be handled the same as masked operations to avoid 6261 // changing element size. 6262 { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb, 6263 X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb }, 6264 { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb, 6265 X86::VPANDQZ128rmb, X86::VPANDDZ128rmb }, 6266 { X86::VORPSZ128rmb, X86::VORPDZ128rmb, 6267 X86::VPORQZ128rmb, X86::VPORDZ128rmb }, 6268 { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb, 6269 X86::VPXORQZ128rmb, X86::VPXORDZ128rmb }, 6270 { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb, 6271 X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb }, 6272 { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb, 6273 X86::VPANDQZ256rmb, X86::VPANDDZ256rmb }, 6274 { X86::VORPSZ256rmb, X86::VORPDZ256rmb, 6275 X86::VPORQZ256rmb, X86::VPORDZ256rmb }, 6276 { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb, 6277 X86::VPXORQZ256rmb, X86::VPXORDZ256rmb }, 6278 { X86::VANDNPSZrmb, X86::VANDNPDZrmb, 6279 X86::VPANDNQZrmb, X86::VPANDNDZrmb }, 6280 { X86::VANDPSZrmb, X86::VANDPDZrmb, 6281 X86::VPANDQZrmb, X86::VPANDDZrmb }, 6282 { X86::VANDPSZrmb, X86::VANDPDZrmb, 6283 X86::VPANDQZrmb, X86::VPANDDZrmb }, 6284 { X86::VORPSZrmb, X86::VORPDZrmb, 6285 X86::VPORQZrmb, X86::VPORDZrmb }, 6286 { X86::VXORPSZrmb, X86::VXORPDZrmb, 6287 X86::VPXORQZrmb, X86::VPXORDZrmb }, 6288 { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk, 6289 X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk }, 6290 { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk, 6291 X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk }, 6292 { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk, 6293 X86::VPORQZ128rmbk, X86::VPORDZ128rmbk }, 6294 { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk, 6295 X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk }, 6296 { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk, 6297 X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk }, 6298 { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk, 6299 X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk }, 6300 { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk, 6301 X86::VPORQZ256rmbk, X86::VPORDZ256rmbk }, 6302 { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk, 6303 X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk }, 6304 { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk, 6305 X86::VPANDNQZrmbk, X86::VPANDNDZrmbk }, 6306 { X86::VANDPSZrmbk, X86::VANDPDZrmbk, 6307 X86::VPANDQZrmbk, X86::VPANDDZrmbk }, 6308 { X86::VANDPSZrmbk, X86::VANDPDZrmbk, 6309 X86::VPANDQZrmbk, X86::VPANDDZrmbk }, 6310 { X86::VORPSZrmbk, X86::VORPDZrmbk, 6311 X86::VPORQZrmbk, X86::VPORDZrmbk }, 6312 { X86::VXORPSZrmbk, X86::VXORPDZrmbk, 6313 X86::VPXORQZrmbk, X86::VPXORDZrmbk }, 6314 { 
X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz, 6315 X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz}, 6316 { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz, 6317 X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz }, 6318 { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz, 6319 X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz }, 6320 { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz, 6321 X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz }, 6322 { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz, 6323 X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz}, 6324 { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz, 6325 X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz }, 6326 { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz, 6327 X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz }, 6328 { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz, 6329 X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz }, 6330 { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz, 6331 X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz }, 6332 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, 6333 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, 6334 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, 6335 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, 6336 { X86::VORPSZrmbkz, X86::VORPDZrmbkz, 6337 X86::VPORQZrmbkz, X86::VPORDZrmbkz }, 6338 { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz, 6339 X86::VPXORQZrmbkz, X86::VPXORDZrmbkz }, 6340}; 6341 6342// NOTE: These should only be used by the custom domain methods. 6343static const uint16_t ReplaceableCustomInstrs[][3] = { 6344 //PackedSingle PackedDouble PackedInt 6345 { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi }, 6346 { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri }, 6347 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi }, 6348 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri }, 6349 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi }, 6350 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri }, 6351}; 6352static const uint16_t ReplaceableCustomAVX2Instrs[][3] = { 6353 //PackedSingle PackedDouble PackedInt 6354 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi }, 6355 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri }, 6356 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi }, 6357 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri }, 6358}; 6359 6360// Special table for changing EVEX logic instructions to VEX. 6361// TODO: Should we run EVEX->VEX earlier? 6362static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = { 6363 // Two integer columns for 64-bit and 32-bit elements. 
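  // Note the asymmetry: the FP columns are VEX-encoded 128/256-bit opcodes
  // while the integer columns stay EVEX (Z128/Z256), so switching an
  // EVEX-only integer logic op into an FP domain doubles as an EVEX->VEX
  // transition when the registers fit in XMM0-15/YMM0-15.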
6364 //PackedSingle PackedDouble PackedInt PackedInt 6365 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, 6366 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, 6367 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, 6368 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, 6369 { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm }, 6370 { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr }, 6371 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, 6372 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, 6373 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, 6374 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, 6375 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, 6376 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, 6377 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm }, 6378 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr }, 6379 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, 6380 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, 6381}; 6382 6383// FIXME: Some shuffle and unpack instructions have equivalents in different 6384// domains, but they require a bit more work than just switching opcodes. 6385 6386static const uint16_t *lookup(unsigned opcode, unsigned domain, 6387 ArrayRef<uint16_t[3]> Table) { 6388 for (const uint16_t (&Row)[3] : Table) 6389 if (Row[domain-1] == opcode) 6390 return Row; 6391 return nullptr; 6392} 6393 6394static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain, 6395 ArrayRef<uint16_t[4]> Table) { 6396 // If this is the integer domain make sure to check both integer columns. 6397 for (const uint16_t (&Row)[4] : Table) 6398 if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode)) 6399 return Row; 6400 return nullptr; 6401} 6402 6403// Helper to attempt to widen/narrow blend masks. 6404static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, 6405 unsigned NewWidth, unsigned *pNewMask = nullptr) { 6406 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) && 6407 "Illegal blend mask scale"); 6408 unsigned NewMask = 0; 6409 6410 if ((OldWidth % NewWidth) == 0) { 6411 unsigned Scale = OldWidth / NewWidth; 6412 unsigned SubMask = (1u << Scale) - 1; 6413 for (unsigned i = 0; i != NewWidth; ++i) { 6414 unsigned Sub = (OldMask >> (i * Scale)) & SubMask; 6415 if (Sub == SubMask) 6416 NewMask |= (1u << i); 6417 else if (Sub != 0x0) 6418 return false; 6419 } 6420 } else { 6421 unsigned Scale = NewWidth / OldWidth; 6422 unsigned SubMask = (1u << Scale) - 1; 6423 for (unsigned i = 0; i != OldWidth; ++i) { 6424 if (OldMask & (1 << i)) { 6425 NewMask |= (SubMask << (i * Scale)); 6426 } 6427 } 6428 } 6429 6430 if (pNewMask) 6431 *pNewMask = NewMask; 6432 return true; 6433} 6434 6435uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const { 6436 unsigned Opcode = MI.getOpcode(); 6437 unsigned NumOperands = MI.getDesc().getNumOperands(); 6438 6439 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) { 6440 uint16_t validDomains = 0; 6441 if (MI.getOperand(NumOperands - 1).isImm()) { 6442 unsigned Imm = MI.getOperand(NumOperands - 1).getImm(); 6443 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 
8 : 4)) 6444 validDomains |= 0x2; // PackedSingle 6445 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2)) 6446 validDomains |= 0x4; // PackedDouble 6447 if (!Is256 || Subtarget.hasAVX2()) 6448 validDomains |= 0x8; // PackedInt 6449 } 6450 return validDomains; 6451 }; 6452 6453 switch (Opcode) { 6454 case X86::BLENDPDrmi: 6455 case X86::BLENDPDrri: 6456 case X86::VBLENDPDrmi: 6457 case X86::VBLENDPDrri: 6458 return GetBlendDomains(2, false); 6459 case X86::VBLENDPDYrmi: 6460 case X86::VBLENDPDYrri: 6461 return GetBlendDomains(4, true); 6462 case X86::BLENDPSrmi: 6463 case X86::BLENDPSrri: 6464 case X86::VBLENDPSrmi: 6465 case X86::VBLENDPSrri: 6466 case X86::VPBLENDDrmi: 6467 case X86::VPBLENDDrri: 6468 return GetBlendDomains(4, false); 6469 case X86::VBLENDPSYrmi: 6470 case X86::VBLENDPSYrri: 6471 case X86::VPBLENDDYrmi: 6472 case X86::VPBLENDDYrri: 6473 return GetBlendDomains(8, true); 6474 case X86::PBLENDWrmi: 6475 case X86::PBLENDWrri: 6476 case X86::VPBLENDWrmi: 6477 case X86::VPBLENDWrri: 6478 // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks. 6479 case X86::VPBLENDWYrmi: 6480 case X86::VPBLENDWYrri: 6481 return GetBlendDomains(8, false); 6482 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: 6483 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: 6484 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: 6485 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: 6486 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: 6487 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: 6488 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: 6489 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: 6490 case X86::VPORDZ128rr: case X86::VPORDZ128rm: 6491 case X86::VPORDZ256rr: case X86::VPORDZ256rm: 6492 case X86::VPORQZ128rr: case X86::VPORQZ128rm: 6493 case X86::VPORQZ256rr: case X86::VPORQZ256rm: 6494 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: 6495 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: 6496 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: 6497 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: 6498 // If we don't have DQI see if we can still switch from an EVEX integer 6499 // instruction to a VEX floating point instruction. 6500 if (Subtarget.hasDQI()) 6501 return 0; 6502 6503 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16) 6504 return 0; 6505 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16) 6506 return 0; 6507 // Register forms will have 3 operands. Memory form will have more. 6508 if (NumOperands == 3 && 6509 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16) 6510 return 0; 6511 6512 // All domains are valid. 6513 return 0xe; 6514 } 6515 return 0; 6516} 6517 6518bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI, 6519 unsigned Domain) const { 6520 assert(Domain > 0 && Domain < 4 && "Invalid execution domain"); 6521 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; 6522 assert(dom && "Not an SSE instruction"); 6523 6524 unsigned Opcode = MI.getOpcode(); 6525 unsigned NumOperands = MI.getDesc().getNumOperands(); 6526 6527 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) { 6528 if (MI.getOperand(NumOperands - 1).isImm()) { 6529 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255; 6530 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm); 6531 unsigned NewImm = Imm; 6532 6533 const uint16_t *table = lookup(Opcode, dom, ReplaceableCustomInstrs); 6534 if (!table) 6535 table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs); 6536 6537 if (Domain == 1) { // PackedSingle 6538 AdjustBlendMask(Imm, ImmWidth, Is256 ? 
8 : 4, &NewImm); 6539 } else if (Domain == 2) { // PackedDouble 6540 AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm); 6541 } else if (Domain == 3) { // PackedInt 6542 if (Subtarget.hasAVX2()) { 6543 // If we are already VPBLENDW use that, else use VPBLENDD. 6544 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) { 6545 table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs); 6546 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); 6547 } 6548 } else { 6549 assert(!Is256 && "128-bit vector expected"); 6550 AdjustBlendMask(Imm, ImmWidth, 8, &NewImm); 6551 } 6552 } 6553 6554 assert(table && table[Domain - 1] && "Unknown domain op"); 6555 MI.setDesc(get(table[Domain - 1])); 6556 MI.getOperand(NumOperands - 1).setImm(NewImm & 255); 6557 } 6558 return true; 6559 }; 6560 6561 switch (Opcode) { 6562 case X86::BLENDPDrmi: 6563 case X86::BLENDPDrri: 6564 case X86::VBLENDPDrmi: 6565 case X86::VBLENDPDrri: 6566 return SetBlendDomain(2, false); 6567 case X86::VBLENDPDYrmi: 6568 case X86::VBLENDPDYrri: 6569 return SetBlendDomain(4, true); 6570 case X86::BLENDPSrmi: 6571 case X86::BLENDPSrri: 6572 case X86::VBLENDPSrmi: 6573 case X86::VBLENDPSrri: 6574 case X86::VPBLENDDrmi: 6575 case X86::VPBLENDDrri: 6576 return SetBlendDomain(4, false); 6577 case X86::VBLENDPSYrmi: 6578 case X86::VBLENDPSYrri: 6579 case X86::VPBLENDDYrmi: 6580 case X86::VPBLENDDYrri: 6581 return SetBlendDomain(8, true); 6582 case X86::PBLENDWrmi: 6583 case X86::PBLENDWrri: 6584 case X86::VPBLENDWrmi: 6585 case X86::VPBLENDWrri: 6586 return SetBlendDomain(8, false); 6587 case X86::VPBLENDWYrmi: 6588 case X86::VPBLENDWYrri: 6589 return SetBlendDomain(16, true); 6590 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: 6591 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: 6592 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: 6593 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: 6594 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: 6595 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: 6596 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: 6597 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: 6598 case X86::VPORDZ128rr: case X86::VPORDZ128rm: 6599 case X86::VPORDZ256rr: case X86::VPORDZ256rm: 6600 case X86::VPORQZ128rr: case X86::VPORQZ128rm: 6601 case X86::VPORQZ256rr: case X86::VPORQZ256rm: 6602 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: 6603 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: 6604 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: 6605 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: { 6606 // Without DQI, convert EVEX instructions to VEX instructions. 6607 if (Subtarget.hasDQI()) 6608 return false; 6609 6610 const uint16_t *table = lookupAVX512(MI.getOpcode(), dom, 6611 ReplaceableCustomAVX512LogicInstrs); 6612 assert(table && "Instruction not found in table?"); 6613 // Don't change integer Q instructions to D instructions and 6614 // use D intructions if we started with a PS instruction. 6615 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) 6616 Domain = 4; 6617 MI.setDesc(get(table[Domain - 1])); 6618 return true; 6619 } 6620 } 6621 return false; 6622} 6623 6624std::pair<uint16_t, uint16_t> 6625X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const { 6626 uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; 6627 unsigned opcode = MI.getOpcode(); 6628 uint16_t validDomains = 0; 6629 if (domain) { 6630 // Attempt to match for custom instructions. 
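    // Custom handling covers blend immediates and, without DQI, EVEX integer
    // logic ops, which need per-opcode checks rather than a plain table swap.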
    validDomains = getExecutionDomainCustom(MI);
    if (validDomains)
      return std::make_pair(domain, validDomains);

    if (lookup(opcode, domain, ReplaceableInstrs)) {
      validDomains = 0xe;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
      validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
      // Insert/extract instructions should only affect the domain if AVX2
      // is enabled.
      if (!Subtarget.hasAVX2())
        return std::make_pair(0, 0);
      validDomains = 0xe;
    } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
                                                  ReplaceableInstrsAVX512DQ)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI()) {
      if (const uint16_t *table = lookupAVX512(opcode, domain,
                                             ReplaceableInstrsAVX512DQMasked)) {
        if (domain == 1 || (domain == 3 && table[3] == opcode))
          validDomains = 0xa;
        else
          validDomains = 0xc;
      }
    }
  }
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");

  // Attempt to match for custom instructions.
  if (setExecutionDomainCustom(MI, Domain))
    return;

  const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
  if (!table) { // try the other table
    assert((Subtarget.hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
  }
  if (!table) { // try the other table
    assert(Subtarget.hasAVX2() &&
           "256-bit insert/extract only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
  }
  if (!table) { // try the AVX512 table
    assert(Subtarget.hasAVX512() && "Requires AVX-512");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
    // Don't change integer Q instructions to D instructions.
    if (table && Domain == 3 && table[3] == MI.getOpcode())
      Domain = 4;
  }
  if (!table) { // try the AVX512DQ table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
    // Don't change integer Q instructions to D instructions and
    // use D instructions if we started with a PS instruction.
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  if (!table) { // try the AVX512DQMasked table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  assert(table && "Cannot change domain");
  MI.setDesc(get(table[Domain - 1]));
}

/// Return the noop instruction to use for a noop.
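/// On x86 the canonical choice is the single-byte 0x90 NOP (X86::NOOP).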
6709void X86InstrInfo::getNoop(MCInst &NopInst) const { 6710 NopInst.setOpcode(X86::NOOP); 6711} 6712 6713bool X86InstrInfo::isHighLatencyDef(int opc) const { 6714 switch (opc) { 6715 default: return false; 6716 case X86::DIVPDrm: 6717 case X86::DIVPDrr: 6718 case X86::DIVPSrm: 6719 case X86::DIVPSrr: 6720 case X86::DIVSDrm: 6721 case X86::DIVSDrm_Int: 6722 case X86::DIVSDrr: 6723 case X86::DIVSDrr_Int: 6724 case X86::DIVSSrm: 6725 case X86::DIVSSrm_Int: 6726 case X86::DIVSSrr: 6727 case X86::DIVSSrr_Int: 6728 case X86::SQRTPDm: 6729 case X86::SQRTPDr: 6730 case X86::SQRTPSm: 6731 case X86::SQRTPSr: 6732 case X86::SQRTSDm: 6733 case X86::SQRTSDm_Int: 6734 case X86::SQRTSDr: 6735 case X86::SQRTSDr_Int: 6736 case X86::SQRTSSm: 6737 case X86::SQRTSSm_Int: 6738 case X86::SQRTSSr: 6739 case X86::SQRTSSr_Int: 6740 // AVX instructions with high latency 6741 case X86::VDIVPDrm: 6742 case X86::VDIVPDrr: 6743 case X86::VDIVPDYrm: 6744 case X86::VDIVPDYrr: 6745 case X86::VDIVPSrm: 6746 case X86::VDIVPSrr: 6747 case X86::VDIVPSYrm: 6748 case X86::VDIVPSYrr: 6749 case X86::VDIVSDrm: 6750 case X86::VDIVSDrm_Int: 6751 case X86::VDIVSDrr: 6752 case X86::VDIVSDrr_Int: 6753 case X86::VDIVSSrm: 6754 case X86::VDIVSSrm_Int: 6755 case X86::VDIVSSrr: 6756 case X86::VDIVSSrr_Int: 6757 case X86::VSQRTPDm: 6758 case X86::VSQRTPDr: 6759 case X86::VSQRTPDYm: 6760 case X86::VSQRTPDYr: 6761 case X86::VSQRTPSm: 6762 case X86::VSQRTPSr: 6763 case X86::VSQRTPSYm: 6764 case X86::VSQRTPSYr: 6765 case X86::VSQRTSDm: 6766 case X86::VSQRTSDm_Int: 6767 case X86::VSQRTSDr: 6768 case X86::VSQRTSDr_Int: 6769 case X86::VSQRTSSm: 6770 case X86::VSQRTSSm_Int: 6771 case X86::VSQRTSSr: 6772 case X86::VSQRTSSr_Int: 6773 // AVX512 instructions with high latency 6774 case X86::VDIVPDZ128rm: 6775 case X86::VDIVPDZ128rmb: 6776 case X86::VDIVPDZ128rmbk: 6777 case X86::VDIVPDZ128rmbkz: 6778 case X86::VDIVPDZ128rmk: 6779 case X86::VDIVPDZ128rmkz: 6780 case X86::VDIVPDZ128rr: 6781 case X86::VDIVPDZ128rrk: 6782 case X86::VDIVPDZ128rrkz: 6783 case X86::VDIVPDZ256rm: 6784 case X86::VDIVPDZ256rmb: 6785 case X86::VDIVPDZ256rmbk: 6786 case X86::VDIVPDZ256rmbkz: 6787 case X86::VDIVPDZ256rmk: 6788 case X86::VDIVPDZ256rmkz: 6789 case X86::VDIVPDZ256rr: 6790 case X86::VDIVPDZ256rrk: 6791 case X86::VDIVPDZ256rrkz: 6792 case X86::VDIVPDZrrb: 6793 case X86::VDIVPDZrrbk: 6794 case X86::VDIVPDZrrbkz: 6795 case X86::VDIVPDZrm: 6796 case X86::VDIVPDZrmb: 6797 case X86::VDIVPDZrmbk: 6798 case X86::VDIVPDZrmbkz: 6799 case X86::VDIVPDZrmk: 6800 case X86::VDIVPDZrmkz: 6801 case X86::VDIVPDZrr: 6802 case X86::VDIVPDZrrk: 6803 case X86::VDIVPDZrrkz: 6804 case X86::VDIVPSZ128rm: 6805 case X86::VDIVPSZ128rmb: 6806 case X86::VDIVPSZ128rmbk: 6807 case X86::VDIVPSZ128rmbkz: 6808 case X86::VDIVPSZ128rmk: 6809 case X86::VDIVPSZ128rmkz: 6810 case X86::VDIVPSZ128rr: 6811 case X86::VDIVPSZ128rrk: 6812 case X86::VDIVPSZ128rrkz: 6813 case X86::VDIVPSZ256rm: 6814 case X86::VDIVPSZ256rmb: 6815 case X86::VDIVPSZ256rmbk: 6816 case X86::VDIVPSZ256rmbkz: 6817 case X86::VDIVPSZ256rmk: 6818 case X86::VDIVPSZ256rmkz: 6819 case X86::VDIVPSZ256rr: 6820 case X86::VDIVPSZ256rrk: 6821 case X86::VDIVPSZ256rrkz: 6822 case X86::VDIVPSZrrb: 6823 case X86::VDIVPSZrrbk: 6824 case X86::VDIVPSZrrbkz: 6825 case X86::VDIVPSZrm: 6826 case X86::VDIVPSZrmb: 6827 case X86::VDIVPSZrmbk: 6828 case X86::VDIVPSZrmbkz: 6829 case X86::VDIVPSZrmk: 6830 case X86::VDIVPSZrmkz: 6831 case X86::VDIVPSZrr: 6832 case X86::VDIVPSZrrk: 6833 case X86::VDIVPSZrrkz: 6834 case X86::VDIVSDZrm: 6835 case X86::VDIVSDZrr: 6836 
case X86::VDIVSDZrm_Int: 6837 case X86::VDIVSDZrm_Intk: 6838 case X86::VDIVSDZrm_Intkz: 6839 case X86::VDIVSDZrr_Int: 6840 case X86::VDIVSDZrr_Intk: 6841 case X86::VDIVSDZrr_Intkz: 6842 case X86::VDIVSDZrrb_Int: 6843 case X86::VDIVSDZrrb_Intk: 6844 case X86::VDIVSDZrrb_Intkz: 6845 case X86::VDIVSSZrm: 6846 case X86::VDIVSSZrr: 6847 case X86::VDIVSSZrm_Int: 6848 case X86::VDIVSSZrm_Intk: 6849 case X86::VDIVSSZrm_Intkz: 6850 case X86::VDIVSSZrr_Int: 6851 case X86::VDIVSSZrr_Intk: 6852 case X86::VDIVSSZrr_Intkz: 6853 case X86::VDIVSSZrrb_Int: 6854 case X86::VDIVSSZrrb_Intk: 6855 case X86::VDIVSSZrrb_Intkz: 6856 case X86::VSQRTPDZ128m: 6857 case X86::VSQRTPDZ128mb: 6858 case X86::VSQRTPDZ128mbk: 6859 case X86::VSQRTPDZ128mbkz: 6860 case X86::VSQRTPDZ128mk: 6861 case X86::VSQRTPDZ128mkz: 6862 case X86::VSQRTPDZ128r: 6863 case X86::VSQRTPDZ128rk: 6864 case X86::VSQRTPDZ128rkz: 6865 case X86::VSQRTPDZ256m: 6866 case X86::VSQRTPDZ256mb: 6867 case X86::VSQRTPDZ256mbk: 6868 case X86::VSQRTPDZ256mbkz: 6869 case X86::VSQRTPDZ256mk: 6870 case X86::VSQRTPDZ256mkz: 6871 case X86::VSQRTPDZ256r: 6872 case X86::VSQRTPDZ256rk: 6873 case X86::VSQRTPDZ256rkz: 6874 case X86::VSQRTPDZm: 6875 case X86::VSQRTPDZmb: 6876 case X86::VSQRTPDZmbk: 6877 case X86::VSQRTPDZmbkz: 6878 case X86::VSQRTPDZmk: 6879 case X86::VSQRTPDZmkz: 6880 case X86::VSQRTPDZr: 6881 case X86::VSQRTPDZrb: 6882 case X86::VSQRTPDZrbk: 6883 case X86::VSQRTPDZrbkz: 6884 case X86::VSQRTPDZrk: 6885 case X86::VSQRTPDZrkz: 6886 case X86::VSQRTPSZ128m: 6887 case X86::VSQRTPSZ128mb: 6888 case X86::VSQRTPSZ128mbk: 6889 case X86::VSQRTPSZ128mbkz: 6890 case X86::VSQRTPSZ128mk: 6891 case X86::VSQRTPSZ128mkz: 6892 case X86::VSQRTPSZ128r: 6893 case X86::VSQRTPSZ128rk: 6894 case X86::VSQRTPSZ128rkz: 6895 case X86::VSQRTPSZ256m: 6896 case X86::VSQRTPSZ256mb: 6897 case X86::VSQRTPSZ256mbk: 6898 case X86::VSQRTPSZ256mbkz: 6899 case X86::VSQRTPSZ256mk: 6900 case X86::VSQRTPSZ256mkz: 6901 case X86::VSQRTPSZ256r: 6902 case X86::VSQRTPSZ256rk: 6903 case X86::VSQRTPSZ256rkz: 6904 case X86::VSQRTPSZm: 6905 case X86::VSQRTPSZmb: 6906 case X86::VSQRTPSZmbk: 6907 case X86::VSQRTPSZmbkz: 6908 case X86::VSQRTPSZmk: 6909 case X86::VSQRTPSZmkz: 6910 case X86::VSQRTPSZr: 6911 case X86::VSQRTPSZrb: 6912 case X86::VSQRTPSZrbk: 6913 case X86::VSQRTPSZrbkz: 6914 case X86::VSQRTPSZrk: 6915 case X86::VSQRTPSZrkz: 6916 case X86::VSQRTSDZm: 6917 case X86::VSQRTSDZm_Int: 6918 case X86::VSQRTSDZm_Intk: 6919 case X86::VSQRTSDZm_Intkz: 6920 case X86::VSQRTSDZr: 6921 case X86::VSQRTSDZr_Int: 6922 case X86::VSQRTSDZr_Intk: 6923 case X86::VSQRTSDZr_Intkz: 6924 case X86::VSQRTSDZrb_Int: 6925 case X86::VSQRTSDZrb_Intk: 6926 case X86::VSQRTSDZrb_Intkz: 6927 case X86::VSQRTSSZm: 6928 case X86::VSQRTSSZm_Int: 6929 case X86::VSQRTSSZm_Intk: 6930 case X86::VSQRTSSZm_Intkz: 6931 case X86::VSQRTSSZr: 6932 case X86::VSQRTSSZr_Int: 6933 case X86::VSQRTSSZr_Intk: 6934 case X86::VSQRTSSZr_Intkz: 6935 case X86::VSQRTSSZrb_Int: 6936 case X86::VSQRTSSZrb_Intk: 6937 case X86::VSQRTSSZrb_Intkz: 6938 6939 case X86::VGATHERDPDYrm: 6940 case X86::VGATHERDPDZ128rm: 6941 case X86::VGATHERDPDZ256rm: 6942 case X86::VGATHERDPDZrm: 6943 case X86::VGATHERDPDrm: 6944 case X86::VGATHERDPSYrm: 6945 case X86::VGATHERDPSZ128rm: 6946 case X86::VGATHERDPSZ256rm: 6947 case X86::VGATHERDPSZrm: 6948 case X86::VGATHERDPSrm: 6949 case X86::VGATHERPF0DPDm: 6950 case X86::VGATHERPF0DPSm: 6951 case X86::VGATHERPF0QPDm: 6952 case X86::VGATHERPF0QPSm: 6953 case X86::VGATHERPF1DPDm: 6954 case X86::VGATHERPF1DPSm: 6955 case 
X86::VGATHERPF1QPDm: 6956 case X86::VGATHERPF1QPSm: 6957 case X86::VGATHERQPDYrm: 6958 case X86::VGATHERQPDZ128rm: 6959 case X86::VGATHERQPDZ256rm: 6960 case X86::VGATHERQPDZrm: 6961 case X86::VGATHERQPDrm: 6962 case X86::VGATHERQPSYrm: 6963 case X86::VGATHERQPSZ128rm: 6964 case X86::VGATHERQPSZ256rm: 6965 case X86::VGATHERQPSZrm: 6966 case X86::VGATHERQPSrm: 6967 case X86::VPGATHERDDYrm: 6968 case X86::VPGATHERDDZ128rm: 6969 case X86::VPGATHERDDZ256rm: 6970 case X86::VPGATHERDDZrm: 6971 case X86::VPGATHERDDrm: 6972 case X86::VPGATHERDQYrm: 6973 case X86::VPGATHERDQZ128rm: 6974 case X86::VPGATHERDQZ256rm: 6975 case X86::VPGATHERDQZrm: 6976 case X86::VPGATHERDQrm: 6977 case X86::VPGATHERQDYrm: 6978 case X86::VPGATHERQDZ128rm: 6979 case X86::VPGATHERQDZ256rm: 6980 case X86::VPGATHERQDZrm: 6981 case X86::VPGATHERQDrm: 6982 case X86::VPGATHERQQYrm: 6983 case X86::VPGATHERQQZ128rm: 6984 case X86::VPGATHERQQZ256rm: 6985 case X86::VPGATHERQQZrm: 6986 case X86::VPGATHERQQrm: 6987 case X86::VSCATTERDPDZ128mr: 6988 case X86::VSCATTERDPDZ256mr: 6989 case X86::VSCATTERDPDZmr: 6990 case X86::VSCATTERDPSZ128mr: 6991 case X86::VSCATTERDPSZ256mr: 6992 case X86::VSCATTERDPSZmr: 6993 case X86::VSCATTERPF0DPDm: 6994 case X86::VSCATTERPF0DPSm: 6995 case X86::VSCATTERPF0QPDm: 6996 case X86::VSCATTERPF0QPSm: 6997 case X86::VSCATTERPF1DPDm: 6998 case X86::VSCATTERPF1DPSm: 6999 case X86::VSCATTERPF1QPDm: 7000 case X86::VSCATTERPF1QPSm: 7001 case X86::VSCATTERQPDZ128mr: 7002 case X86::VSCATTERQPDZ256mr: 7003 case X86::VSCATTERQPDZmr: 7004 case X86::VSCATTERQPSZ128mr: 7005 case X86::VSCATTERQPSZ256mr: 7006 case X86::VSCATTERQPSZmr: 7007 case X86::VPSCATTERDDZ128mr: 7008 case X86::VPSCATTERDDZ256mr: 7009 case X86::VPSCATTERDDZmr: 7010 case X86::VPSCATTERDQZ128mr: 7011 case X86::VPSCATTERDQZ256mr: 7012 case X86::VPSCATTERDQZmr: 7013 case X86::VPSCATTERQDZ128mr: 7014 case X86::VPSCATTERQDZ256mr: 7015 case X86::VPSCATTERQDZmr: 7016 case X86::VPSCATTERQQZ128mr: 7017 case X86::VPSCATTERQQZ256mr: 7018 case X86::VPSCATTERQQZmr: 7019 return true; 7020 } 7021} 7022 7023bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, 7024 const MachineRegisterInfo *MRI, 7025 const MachineInstr &DefMI, 7026 unsigned DefIdx, 7027 const MachineInstr &UseMI, 7028 unsigned UseIdx) const { 7029 return isHighLatencyDef(DefMI.getOpcode()); 7030} 7031 7032bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst, 7033 const MachineBasicBlock *MBB) const { 7034 assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) && 7035 "Reassociation needs binary operators"); 7036 7037 // Integer binary math/logic instructions have a third source operand: 7038 // the EFLAGS register. That operand must be both defined here and never 7039 // used; ie, it must be dead. If the EFLAGS operand is live, then we can 7040 // not change anything because rearranging the operands could affect other 7041 // instructions that depend on the exact status flags (zero, sign, etc.) 7042 // that are set by using these particular operands with this operation. 7043 if (Inst.getNumOperands() == 4) { 7044 assert(Inst.getOperand(3).isReg() && 7045 Inst.getOperand(3).getReg() == X86::EFLAGS && 7046 "Unexpected operand in reassociable instruction"); 7047 if (!Inst.getOperand(3).isDead()) 7048 return false; 7049 } 7050 7051 return TargetInstrInfo::hasReassociableOperands(Inst, MBB); 7052} 7053 7054// TODO: There are many more machine instruction opcodes to match: 7055// 1. Other data types (integer, vectors) 7056// 2. 
Other math / logic operations (xor, or) 7057// 3. Other forms of the same operation (intrinsics and other variants) 7058bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { 7059 switch (Inst.getOpcode()) { 7060 case X86::AND8rr: 7061 case X86::AND16rr: 7062 case X86::AND32rr: 7063 case X86::AND64rr: 7064 case X86::OR8rr: 7065 case X86::OR16rr: 7066 case X86::OR32rr: 7067 case X86::OR64rr: 7068 case X86::XOR8rr: 7069 case X86::XOR16rr: 7070 case X86::XOR32rr: 7071 case X86::XOR64rr: 7072 case X86::IMUL16rr: 7073 case X86::IMUL32rr: 7074 case X86::IMUL64rr: 7075 case X86::PANDrr: 7076 case X86::PORrr: 7077 case X86::PXORrr: 7078 case X86::ANDPDrr: 7079 case X86::ANDPSrr: 7080 case X86::ORPDrr: 7081 case X86::ORPSrr: 7082 case X86::XORPDrr: 7083 case X86::XORPSrr: 7084 case X86::PADDBrr: 7085 case X86::PADDWrr: 7086 case X86::PADDDrr: 7087 case X86::PADDQrr: 7088 case X86::VPANDrr: 7089 case X86::VPANDYrr: 7090 case X86::VPANDDZ128rr: 7091 case X86::VPANDDZ256rr: 7092 case X86::VPANDDZrr: 7093 case X86::VPANDQZ128rr: 7094 case X86::VPANDQZ256rr: 7095 case X86::VPANDQZrr: 7096 case X86::VPORrr: 7097 case X86::VPORYrr: 7098 case X86::VPORDZ128rr: 7099 case X86::VPORDZ256rr: 7100 case X86::VPORDZrr: 7101 case X86::VPORQZ128rr: 7102 case X86::VPORQZ256rr: 7103 case X86::VPORQZrr: 7104 case X86::VPXORrr: 7105 case X86::VPXORYrr: 7106 case X86::VPXORDZ128rr: 7107 case X86::VPXORDZ256rr: 7108 case X86::VPXORDZrr: 7109 case X86::VPXORQZ128rr: 7110 case X86::VPXORQZ256rr: 7111 case X86::VPXORQZrr: 7112 case X86::VANDPDrr: 7113 case X86::VANDPSrr: 7114 case X86::VANDPDYrr: 7115 case X86::VANDPSYrr: 7116 case X86::VANDPDZ128rr: 7117 case X86::VANDPSZ128rr: 7118 case X86::VANDPDZ256rr: 7119 case X86::VANDPSZ256rr: 7120 case X86::VANDPDZrr: 7121 case X86::VANDPSZrr: 7122 case X86::VORPDrr: 7123 case X86::VORPSrr: 7124 case X86::VORPDYrr: 7125 case X86::VORPSYrr: 7126 case X86::VORPDZ128rr: 7127 case X86::VORPSZ128rr: 7128 case X86::VORPDZ256rr: 7129 case X86::VORPSZ256rr: 7130 case X86::VORPDZrr: 7131 case X86::VORPSZrr: 7132 case X86::VXORPDrr: 7133 case X86::VXORPSrr: 7134 case X86::VXORPDYrr: 7135 case X86::VXORPSYrr: 7136 case X86::VXORPDZ128rr: 7137 case X86::VXORPSZ128rr: 7138 case X86::VXORPDZ256rr: 7139 case X86::VXORPSZ256rr: 7140 case X86::VXORPDZrr: 7141 case X86::VXORPSZrr: 7142 case X86::KADDBrr: 7143 case X86::KADDWrr: 7144 case X86::KADDDrr: 7145 case X86::KADDQrr: 7146 case X86::KANDBrr: 7147 case X86::KANDWrr: 7148 case X86::KANDDrr: 7149 case X86::KANDQrr: 7150 case X86::KORBrr: 7151 case X86::KORWrr: 7152 case X86::KORDrr: 7153 case X86::KORQrr: 7154 case X86::KXORBrr: 7155 case X86::KXORWrr: 7156 case X86::KXORDrr: 7157 case X86::KXORQrr: 7158 case X86::VPADDBrr: 7159 case X86::VPADDWrr: 7160 case X86::VPADDDrr: 7161 case X86::VPADDQrr: 7162 case X86::VPADDBYrr: 7163 case X86::VPADDWYrr: 7164 case X86::VPADDDYrr: 7165 case X86::VPADDQYrr: 7166 case X86::VPADDBZ128rr: 7167 case X86::VPADDWZ128rr: 7168 case X86::VPADDDZ128rr: 7169 case X86::VPADDQZ128rr: 7170 case X86::VPADDBZ256rr: 7171 case X86::VPADDWZ256rr: 7172 case X86::VPADDDZ256rr: 7173 case X86::VPADDQZ256rr: 7174 case X86::VPADDBZrr: 7175 case X86::VPADDWZrr: 7176 case X86::VPADDDZrr: 7177 case X86::VPADDQZrr: 7178 case X86::VPMULLWrr: 7179 case X86::VPMULLWYrr: 7180 case X86::VPMULLWZ128rr: 7181 case X86::VPMULLWZ256rr: 7182 case X86::VPMULLWZrr: 7183 case X86::VPMULLDrr: 7184 case X86::VPMULLDYrr: 7185 case X86::VPMULLDZ128rr: 7186 case X86::VPMULLDZ256rr: 7187 case X86::VPMULLDZrr: 7188 case 
X86::VPMULLQZ128rr: 7189 case X86::VPMULLQZ256rr: 7190 case X86::VPMULLQZrr: 7191 // Normal min/max instructions are not commutative because of NaN and signed 7192 // zero semantics, but these are. Thus, there's no need to check for global 7193 // relaxed math; the instructions themselves have the properties we need. 7194 case X86::MAXCPDrr: 7195 case X86::MAXCPSrr: 7196 case X86::MAXCSDrr: 7197 case X86::MAXCSSrr: 7198 case X86::MINCPDrr: 7199 case X86::MINCPSrr: 7200 case X86::MINCSDrr: 7201 case X86::MINCSSrr: 7202 case X86::VMAXCPDrr: 7203 case X86::VMAXCPSrr: 7204 case X86::VMAXCPDYrr: 7205 case X86::VMAXCPSYrr: 7206 case X86::VMAXCPDZ128rr: 7207 case X86::VMAXCPSZ128rr: 7208 case X86::VMAXCPDZ256rr: 7209 case X86::VMAXCPSZ256rr: 7210 case X86::VMAXCPDZrr: 7211 case X86::VMAXCPSZrr: 7212 case X86::VMAXCSDrr: 7213 case X86::VMAXCSSrr: 7214 case X86::VMAXCSDZrr: 7215 case X86::VMAXCSSZrr: 7216 case X86::VMINCPDrr: 7217 case X86::VMINCPSrr: 7218 case X86::VMINCPDYrr: 7219 case X86::VMINCPSYrr: 7220 case X86::VMINCPDZ128rr: 7221 case X86::VMINCPSZ128rr: 7222 case X86::VMINCPDZ256rr: 7223 case X86::VMINCPSZ256rr: 7224 case X86::VMINCPDZrr: 7225 case X86::VMINCPSZrr: 7226 case X86::VMINCSDrr: 7227 case X86::VMINCSSrr: 7228 case X86::VMINCSDZrr: 7229 case X86::VMINCSSZrr: 7230 return true; 7231 case X86::ADDPDrr: 7232 case X86::ADDPSrr: 7233 case X86::ADDSDrr: 7234 case X86::ADDSSrr: 7235 case X86::MULPDrr: 7236 case X86::MULPSrr: 7237 case X86::MULSDrr: 7238 case X86::MULSSrr: 7239 case X86::VADDPDrr: 7240 case X86::VADDPSrr: 7241 case X86::VADDPDYrr: 7242 case X86::VADDPSYrr: 7243 case X86::VADDPDZ128rr: 7244 case X86::VADDPSZ128rr: 7245 case X86::VADDPDZ256rr: 7246 case X86::VADDPSZ256rr: 7247 case X86::VADDPDZrr: 7248 case X86::VADDPSZrr: 7249 case X86::VADDSDrr: 7250 case X86::VADDSSrr: 7251 case X86::VADDSDZrr: 7252 case X86::VADDSSZrr: 7253 case X86::VMULPDrr: 7254 case X86::VMULPSrr: 7255 case X86::VMULPDYrr: 7256 case X86::VMULPSYrr: 7257 case X86::VMULPDZ128rr: 7258 case X86::VMULPSZ128rr: 7259 case X86::VMULPDZ256rr: 7260 case X86::VMULPSZ256rr: 7261 case X86::VMULPDZrr: 7262 case X86::VMULPSZrr: 7263 case X86::VMULSDrr: 7264 case X86::VMULSSrr: 7265 case X86::VMULSDZrr: 7266 case X86::VMULSSZrr: 7267 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath; 7268 default: 7269 return false; 7270 } 7271} 7272 7273/// This is an architecture-specific helper function of reassociateOps. 7274/// Set special operand attributes for new instructions after reassociation. 7275void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1, 7276 MachineInstr &OldMI2, 7277 MachineInstr &NewMI1, 7278 MachineInstr &NewMI2) const { 7279 // Integer instructions define an implicit EFLAGS source register operand as 7280 // the third source (fourth total) operand. 
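  // FP and vector reassociation candidates only have three operands, so there
  // is no implicit EFLAGS operand to update for them.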
7281 if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4) 7282 return; 7283 7284 assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 && 7285 "Unexpected instruction type for reassociation"); 7286 7287 MachineOperand &OldOp1 = OldMI1.getOperand(3); 7288 MachineOperand &OldOp2 = OldMI2.getOperand(3); 7289 MachineOperand &NewOp1 = NewMI1.getOperand(3); 7290 MachineOperand &NewOp2 = NewMI2.getOperand(3); 7291 7292 assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() && 7293 "Must have dead EFLAGS operand in reassociable instruction"); 7294 assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() && 7295 "Must have dead EFLAGS operand in reassociable instruction"); 7296 7297 (void)OldOp1; 7298 (void)OldOp2; 7299 7300 assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS && 7301 "Unexpected operand in reassociable instruction"); 7302 assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS && 7303 "Unexpected operand in reassociable instruction"); 7304 7305 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations 7306 // of this pass or other passes. The EFLAGS operands must be dead in these new 7307 // instructions because the EFLAGS operands in the original instructions must 7308 // be dead in order for reassociation to occur. 7309 NewOp1.setIsDead(); 7310 NewOp2.setIsDead(); 7311} 7312 7313std::pair<unsigned, unsigned> 7314X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7315 return std::make_pair(TF, 0u); 7316} 7317 7318ArrayRef<std::pair<unsigned, const char *>> 7319X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7320 using namespace X86II; 7321 static const std::pair<unsigned, const char *> TargetFlags[] = { 7322 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"}, 7323 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"}, 7324 {MO_GOT, "x86-got"}, 7325 {MO_GOTOFF, "x86-gotoff"}, 7326 {MO_GOTPCREL, "x86-gotpcrel"}, 7327 {MO_PLT, "x86-plt"}, 7328 {MO_TLSGD, "x86-tlsgd"}, 7329 {MO_TLSLD, "x86-tlsld"}, 7330 {MO_TLSLDM, "x86-tlsldm"}, 7331 {MO_GOTTPOFF, "x86-gottpoff"}, 7332 {MO_INDNTPOFF, "x86-indntpoff"}, 7333 {MO_TPOFF, "x86-tpoff"}, 7334 {MO_DTPOFF, "x86-dtpoff"}, 7335 {MO_NTPOFF, "x86-ntpoff"}, 7336 {MO_GOTNTPOFF, "x86-gotntpoff"}, 7337 {MO_DLLIMPORT, "x86-dllimport"}, 7338 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"}, 7339 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"}, 7340 {MO_TLVP, "x86-tlvp"}, 7341 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"}, 7342 {MO_SECREL, "x86-secrel"}}; 7343 return makeArrayRef(TargetFlags); 7344} 7345 7346namespace { 7347 /// Create Global Base Reg pass. This initializes the PIC 7348 /// global base register for x86-32. 7349 struct CGBR : public MachineFunctionPass { 7350 static char ID; 7351 CGBR() : MachineFunctionPass(ID) {} 7352 7353 bool runOnMachineFunction(MachineFunction &MF) override { 7354 const X86TargetMachine *TM = 7355 static_cast<const X86TargetMachine *>(&MF.getTarget()); 7356 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 7357 7358 // Don't do anything in the 64-bit small and kernel code models. They use 7359 // RIP-relative addressing for everything. 7360 if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small || 7361 TM->getCodeModel() == CodeModel::Kernel)) 7362 return false; 7363 7364 // Only emit a global base reg in PIC mode. 
7365 if (!TM->isPositionIndependent()) 7366 return false; 7367 7368 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 7369 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); 7370 7371 // If we didn't need a GlobalBaseReg, don't insert code. 7372 if (GlobalBaseReg == 0) 7373 return false; 7374 7375 // Insert the set of GlobalBaseReg into the first MBB of the function 7376 MachineBasicBlock &FirstMBB = MF.front(); 7377 MachineBasicBlock::iterator MBBI = FirstMBB.begin(); 7378 DebugLoc DL = FirstMBB.findDebugLoc(MBBI); 7379 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 7380 const X86InstrInfo *TII = STI.getInstrInfo(); 7381 7382 unsigned PC; 7383 if (STI.isPICStyleGOT()) 7384 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); 7385 else 7386 PC = GlobalBaseReg; 7387 7388 if (STI.is64Bit()) { 7389 if (TM->getCodeModel() == CodeModel::Medium) { 7390 // In the medium code model, use a RIP-relative LEA to materialize the 7391 // GOT. 7392 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC) 7393 .addReg(X86::RIP) 7394 .addImm(0) 7395 .addReg(0) 7396 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_") 7397 .addReg(0); 7398 } else if (TM->getCodeModel() == CodeModel::Large) { 7399 // Loading the GOT in the large code model requires math with labels, 7400 // so we use a pseudo instruction and expand it during MC emission. 7401 unsigned Scratch = RegInfo.createVirtualRegister(&X86::GR64RegClass); 7402 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVGOT64r), PC) 7403 .addReg(Scratch, RegState::Undef | RegState::Define) 7404 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_"); 7405 } else { 7406 llvm_unreachable("unexpected code model"); 7407 } 7408 } else { 7409 // Operand of MovePCtoStack is completely ignored by asm printer. It's 7410 // only used in JIT code emission as displacement to pc. 7411 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); 7412 7413 // If we're using vanilla 'GOT' PIC style, we should use relative 7414 // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external. 7415 if (STI.isPICStyleGOT()) { 7416 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], 7417 // %some_register 7418 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) 7419 .addReg(PC) 7420 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", 7421 X86II::MO_GOT_ABSOLUTE_ADDRESS); 7422 } 7423 } 7424 7425 return true; 7426 } 7427 7428 StringRef getPassName() const override { 7429 return "X86 PIC Global Base Reg Initialization"; 7430 } 7431 7432 void getAnalysisUsage(AnalysisUsage &AU) const override { 7433 AU.setPreservesCFG(); 7434 MachineFunctionPass::getAnalysisUsage(AU); 7435 } 7436 }; 7437} 7438 7439char CGBR::ID = 0; 7440FunctionPass* 7441llvm::createX86GlobalBaseRegPass() { return new CGBR(); } 7442 7443namespace { 7444 struct LDTLSCleanup : public MachineFunctionPass { 7445 static char ID; 7446 LDTLSCleanup() : MachineFunctionPass(ID) {} 7447 7448 bool runOnMachineFunction(MachineFunction &MF) override { 7449 if (skipFunction(MF.getFunction())) 7450 return false; 7451 7452 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); 7453 if (MFI->getNumLocalDynamicTLSAccesses() < 2) { 7454 // No point folding accesses if there isn't at least two. 7455 return false; 7456 } 7457 7458 MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>(); 7459 return VisitNode(DT->getRootNode(), 0); 7460 } 7461 7462 // Visit the dominator subtree rooted at Node in pre-order. 
7463 // If TLSBaseAddrReg is non-null, then use that to replace any 7464 // TLS_base_addr instructions. Otherwise, create the register 7465 // when the first such instruction is seen, and then use it 7466 // as we encounter more instructions. 7467 bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) { 7468 MachineBasicBlock *BB = Node->getBlock(); 7469 bool Changed = false; 7470 7471 // Traverse the current block. 7472 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; 7473 ++I) { 7474 switch (I->getOpcode()) { 7475 case X86::TLS_base_addr32: 7476 case X86::TLS_base_addr64: 7477 if (TLSBaseAddrReg) 7478 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg); 7479 else 7480 I = SetRegister(*I, &TLSBaseAddrReg); 7481 Changed = true; 7482 break; 7483 default: 7484 break; 7485 } 7486 } 7487 7488 // Visit the children of this block in the dominator tree. 7489 for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end(); 7490 I != E; ++I) { 7491 Changed |= VisitNode(*I, TLSBaseAddrReg); 7492 } 7493 7494 return Changed; 7495 } 7496 7497 // Replace the TLS_base_addr instruction I with a copy from 7498 // TLSBaseAddrReg, returning the new instruction. 7499 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I, 7500 unsigned TLSBaseAddrReg) { 7501 MachineFunction *MF = I.getParent()->getParent(); 7502 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); 7503 const bool is64Bit = STI.is64Bit(); 7504 const X86InstrInfo *TII = STI.getInstrInfo(); 7505 7506 // Insert a Copy from TLSBaseAddrReg to RAX/EAX. 7507 MachineInstr *Copy = 7508 BuildMI(*I.getParent(), I, I.getDebugLoc(), 7509 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX) 7510 .addReg(TLSBaseAddrReg); 7511 7512 // Erase the TLS_base_addr instruction. 7513 I.eraseFromParent(); 7514 7515 return Copy; 7516 } 7517 7518 // Create a virtual register in *TLSBaseAddrReg, and populate it by 7519 // inserting a copy instruction after I. Returns the new instruction. 7520 MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) { 7521 MachineFunction *MF = I.getParent()->getParent(); 7522 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); 7523 const bool is64Bit = STI.is64Bit(); 7524 const X86InstrInfo *TII = STI.getInstrInfo(); 7525 7526 // Create a virtual register for the TLS base address. 7527 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 7528 *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit 7529 ? &X86::GR64RegClass 7530 : &X86::GR32RegClass); 7531 7532 // Insert a copy from RAX/EAX to TLSBaseAddrReg. 7533 MachineInstr *Next = I.getNextNode(); 7534 MachineInstr *Copy = 7535 BuildMI(*I.getParent(), Next, I.getDebugLoc(), 7536 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg) 7537 .addReg(is64Bit ? X86::RAX : X86::EAX); 7538 7539 return Copy; 7540 } 7541 7542 StringRef getPassName() const override { 7543 return "Local Dynamic TLS Access Clean-up"; 7544 } 7545 7546 void getAnalysisUsage(AnalysisUsage &AU) const override { 7547 AU.setPreservesCFG(); 7548 AU.addRequired<MachineDominatorTree>(); 7549 MachineFunctionPass::getAnalysisUsage(AU); 7550 } 7551 }; 7552} 7553 7554char LDTLSCleanup::ID = 0; 7555FunctionPass* 7556llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); } 7557 7558/// Constants defining how certain sequences should be outlined. 7559/// 7560/// \p MachineOutlinerDefault implies that the function is called with a call 7561/// instruction, and a return must be emitted for the outlined function frame. 
///
/// That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION       I1
/// I3                                  I2
///                                     I3
///                                     ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION        I1
/// ret                                 I2
///                                     ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass {
  MachineOutlinerDefault,
  MachineOutlinerTailCall
};

outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  unsigned SequenceSize =
      std::accumulate(RepeatedSequenceLocs[0].front(),
                      std::next(RepeatedSequenceLocs[0].back()), 0,
                      [](unsigned Sum, const MachineInstr &MI) {
                        // FIXME: x86 doesn't implement getInstSizeInBytes, so
                        // we can't tell the cost. Just assume each instruction
                        // is one byte.
                        if (MI.isDebugInstr() || MI.isKill())
                          return Sum;
                        return Sum + 1;
                      });

  // FIXME: Use real size in bytes for call and ret instructions.
  if (RepeatedSequenceLocs[0].back()->isTerminator()) {
    for (outliner::Candidate &C : RepeatedSequenceLocs)
      C.setCallInfo(MachineOutlinerTailCall, 1);

    return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                      0, // Number of bytes to emit frame.
                                      MachineOutlinerTailCall // Type of frame.
    );
  }

  for (outliner::Candidate &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, 1);

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
                                    MachineOutlinerDefault);
}

bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                               bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Does the function use a red zone? If it does, then we can't risk messing
  // with the stack.
  if (!F.hasFnAttribute(Attribute::NoRedZone)) {
    // It could have a red zone. If it does, then we don't want to touch it.
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    if (!X86FI || X86FI->getUsesRedZone())
      return false;
  }

  // If we *don't* want to outline from things that could potentially be
  // deduped, then return false.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // This function is viable for outlining, so return true.
  return true;
}

outliner::InstrType
X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                               unsigned Flags) const {
  MachineInstr &MI = *MIT;
  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much so we can go
  // ahead and skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a tail call? If yes, we can outline as a tail call.
  if (isTailCall(MI))
    return outliner::InstrType::Legal;

  // Is this the terminator of a basic block?
  if (MI.isTerminator() || MI.isReturn()) {

    // Does its parent have any successors in its MachineFunction?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It does, so we can't tail call it.
    return outliner::InstrType::Illegal;
  }

  // Don't outline anything that modifies or reads from the stack pointer.
  //
  // FIXME: There are instructions which are being manually built without
  // explicit uses/defs so we also have to check the MCInstrDesc. We should be
  // able to remove the extra checks once those are fixed up. For example,
  // sometimes we might get something like %rax = POP64r 1. This won't be
  // caught by modifiesRegister or readsRegister even though the instruction
  // really ought to be formed so that modifiesRegister/readsRegister would
  // catch it.
  if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
    return outliner::InstrType::Illegal;

  // Outlined calls change the instruction pointer, so don't read from it.
  if (MI.readsRegister(X86::RIP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
    return outliner::InstrType::Illegal;

  // Positions can't safely be outlined.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Make sure none of the operands of this instruction do anything tricky.
  for (const MachineOperand &MOP : MI.operands())
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

  return outliner::InstrType::Legal;
}

void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      const outliner::OutlinedFunction &OF)
    const {
  // If we're a tail call, we already have a return, so don't do anything.
  if (OF.FrameConstructionID == MachineOutlinerTailCall)
    return;

  // We're a normal call, so our sequence doesn't have a return instruction.
  // Add it in.
  MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
  MBB.insert(MBB.end(), retq);
}

MachineBasicBlock::iterator
X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator &It,
                                 MachineFunction &MF,
                                 const outliner::Candidate &C) const {
  // Is it a tail call?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // Yes, just insert a JMP.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  } else {
    // No, insert a call.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  }

  return It;
}
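
// The pass factories defined above (createX86GlobalBaseRegPass and
// createCleanupLocalDynamicTLSPass) are only constructed here; they are wired
// into the backend pipeline from X86TargetMachine.cpp. A minimal sketch of
// that registration follows, written from memory rather than copied from this
// revision, so the exact guards and ordering may differ:
//
//   bool X86PassConfig::addInstSelector() {
//     // Install the DAG instruction selector first.
//     addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
//
//     // Fold redundant local-dynamic TLS base-address calls (LDTLSCleanup).
//     if (TM->getTargetTriple().isOSBinFormatELF() &&
//         getOptLevel() != CodeGenOpt::None)
//       addPass(createCleanupLocalDynamicTLSPass());
//
//     // Materialize the PIC global base register where one is needed (CGBR).
//     addPass(createX86GlobalBaseRegPass());
//     return true;
//   }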