X86InstrInfo.cpp revision 353358
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);
static cl::opt<unsigned>
PartialRegUpdateClearance("partial-reg-update-clearance",
                          cl::desc("Clearance between two register writes "
                                   "for inserting XOR to avoid partial "
                                   "register update"),
                          cl::init(64), cl::Hidden);
static cl::opt<unsigned>
UndefRegClearance("undef-reg-clearance",
                  cl::desc("How many idle instructions we would like before "
                           "certain undef register reads"),
                  cl::init(128), cl::Hidden);


// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET,
                      (STI.is64Bit() ? X86::RETQ : X86::RETL)),
      Subtarget(STI), RI(STI.getTargetTriple()) {
}

bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
    LLVM_FALLTHROUGH;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    unsigned StackAlign = TFI->getStackAlignment();
    int SPAdj = alignTo(getFrameSize(MI), StackAlign);
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() ||
          I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently handle only PUSHes we can reasonably expect to see
  // in call sequences
  switch (MI.getOpcode()) {
  default:
    return 0;
  case X86::PUSH32i8:
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSHi32:
    return 4;
  case X86::PUSH64i8:
  case X86::PUSH64r:
  case X86::PUSH64rmm:
  case X86::PUSH64rmr:
  case X86::PUSH64i32:
    return 8;
  }
}

/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
      MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
      MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
      MI.getOperand(Op + X86::AddrDisp).isImm() &&
      MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
      MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
      MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::KMOVBkm:
    MemBytes = 1;
    return true;
  case X86::MOV16rm:
  case X86::KMOVWkm:
    MemBytes = 2;
    return true;
  case X86::MOV32rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::KMOVDkm:
    MemBytes = 4;
    return true;
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm:
    MemBytes = 8;
    return true;
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
    MemBytes = 16;
    return true;
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
    MemBytes = 32;
    return true;
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
    MemBytes = 64;
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8mr:
  case X86::KMOVBmk:
    MemBytes = 1;
    return true;
  case X86::MOV16mr:
  case X86::KMOVWmk:
    MemBytes = 2;
    return true;
  case X86::MOV32mr:
  case X86::MOVSSmr:
  case X86::VMOVSSmr:
  case X86::VMOVSSZmr:
  case X86::KMOVDmk:
    MemBytes = 4;
    return true;
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSDmr:
  case X86::VMOVSDmr:
  case X86::VMOVSDZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk:
    MemBytes = 8;
    return true;
  case X86::MOVAPSmr:
  case X86::MOVUPSmr:
  case X86::MOVAPDmr:
  case X86::MOVUPDmr:
  case X86::MOVDQAmr:
  case X86::MOVDQUmr:
  case X86::VMOVAPSmr:
  case X86::VMOVUPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVUPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVDQUmr:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
    MemBytes = 16;
    return true;
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
    MemBytes = 32;
    return true;
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
    MemBytes = 64;
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           unsigned &MemBytes) const {
  if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI.getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasLoadFromStackSlot(MI, Accesses)) {
      FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          unsigned &MemBytes) const {
  if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI.getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasStoreToStackSlot(MI, Accesses)) {
      FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

/// Return true if register is PIC base; i.e., defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
    return false;
  bool isPICBase = false;
  for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
         E = MRI.def_instr_end(); I != E; ++I) {
    MachineInstr *DefMI = &*I;
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  // AVX-512
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad(AA)) {
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
        return false;
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        !MI.getOperand(1 + X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
        return true;
      unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig.getOpcode()) {
    case X86::MOV32r0:  Value = 0; break;
    case X86::MOV32r1:  Value = 1; break;
    case X86::MOV32r_1: Value = -1; break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    const DebugLoc &DL = Orig.getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri))
        .add(Orig.getOperand(0))
        .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MBB.insert(I, MI);
  }

  MachineInstr &NewMI = *std::prev(I);
  NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Return the shift count of a machine operand, truncated to the number of
/// bits the hardware actually honors.
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}

/// Check whether the given shift count can be represented as the scale
/// factor of a LEA instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide, which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}
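
// Illustrative example (editorial addition): the two SIB.scale bits encode
// the multipliers 1, 2, 4 and 8, so shifts by 1..3 map to scales 2, 4 and 8
// (e.g. "shl $3, %reg" can become "lea (,%reg,8), %dst"), while a shift by 4
// would need scale 16, which SIB cannot encode.
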
bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, unsigned &NewSrc,
                                  bool &isKill, MachineOperand &ImplicitOp,
                                  LiveVariables *LV) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ?
      &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  unsigned SrcReg = Src.getReg();

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");

    if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
  // another we need to add 64-bit registers to the final MI.
  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");
  } else {
    // Virtual register of the wrong class; we have to create a temporary
    // 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    MachineInstr *Copy =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
            .add(Src);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;

    if (LV)
      LV->replaceKillInstruction(SrcReg, MI, *Copy);
  }

  // We've set all the parameters without issue.
  return true;
}

MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
    unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
    LiveVariables *LV, bool Is8BitOp) const {
  // We handle 8-bit adds and various 16-bit opcodes in the switch below.
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
              *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
         "Unexpected type for LEA transform");

  // TODO: For a 32-bit target, we need to adjust the LEA variables with
  // something like this:
  //   Opcode = X86::LEA32r;
  //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  //   OutRegLEA =
  //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
  //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
  if (!Subtarget.is64Bit())
    return nullptr;

  unsigned Opcode = X86::LEA64_32r;
  unsigned InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  unsigned OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  // Build and insert into an implicit UNDEF value. This is OK because
  // we will be shifting and then extracting the lower 8/16-bits.
  // This has the potential to cause a partial register stall, e.g.
  //   movw (%rbp,%rcx,2), %dx
  //   leal -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  bool IsDead = MI.getOperand(0).isDead();
  bool IsKill = MI.getOperand(1).isKill();
  unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
  BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
  MachineInstr *InsMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(InRegLEA, RegState::Define, SubReg)
          .addReg(Src, getKillRegState(IsKill));

  MachineInstrBuilder MIB =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL8ri:
  case X86::SHL16ri: {
    unsigned ShAmt = MI.getOperand(2).getImm();
    MIB.addReg(0).addImm(1ULL << ShAmt)
       .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
    break;
  }
  case X86::INC8r:
  case X86::INC16r:
    addRegOffset(MIB, InRegLEA, true, 1);
    break;
  case X86::DEC8r:
  case X86::DEC16r:
    addRegOffset(MIB, InRegLEA, true, -1);
    break;
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
    break;
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
    unsigned Src2 = MI.getOperand(2).getReg();
    bool IsKill2 = MI.getOperand(2).isKill();
    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
    unsigned InRegLEA2 = 0;
    MachineInstr *InsMI2 = nullptr;
    if (Src == Src2) {
      // ADD8rr/ADD16rr killed %reg1028, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, InRegLEA, true, InRegLEA, false);
    } else {
      if (Subtarget.is64Bit())
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
      else
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we will be shifting and then extracting the lower 8/16-bits.
      BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
      InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
                   .addReg(InRegLEA2, RegState::Define, SubReg)
                   .addReg(Src2, getKillRegState(IsKill2));
      addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
    }
    if (LV && IsKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, *InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
          .addReg(OutRegLEA, RegState::Kill, SubReg);

  if (LV) {
    // Update live variables.
    LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
    LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
    if (IsKill)
      LV->replaceKillInstruction(Src, MI, *InsMI);
    if (IsDead)
      LV->replaceKillInstruction(Dest, MI, *ExtMI);
  }

  return ExtMI;
}
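
// Illustrative sketch (editorial addition, approximate pseudo-MIR): in
// 64-bit mode a 16-bit add such as "%dst:gr16 = ADD16ri %src, 8" is expanded
// by the helper above roughly as:
//   %in:gr64_nosp = IMPLICIT_DEF
//   %in.sub_16bit = COPY %src
//   %out:gr32     = LEA64_32r %in, 1, $noreg, 8, $noreg
//   %dst:gr16     = COPY %out.sub_16bit
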
/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineInstr &MI, LiveVariables *LV) const {
  // The following opcodes also set the condition code register(s). Only
  // convert them to an equivalent LEA if the condition code register defs
  // are dead!
  if (hasLiveCondCodeDef(MI))
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  // All input instructions are two-addr instructions. Get the known operands.
  const MachineOperand &Dest = MI.getOperand(0);
  const MachineOperand &Src = MI.getOperand(1);

  // Ideally, operations with undef should be folded before we get here, but we
  // can't guarantee it. Bail out because optimizing undefs is a waste of time.
  // Without this, we have to forward undef state to new register operands to
  // avoid machine verifier errors.
  if (Src.isUndef())
    return nullptr;
  if (MI.getNumOperands() > 2)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
      return nullptr;

  MachineInstr *NewMI = nullptr;
  bool Is64Bit = Subtarget.is64Bit();

  bool Is8BitOp = false;
  unsigned MIOpc = MI.getOpcode();
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL64ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    // LEA can't handle RSP.
    if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
        !MF.getRegInfo().constrainRegClass(Src.getReg(),
                                           &X86::GR64_NOSPRegClass))
      return nullptr;

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
                .add(Dest)
                .addReg(0)
                .addImm(1ULL << ShAmt)
                .add(Src)
                .addImm(0)
                .addReg(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;

    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    // LEA can't handle ESP.
    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(0)
            .addImm(1ULL << ShAmt)
            .addReg(SrcReg, getKillRegState(isKill))
            .addImm(0)
            .addReg(0);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    NewMI = MIB;

    break;
  }
  case X86::SHL8ri:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::SHL16ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt))
      return nullptr;
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  }
  case X86::INC64r:
  case X86::INC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
        (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, 1);
    break;
  }
  case X86::DEC64r:
  case X86::DEC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -1);

    break;
  }
  case X86::DEC8r:
  case X86::INC8r:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::DEC16r:
  case X86::INC16r:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD32rr:
  case X86::ADD32rr_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc;
    if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
      Opc = X86::LEA64r;
    else
      Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    const MachineOperand &Src2 = MI.getOperand(2);
    bool isKill2;
    unsigned SrcReg2;
    MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
                        SrcReg2, isKill2, ImplicitOp2, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    if (ImplicitOp2.getReg() != 0)
      MIB.add(ImplicitOp2);

    NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
    if (LV && Src2.isKill())
      LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
    break;
  }
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64ri32:
  case X86::ADD64ri8:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8_DB:
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri_DB:
  case X86::ADD32ri8_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, MI.getOperand(2));
    break;
  }
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::SUB8ri:
  case X86::SUB16ri8:
  case X86::SUB16ri:
    /// FIXME: Support these similar to ADD8ri/ADD16ri*.
    return nullptr;
  case X86::SUB32ri8:
  case X86::SUB32ri: {
    int64_t Imm = MI.getOperand(2).getImm();
    if (!isInt<32>(-Imm))
      return nullptr;

    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    unsigned SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
                        SrcReg, isKill, ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -Imm);
    break;
  }

  case X86::SUB64ri8:
  case X86::SUB64ri32: {
    int64_t Imm = MI.getOperand(2).getImm();
    if (!isInt<32>(-Imm))
      return nullptr;

    assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
                                      get(X86::LEA64r)).add(Dest).add(Src);
    NewMI = addOffset(MIB, -Imm);
    break;
  }

  case X86::VMOVDQU8Z128rmk:
  case X86::VMOVDQU8Z256rmk:
  case X86::VMOVDQU8Zrmk:
  case X86::VMOVDQU16Z128rmk:
  case X86::VMOVDQU16Z256rmk:
  case X86::VMOVDQU16Zrmk:
  case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
  case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
  case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
  case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
  case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
  case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
  case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
  case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
  case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
  case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
  case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
  case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rmk:  Opc = X86::VPBLENDMBZ128rmk; break;
    case X86::VMOVDQU8Z256rmk:  Opc = X86::VPBLENDMBZ256rmk; break;
    case X86::VMOVDQU8Zrmk:     Opc = X86::VPBLENDMBZrmk;    break;
    case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
    case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
    case X86::VMOVDQU16Zrmk:    Opc = X86::VPBLENDMWZrmk;    break;
    case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQU32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQU64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVUPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVUPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVUPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVUPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVUPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVUPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQA32Zrmk:    Opc = X86::VPBLENDMDZrmk;    break;
    case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQA64Zrmk:    Opc = X86::VPBLENDMQZrmk;    break;
    case X86::VMOVAPDZ128rmk:   Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVAPDZ256rmk:   Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVAPDZrmk:      Opc = X86::VBLENDMPDZrmk;    break;
    case X86::VMOVAPSZ128rmk:   Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVAPSZ256rmk:   Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVAPSZrmk:      Opc = X86::VBLENDMPSZrmk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3))
                .add(MI.getOperand(4))
                .add(MI.getOperand(5))
                .add(MI.getOperand(6))
                .add(MI.getOperand(7));
    break;
  }
  case X86::VMOVDQU8Z128rrk:
  case X86::VMOVDQU8Z256rrk:
  case X86::VMOVDQU8Zrrk:
  case X86::VMOVDQU16Z128rrk:
  case X86::VMOVDQU16Z256rrk:
  case X86::VMOVDQU16Zrrk:
  case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
  case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
  case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
  case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
  case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
  case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
  case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
  case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
  case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
  case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
  case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
  case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rrk:  Opc = X86::VPBLENDMBZ128rrk; break;
    case X86::VMOVDQU8Z256rrk:  Opc = X86::VPBLENDMBZ256rrk; break;
    case X86::VMOVDQU8Zrrk:     Opc = X86::VPBLENDMBZrrk;    break;
    case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
    case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
    case X86::VMOVDQU16Zrrk:    Opc = X86::VPBLENDMWZrrk;    break;
    case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQU32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQU64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVUPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVUPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVUPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVUPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVUPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVUPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQA32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
    case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQA64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
    case X86::VMOVAPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVAPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVAPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
    case X86::VMOVAPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVAPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVAPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3));
    break;
  }
  }

  if (!NewMI) return nullptr;

  if (LV) {  // Update live variables
    if (Src.isKill())
      LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
    if (Dest.isDead())
      LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
  }

  MFI->insert(MI.getIterator(), NewMI);  // Insert the new inst
  return NewMI;
}

/// This determines which of the three possible cases of a three-source
/// commute the given source indexes correspond to, taking any mask operands
/// into account. Commuting a passthru operand is never allowed.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {
  // Put the lowest index to SrcOpIdx1 to simplify the checks below.
  if (SrcOpIdx1 > SrcOpIdx2)
    std::swap(SrcOpIdx1, SrcOpIdx2);

  unsigned Op1 = 1, Op2 = 2, Op3 = 3;
  if (X86II::isKMasked(TSFlags)) {
    Op2++;
    Op3++;
  }

  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
    return 0;
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
    return 1;
  if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
    return 2;
  llvm_unreachable("Unknown three src commute case.");
}
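
// Illustrative note (editorial addition): for a k-masked AVX-512 FMA such as
// VFMADD213PSZrk the operand layout is (dst, src1 tied to dst, mask, src2,
// src3), so when X86II::isKMasked() is set the second and third sources sit
// at operand indexes 3 and 4 rather than 2 and 3, which is why Op2 and Op3
// are bumped above.
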
unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
    const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
    const X86InstrFMA3Group &FMA3Group) const {

  unsigned Opc = MI.getOpcode();

  // TODO: Commuting the 1st operand of FMA*_Int requires some additional
  // analysis. The commute optimization is legal only if all users of FMA*_Int
  // use only the lowest element of the FMA*_Int instruction. Such an analysis
  // is not implemented yet. So, just return 0 in that case.
  // When such an analysis becomes available, this will be the right place to
  // call it.
  assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
         "Intrinsic instructions can't commute operand 1");

  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case number!");

  // Define the FMA forms mapping array that helps to map input FMA form
  // to output FMA form to preserve the operation semantics after
  // commuting the operands.
  const unsigned Form132Index = 0;
  const unsigned Form213Index = 1;
  const unsigned Form231Index = 2;
  static const unsigned FormMapping[][3] = {
    // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
    //   FMA132 A, C, b; ==> FMA231 C, A, b;
    //   FMA213 B, A, c; ==> FMA213 A, B, c;
    //   FMA231 C, A, b; ==> FMA132 A, C, b;
    { Form231Index, Form213Index, Form132Index },
    // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
    //   FMA132 A, c, B; ==> FMA132 B, c, A;
    //   FMA213 B, a, C; ==> FMA231 C, a, B;
    //   FMA231 C, a, B; ==> FMA213 B, a, C;
    { Form132Index, Form231Index, Form213Index },
    // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
    //   FMA132 a, C, B; ==> FMA213 a, B, C;
    //   FMA213 b, A, C; ==> FMA132 b, C, A;
    //   FMA231 c, A, B; ==> FMA231 c, B, A;
    { Form213Index, Form132Index, Form231Index }
  };

  unsigned FMAForms[3];
  FMAForms[0] = FMA3Group.get132Opcode();
  FMAForms[1] = FMA3Group.get213Opcode();
  FMAForms[2] = FMA3Group.get231Opcode();
  unsigned FormIndex;
  for (FormIndex = 0; FormIndex < 3; FormIndex++)
    if (Opc == FMAForms[FormIndex])
      break;

  // Everything is ready, just adjust the FMA opcode and return it.
  FormIndex = FormMapping[Case][FormIndex];
  return FMAForms[FormIndex];
}

static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
                             unsigned SrcOpIdx2) {
  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case value!");

  // For each case we need to swap two pairs of bits in the final immediate.
  static const uint8_t SwapMasks[3][4] = {
    { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
    { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
    { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
  };

  uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
  // Clear out the bits we are swapping.
  uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
                           SwapMasks[Case][2] | SwapMasks[Case][3]);
  // If the immediate had a bit of the pair set, then set the opposite bit.
  if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
  if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
  if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
  if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
  MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
}
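
// Worked example (editorial addition): the VPTERNLOG immediate is a truth
// table indexed by bit (a<<2)|(b<<1)|c of the three sources. Imm 0xCA
// computes "a ? b : c"; commuting the second and third sources (Case 2)
// swaps the table entries where b != c (bit pairs 1/2 and 5/6), yielding
// 0xAC, i.e. "a ? c : b".
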
// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
#define VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rr:   case X86::VPERMT2##Suffix##128rr:   \
  case X86::VPERMI2##Suffix##256rr:   case X86::VPERMT2##Suffix##256rr:   \
  case X86::VPERMI2##Suffix##rr:      case X86::VPERMT2##Suffix##rr:      \
  case X86::VPERMI2##Suffix##128rm:   case X86::VPERMT2##Suffix##128rm:   \
  case X86::VPERMI2##Suffix##256rm:   case X86::VPERMT2##Suffix##256rm:   \
  case X86::VPERMI2##Suffix##rm:      case X86::VPERMT2##Suffix##rm:      \
  case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
  case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
  case X86::VPERMI2##Suffix##rrkz:    case X86::VPERMT2##Suffix##rrkz:    \
  case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
  case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
  case X86::VPERMI2##Suffix##rmkz:    case X86::VPERMT2##Suffix##rmkz:

#define VPERM_CASES_BROADCAST(Suffix) \
  VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rmb:   case X86::VPERMT2##Suffix##128rmb:   \
  case X86::VPERMI2##Suffix##256rmb:   case X86::VPERMT2##Suffix##256rmb:   \
  case X86::VPERMI2##Suffix##rmb:      case X86::VPERMT2##Suffix##rmb:      \
  case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
  case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
  case X86::VPERMI2##Suffix##rmbkz:    case X86::VPERMT2##Suffix##rmbkz:

  switch (Opcode) {
  default: return false;
  VPERM_CASES(B)
  VPERM_CASES_BROADCAST(D)
  VPERM_CASES_BROADCAST(PD)
  VPERM_CASES_BROADCAST(PS)
  VPERM_CASES_BROADCAST(Q)
  VPERM_CASES(W)
    return true;
  }
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
#define VPERM_CASES(Orig, New) \
  case X86::Orig##128rr:   return X86::New##128rr;   \
  case X86::Orig##128rrkz: return X86::New##128rrkz; \
  case X86::Orig##128rm:   return X86::New##128rm;   \
  case X86::Orig##128rmkz: return X86::New##128rmkz; \
  case X86::Orig##256rr:   return X86::New##256rr;   \
  case X86::Orig##256rrkz: return X86::New##256rrkz; \
  case X86::Orig##256rm:   return X86::New##256rm;   \
  case X86::Orig##256rmkz: return X86::New##256rmkz; \
  case X86::Orig##rr:      return X86::New##rr;      \
  case X86::Orig##rrkz:    return X86::New##rrkz;    \
  case X86::Orig##rm:      return X86::New##rm;      \
  case X86::Orig##rmkz:    return X86::New##rmkz;

#define VPERM_CASES_BROADCAST(Orig, New) \
  VPERM_CASES(Orig, New) \
  case X86::Orig##128rmb:   return X86::New##128rmb;   \
  case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
  case X86::Orig##256rmb:   return X86::New##256rmb;   \
  case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
  case X86::Orig##rmb:      return X86::New##rmb;      \
  case X86::Orig##rmbkz:    return X86::New##rmbkz;

  switch (Opcode) {
  VPERM_CASES(VPERMI2B, VPERMT2B)
  VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
  VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
  VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
  VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
  VPERM_CASES(VPERMI2W, VPERMT2W)
  VPERM_CASES(VPERMT2B, VPERMI2B)
  VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
  VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
  VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
  VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
  VPERM_CASES(VPERMT2W, VPERMI2W)
  }

  llvm_unreachable("Unreachable!");
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8: { // A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI.getOperand(3).getImm();
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    WorkingMI.getOperand(3).setImm(Size - Amt);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
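  // Illustrative identity behind the case above (editorial addition), for
  // 32-bit operands and counts in 1..31:
  //   SHLD B, C, I      == (B << I) | (C >> (32 - I))
  //   SHRD C, B, (32-I) == (C >> (32 - I)) | (B << I)
  // so swapping the sources and complementing the count preserves the bits.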
  case X86::PFSUBrr:
  case X86::PFSUBRrr: {
    // PFSUB  x, y: x = x - y
    // PFSUBR x, y: x = y - x
    unsigned Opc =
        (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::BLENDPDrri:
  case X86::BLENDPSrri:
  case X86::VBLENDPDrri:
  case X86::VBLENDPSrri:
    // If we're optimizing for size, try to use MOVSD/MOVSS.
    if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
      unsigned Mask, Opc;
      switch (MI.getOpcode()) {
      default: llvm_unreachable("Unreachable!");
      case X86::BLENDPDrri:  Opc = X86::MOVSDrr;  Mask = 0x03; break;
      case X86::BLENDPSrri:  Opc = X86::MOVSSrr;  Mask = 0x0F; break;
      case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
      case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
      }
      if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
        auto &WorkingMI = cloneIfNew(MI);
        WorkingMI.setDesc(get(Opc));
        WorkingMI.RemoveOperand(3);
        return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
                                                       /*NewMI=*/false,
                                                       OpIdx1, OpIdx2);
      }
    }
    LLVM_FALLTHROUGH;
  case X86::PBLENDWrri:
  case X86::VBLENDPDYrri:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDrri:
  case X86::VPBLENDWrri:
  case X86::VPBLENDDYrri:
  case X86::VPBLENDWYrri: {
    int8_t Mask;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::BLENDPDrri:   Mask = (int8_t)0x03; break;
    case X86::BLENDPSrri:   Mask = (int8_t)0x0F; break;
    case X86::PBLENDWrri:   Mask = (int8_t)0xFF; break;
    case X86::VBLENDPDrri:  Mask = (int8_t)0x03; break;
    case X86::VBLENDPSrri:  Mask = (int8_t)0x0F; break;
    case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break;
    case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break;
    case X86::VPBLENDDrri:  Mask = (int8_t)0x0F; break;
    case X86::VPBLENDWrri:  Mask = (int8_t)0xFF; break;
    case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break;
    case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break;
    }
    // Only the least significant bits of Imm are used.
    // Using int8_t to ensure it will be sign extended to the int64_t that
    // setImm takes in order to match isel behavior.
    int8_t Imm = MI.getOperand(3).getImm() & Mask;
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(Mask ^ Imm);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
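  // Worked example (editorial addition): "blendps $0b0101, %xmm1, %xmm0"
  // takes lanes 0 and 2 from %xmm1 and lanes 1 and 3 from %xmm0. Once the
  // sources are swapped the selection must be inverted, so the immediate
  // becomes 0x05 ^ 0x0F == 0x0A (0b1010), as computed above.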
  case X86::INSERTPSrr:
  case X86::VINSERTPSrr:
  case X86::VINSERTPSZrr: {
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
    unsigned ZMask = Imm & 15;
    unsigned DstIdx = (Imm >> 4) & 3;
    unsigned SrcIdx = (Imm >> 6) & 3;

    // We can commute insertps if we zero 2 of the elements, the insertion is
    // "inline", and we don't override the insertion with a zero.
    if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
        countPopulation(ZMask) == 2) {
      unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
      assert(AltIdx < 4 && "Illegal insertion index");
      unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    return nullptr;
  }
  case X86::MOVSDrr:
  case X86::MOVSSrr:
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
    if (Subtarget.hasSSE41()) {
      unsigned Mask, Opc;
      switch (MI.getOpcode()) {
      default: llvm_unreachable("Unreachable!");
      case X86::MOVSDrr:  Opc = X86::BLENDPDrri;  Mask = 0x02; break;
      case X86::MOVSSrr:  Opc = X86::BLENDPSrri;  Mask = 0x0E; break;
      case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
      case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }

    // Convert to SHUFPD.
    assert(MI.getOpcode() == X86::MOVSDrr &&
           "Can only commute MOVSDrr without SSE4.1");

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(X86::SHUFPDrri));
    WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::SHUFPDrri: {
    // Commute to MOVSD.
    assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(X86::MOVSDrr));
    WorkingMI.RemoveOperand(3);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::PCLMULQDQrr:
  case X86::VPCLMULQDQrr:
  case X86::VPCLMULQDQYrr:
  case X86::VPCLMULQDQZrr:
  case X86::VPCLMULQDQZ128rr:
  case X86::VPCLMULQDQZ256rr: {
    // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
    // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
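    // Worked example (editorial addition): "pclmulqdq $0x01" multiplies
    // SRC1's high half by SRC2's low half; with the sources swapped, the
    // same product is selected by $0x10, which is exactly what
    // (Src1Hi << 4) | (Src2Hi >> 4) below computes.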
SRC2[127:64] : SRC2[63:0] 1634 unsigned Imm = MI.getOperand(3).getImm(); 1635 unsigned Src1Hi = Imm & 0x01; 1636 unsigned Src2Hi = Imm & 0x10; 1637 auto &WorkingMI = cloneIfNew(MI); 1638 WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); 1639 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1640 OpIdx1, OpIdx2); 1641 } 1642 case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri: 1643 case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri: 1644 case X86::VPCMPBZrri: case X86::VPCMPUBZrri: 1645 case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri: 1646 case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri: 1647 case X86::VPCMPDZrri: case X86::VPCMPUDZrri: 1648 case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri: 1649 case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri: 1650 case X86::VPCMPQZrri: case X86::VPCMPUQZrri: 1651 case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri: 1652 case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri: 1653 case X86::VPCMPWZrri: case X86::VPCMPUWZrri: 1654 case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik: 1655 case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik: 1656 case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik: 1657 case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik: 1658 case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik: 1659 case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik: 1660 case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik: 1661 case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik: 1662 case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik: 1663 case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik: 1664 case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik: 1665 case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: { 1666 // Flip comparison mode immediate (if necessary). 1667 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7; 1668 Imm = X86::getSwappedVPCMPImm(Imm); 1669 auto &WorkingMI = cloneIfNew(MI); 1670 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm); 1671 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1672 OpIdx1, OpIdx2); 1673 } 1674 case X86::VPCOMBri: case X86::VPCOMUBri: 1675 case X86::VPCOMDri: case X86::VPCOMUDri: 1676 case X86::VPCOMQri: case X86::VPCOMUQri: 1677 case X86::VPCOMWri: case X86::VPCOMUWri: { 1678 // Flip comparison mode immediate (if necessary). 1679 unsigned Imm = MI.getOperand(3).getImm() & 0x7; 1680 Imm = X86::getSwappedVPCOMImm(Imm); 1681 auto &WorkingMI = cloneIfNew(MI); 1682 WorkingMI.getOperand(3).setImm(Imm); 1683 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1684 OpIdx1, OpIdx2); 1685 } 1686 case X86::VPERM2F128rr: 1687 case X86::VPERM2I128rr: { 1688 // Flip permute source immediate. 1689 // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi. 1690 // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi. 
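    // Commuting the sources therefore just flips bits 1 and 5 (XOR 0x22):
    // e.g. imm 0x31 (lo = Op0.hi, hi = Op1.hi) becomes 0x13
    // (lo = Op1.hi, hi = Op0.hi), selecting the same halves from the
    // swapped operands. The zeroing bits (0x08 and 0x80) are unaffected.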
1691 int8_t Imm = MI.getOperand(3).getImm() & 0xFF; 1692 auto &WorkingMI = cloneIfNew(MI); 1693 WorkingMI.getOperand(3).setImm(Imm ^ 0x22); 1694 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1695 OpIdx1, OpIdx2); 1696 } 1697 case X86::MOVHLPSrr: 1698 case X86::UNPCKHPDrr: 1699 case X86::VMOVHLPSrr: 1700 case X86::VUNPCKHPDrr: 1701 case X86::VMOVHLPSZrr: 1702 case X86::VUNPCKHPDZ128rr: { 1703 assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!"); 1704 1705 unsigned Opc = MI.getOpcode(); 1706 switch (Opc) { 1707 default: llvm_unreachable("Unreachable!"); 1708 case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break; 1709 case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break; 1710 case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break; 1711 case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break; 1712 case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break; 1713 case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break; 1714 } 1715 auto &WorkingMI = cloneIfNew(MI); 1716 WorkingMI.setDesc(get(Opc)); 1717 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1718 OpIdx1, OpIdx2); 1719 } 1720 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: { 1721 auto &WorkingMI = cloneIfNew(MI); 1722 unsigned OpNo = MI.getDesc().getNumOperands() - 1; 1723 X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm()); 1724 WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC)); 1725 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1726 OpIdx1, OpIdx2); 1727 } 1728 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: 1729 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: 1730 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: 1731 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: 1732 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: 1733 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: 1734 case X86::VPTERNLOGDZrrik: 1735 case X86::VPTERNLOGDZ128rrik: 1736 case X86::VPTERNLOGDZ256rrik: 1737 case X86::VPTERNLOGQZrrik: 1738 case X86::VPTERNLOGQZ128rrik: 1739 case X86::VPTERNLOGQZ256rrik: 1740 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: 1741 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: 1742 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: 1743 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: 1744 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: 1745 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: 1746 case X86::VPTERNLOGDZ128rmbi: 1747 case X86::VPTERNLOGDZ256rmbi: 1748 case X86::VPTERNLOGDZrmbi: 1749 case X86::VPTERNLOGQZ128rmbi: 1750 case X86::VPTERNLOGQZ256rmbi: 1751 case X86::VPTERNLOGQZrmbi: 1752 case X86::VPTERNLOGDZ128rmbikz: 1753 case X86::VPTERNLOGDZ256rmbikz: 1754 case X86::VPTERNLOGDZrmbikz: 1755 case X86::VPTERNLOGQZ128rmbikz: 1756 case X86::VPTERNLOGQZ256rmbikz: 1757 case X86::VPTERNLOGQZrmbikz: { 1758 auto &WorkingMI = cloneIfNew(MI); 1759 commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2); 1760 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1761 OpIdx1, OpIdx2); 1762 } 1763 default: { 1764 if (isCommutableVPERMV3Instruction(MI.getOpcode())) { 1765 unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode()); 1766 auto &WorkingMI = cloneIfNew(MI); 1767 WorkingMI.setDesc(get(Opc)); 1768 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1769 OpIdx1, OpIdx2); 1770 } 1771 1772 const X86InstrFMA3Group *FMA3Group = 
getFMA3Group(MI.getOpcode(), 1773 MI.getDesc().TSFlags); 1774 if (FMA3Group) { 1775 unsigned Opc = 1776 getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group); 1777 auto &WorkingMI = cloneIfNew(MI); 1778 WorkingMI.setDesc(get(Opc)); 1779 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, 1780 OpIdx1, OpIdx2); 1781 } 1782 1783 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 1784 } 1785 } 1786} 1787 1788bool 1789X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, 1790 unsigned &SrcOpIdx1, 1791 unsigned &SrcOpIdx2, 1792 bool IsIntrinsic) const { 1793 uint64_t TSFlags = MI.getDesc().TSFlags; 1794 1795 unsigned FirstCommutableVecOp = 1; 1796 unsigned LastCommutableVecOp = 3; 1797 unsigned KMaskOp = -1U; 1798 if (X86II::isKMasked(TSFlags)) { 1799 // For k-zero-masked operations it is Ok to commute the first vector 1800 // operand. 1801 // For regular k-masked operations a conservative choice is done as the 1802 // elements of the first vector operand, for which the corresponding bit 1803 // in the k-mask operand is set to 0, are copied to the result of the 1804 // instruction. 1805 // TODO/FIXME: The commute still may be legal if it is known that the 1806 // k-mask operand is set to either all ones or all zeroes. 1807 // It is also Ok to commute the 1st operand if all users of MI use only 1808 // the elements enabled by the k-mask operand. For example, 1809 // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i] 1810 // : v1[i]; 1811 // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 -> 1812 // // Ok, to commute v1 in FMADD213PSZrk. 1813 1814 // The k-mask operand has index = 2 for masked and zero-masked operations. 1815 KMaskOp = 2; 1816 1817 // The operand with index = 1 is used as a source for those elements for 1818 // which the corresponding bit in the k-mask is set to 0. 1819 if (X86II::isKMergeMasked(TSFlags)) 1820 FirstCommutableVecOp = 3; 1821 1822 LastCommutableVecOp++; 1823 } else if (IsIntrinsic) { 1824 // Commuting the first operand of an intrinsic instruction isn't possible 1825 // unless we can prove that only the lowest element of the result is used. 1826 FirstCommutableVecOp = 2; 1827 } 1828 1829 if (isMem(MI, LastCommutableVecOp)) 1830 LastCommutableVecOp--; 1831 1832 // Only the first RegOpsNum operands are commutable. 1833 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means 1834 // that the operand is not specified/fixed. 1835 if (SrcOpIdx1 != CommuteAnyOperandIndex && 1836 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp || 1837 SrcOpIdx1 == KMaskOp)) 1838 return false; 1839 if (SrcOpIdx2 != CommuteAnyOperandIndex && 1840 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp || 1841 SrcOpIdx2 == KMaskOp)) 1842 return false; 1843 1844 // Look for two different register operands assumed to be commutable 1845 // regardless of the FMA opcode. The FMA opcode is adjusted later. 1846 if (SrcOpIdx1 == CommuteAnyOperandIndex || 1847 SrcOpIdx2 == CommuteAnyOperandIndex) { 1848 unsigned CommutableOpIdx2 = SrcOpIdx2; 1849 1850 // At least one of operands to be commuted is not specified and 1851 // this method is free to choose appropriate commutable operands. 1852 if (SrcOpIdx1 == SrcOpIdx2) 1853 // Both of operands are not fixed. By default set one of commutable 1854 // operands to the last register operand of the instruction. 
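      // (E.g. for an unmasked FMA3 form "dst, src1, src2, src3" that is
      // operand 3; when k-masked, the mask sits at index 2 and the last
      // commutable vector operand is operand 4.)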
1855 CommutableOpIdx2 = LastCommutableVecOp; 1856 else if (SrcOpIdx2 == CommuteAnyOperandIndex) 1857 // Only one of operands is not fixed. 1858 CommutableOpIdx2 = SrcOpIdx1; 1859 1860 // CommutableOpIdx2 is well defined now. Let's choose another commutable 1861 // operand and assign its index to CommutableOpIdx1. 1862 unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); 1863 1864 unsigned CommutableOpIdx1; 1865 for (CommutableOpIdx1 = LastCommutableVecOp; 1866 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) { 1867 // Just ignore and skip the k-mask operand. 1868 if (CommutableOpIdx1 == KMaskOp) 1869 continue; 1870 1871 // The commuted operands must have different registers. 1872 // Otherwise, the commute transformation does not change anything and 1873 // is useless then. 1874 if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg()) 1875 break; 1876 } 1877 1878 // No appropriate commutable operands were found. 1879 if (CommutableOpIdx1 < FirstCommutableVecOp) 1880 return false; 1881 1882 // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpidx2 1883 // to return those values. 1884 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1885 CommutableOpIdx1, CommutableOpIdx2)) 1886 return false; 1887 } 1888 1889 return true; 1890} 1891 1892bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, 1893 unsigned &SrcOpIdx2) const { 1894 const MCInstrDesc &Desc = MI.getDesc(); 1895 if (!Desc.isCommutable()) 1896 return false; 1897 1898 switch (MI.getOpcode()) { 1899 case X86::CMPSDrr: 1900 case X86::CMPSSrr: 1901 case X86::CMPPDrri: 1902 case X86::CMPPSrri: 1903 case X86::VCMPSDrr: 1904 case X86::VCMPSSrr: 1905 case X86::VCMPPDrri: 1906 case X86::VCMPPSrri: 1907 case X86::VCMPPDYrri: 1908 case X86::VCMPPSYrri: 1909 case X86::VCMPSDZrr: 1910 case X86::VCMPSSZrr: 1911 case X86::VCMPPDZrri: 1912 case X86::VCMPPSZrri: 1913 case X86::VCMPPDZ128rri: 1914 case X86::VCMPPSZ128rri: 1915 case X86::VCMPPDZ256rri: 1916 case X86::VCMPPSZ256rri: 1917 case X86::VCMPPDZrrik: 1918 case X86::VCMPPSZrrik: 1919 case X86::VCMPPDZ128rrik: 1920 case X86::VCMPPSZ128rrik: 1921 case X86::VCMPPDZ256rrik: 1922 case X86::VCMPPSZ256rrik: { 1923 unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0; 1924 1925 // Float comparison can be safely commuted for 1926 // Ordered/Unordered/Equal/NotEqual tests 1927 unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7; 1928 switch (Imm) { 1929 case 0x00: // EQUAL 1930 case 0x03: // UNORDERED 1931 case 0x04: // NOT EQUAL 1932 case 0x07: // ORDERED 1933 // The indices of the commutable operands are 1 and 2 (or 2 and 3 1934 // when masked). 1935 // Assign them to the returned operand indices here. 1936 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset, 1937 2 + OpOffset); 1938 } 1939 return false; 1940 } 1941 case X86::MOVSSrr: 1942 // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can 1943 // form sse4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable since 1944 // AVX implies sse4.1. 1945 if (Subtarget.hasSSE41()) 1946 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 1947 return false; 1948 case X86::SHUFPDrri: 1949 // We can commute this to MOVSD. 
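    // With imm 0x02, SHUFPD computes { src1[0], src2[1] }; swapping the two
    // sources gives { src2[0], src1[1] }, which is exactly what MOVSDrr
    // produces. That is why only this immediate is treated as commutable.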
1950 if (MI.getOperand(3).getImm() == 0x02) 1951 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 1952 return false; 1953 case X86::MOVHLPSrr: 1954 case X86::UNPCKHPDrr: 1955 case X86::VMOVHLPSrr: 1956 case X86::VUNPCKHPDrr: 1957 case X86::VMOVHLPSZrr: 1958 case X86::VUNPCKHPDZ128rr: 1959 if (Subtarget.hasSSE2()) 1960 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 1961 return false; 1962 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: 1963 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: 1964 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: 1965 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: 1966 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: 1967 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: 1968 case X86::VPTERNLOGDZrrik: 1969 case X86::VPTERNLOGDZ128rrik: 1970 case X86::VPTERNLOGDZ256rrik: 1971 case X86::VPTERNLOGQZrrik: 1972 case X86::VPTERNLOGQZ128rrik: 1973 case X86::VPTERNLOGQZ256rrik: 1974 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: 1975 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: 1976 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: 1977 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: 1978 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: 1979 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: 1980 case X86::VPTERNLOGDZ128rmbi: 1981 case X86::VPTERNLOGDZ256rmbi: 1982 case X86::VPTERNLOGDZrmbi: 1983 case X86::VPTERNLOGQZ128rmbi: 1984 case X86::VPTERNLOGQZ256rmbi: 1985 case X86::VPTERNLOGQZrmbi: 1986 case X86::VPTERNLOGDZ128rmbikz: 1987 case X86::VPTERNLOGDZ256rmbikz: 1988 case X86::VPTERNLOGDZrmbikz: 1989 case X86::VPTERNLOGQZ128rmbikz: 1990 case X86::VPTERNLOGQZ256rmbikz: 1991 case X86::VPTERNLOGQZrmbikz: 1992 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 1993 case X86::VPMADD52HUQZ128r: 1994 case X86::VPMADD52HUQZ128rk: 1995 case X86::VPMADD52HUQZ128rkz: 1996 case X86::VPMADD52HUQZ256r: 1997 case X86::VPMADD52HUQZ256rk: 1998 case X86::VPMADD52HUQZ256rkz: 1999 case X86::VPMADD52HUQZr: 2000 case X86::VPMADD52HUQZrk: 2001 case X86::VPMADD52HUQZrkz: 2002 case X86::VPMADD52LUQZ128r: 2003 case X86::VPMADD52LUQZ128rk: 2004 case X86::VPMADD52LUQZ128rkz: 2005 case X86::VPMADD52LUQZ256r: 2006 case X86::VPMADD52LUQZ256rk: 2007 case X86::VPMADD52LUQZ256rkz: 2008 case X86::VPMADD52LUQZr: 2009 case X86::VPMADD52LUQZrk: 2010 case X86::VPMADD52LUQZrkz: { 2011 unsigned CommutableOpIdx1 = 2; 2012 unsigned CommutableOpIdx2 = 3; 2013 if (X86II::isKMasked(Desc.TSFlags)) { 2014 // Skip the mask register. 2015 ++CommutableOpIdx1; 2016 ++CommutableOpIdx2; 2017 } 2018 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2019 CommutableOpIdx1, CommutableOpIdx2)) 2020 return false; 2021 if (!MI.getOperand(SrcOpIdx1).isReg() || 2022 !MI.getOperand(SrcOpIdx2).isReg()) 2023 // No idea. 2024 return false; 2025 return true; 2026 } 2027 2028 default: 2029 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), 2030 MI.getDesc().TSFlags); 2031 if (FMA3Group) 2032 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2, 2033 FMA3Group->isIntrinsic()); 2034 2035 // Handled masked instructions since we need to skip over the mask input 2036 // and the preserved input. 2037 if (X86II::isKMasked(Desc.TSFlags)) { 2038 // First assume that the first input is the mask operand and skip past it. 
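      // (Illustrative AVX-512 operand layouts: a merge-masked instruction is
      // "dst, passthru (tied to dst), kmask, src1, src2", so the commutable
      // inputs end up as operands 3 and 4; a zero-masked instruction drops
      // the tied passthru and commutes operands 2 and 3.)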
2039 unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1; 2040 unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2; 2041 // Check if the first input is tied. If there isn't one then we only 2042 // need to skip the mask operand which we did above. 2043 if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(), 2044 MCOI::TIED_TO) != -1)) { 2045 // If this is zero masking instruction with a tied operand, we need to 2046 // move the first index back to the first input since this must 2047 // be a 3 input instruction and we want the first two non-mask inputs. 2048 // Otherwise this is a 2 input instruction with a preserved input and 2049 // mask, so we need to move the indices to skip one more input. 2050 if (X86II::isKMergeMasked(Desc.TSFlags)) { 2051 ++CommutableOpIdx1; 2052 ++CommutableOpIdx2; 2053 } else { 2054 --CommutableOpIdx1; 2055 } 2056 } 2057 2058 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2059 CommutableOpIdx1, CommutableOpIdx2)) 2060 return false; 2061 2062 if (!MI.getOperand(SrcOpIdx1).isReg() || 2063 !MI.getOperand(SrcOpIdx2).isReg()) 2064 // No idea. 2065 return false; 2066 return true; 2067 } 2068 2069 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 2070 } 2071 return false; 2072} 2073 2074X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) { 2075 switch (MI.getOpcode()) { 2076 default: return X86::COND_INVALID; 2077 case X86::JCC_1: 2078 return static_cast<X86::CondCode>( 2079 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); 2080 } 2081} 2082 2083/// Return condition code of a SETCC opcode. 2084X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { 2085 switch (MI.getOpcode()) { 2086 default: return X86::COND_INVALID; 2087 case X86::SETCCr: case X86::SETCCm: 2088 return static_cast<X86::CondCode>( 2089 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); 2090 } 2091} 2092 2093/// Return condition code of a CMov opcode. 2094X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) { 2095 switch (MI.getOpcode()) { 2096 default: return X86::COND_INVALID; 2097 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: 2098 case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm: 2099 return static_cast<X86::CondCode>( 2100 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); 2101 } 2102} 2103 2104/// Return the inverse of the specified condition, 2105/// e.g. turning COND_E to COND_NE. 2106X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { 2107 switch (CC) { 2108 default: llvm_unreachable("Illegal condition code!"); 2109 case X86::COND_E: return X86::COND_NE; 2110 case X86::COND_NE: return X86::COND_E; 2111 case X86::COND_L: return X86::COND_GE; 2112 case X86::COND_LE: return X86::COND_G; 2113 case X86::COND_G: return X86::COND_LE; 2114 case X86::COND_GE: return X86::COND_L; 2115 case X86::COND_B: return X86::COND_AE; 2116 case X86::COND_BE: return X86::COND_A; 2117 case X86::COND_A: return X86::COND_BE; 2118 case X86::COND_AE: return X86::COND_B; 2119 case X86::COND_S: return X86::COND_NS; 2120 case X86::COND_NS: return X86::COND_S; 2121 case X86::COND_P: return X86::COND_NP; 2122 case X86::COND_NP: return X86::COND_P; 2123 case X86::COND_O: return X86::COND_NO; 2124 case X86::COND_NO: return X86::COND_O; 2125 case X86::COND_NE_OR_P: return X86::COND_E_AND_NP; 2126 case X86::COND_E_AND_NP: return X86::COND_NE_OR_P; 2127 } 2128} 2129 2130/// Assuming the flags are set by MI(a,b), return the condition code if we 2131/// modify the instructions such that flags are set by MI(b,a). 
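/// E.g. COND_L produced by MI(a,b) (a < b) corresponds to COND_G for
/// MI(b,a), while symmetric tests such as COND_E and COND_NE map to
/// themselves.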
static X86::CondCode getSwappedCondition(X86::CondCode CC) {
  switch (CC) {
  default: return X86::COND_INVALID;
  case X86::COND_E:  return X86::COND_E;
  case X86::COND_NE: return X86::COND_NE;
  case X86::COND_L:  return X86::COND_G;
  case X86::COND_LE: return X86::COND_GE;
  case X86::COND_G:  return X86::COND_L;
  case X86::COND_GE: return X86::COND_LE;
  case X86::COND_B:  return X86::COND_A;
  case X86::COND_BE: return X86::COND_AE;
  case X86::COND_A:  return X86::COND_B;
  case X86::COND_AE: return X86::COND_BE;
  }
}

std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
  X86::CondCode CC = X86::COND_INVALID;
  bool NeedSwap = false;
  switch (Predicate) {
  default: break;
  // Floating-point Predicates
  case CmpInst::FCMP_UEQ: CC = X86::COND_E;       break;
  case CmpInst::FCMP_OLT: NeedSwap = true;        LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OGT: CC = X86::COND_A;       break;
  case CmpInst::FCMP_OLE: NeedSwap = true;        LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OGE: CC = X86::COND_AE;      break;
  case CmpInst::FCMP_UGT: NeedSwap = true;        LLVM_FALLTHROUGH;
  case CmpInst::FCMP_ULT: CC = X86::COND_B;       break;
  case CmpInst::FCMP_UGE: NeedSwap = true;        LLVM_FALLTHROUGH;
  case CmpInst::FCMP_ULE: CC = X86::COND_BE;      break;
  case CmpInst::FCMP_ONE: CC = X86::COND_NE;      break;
  case CmpInst::FCMP_UNO: CC = X86::COND_P;       break;
  case CmpInst::FCMP_ORD: CC = X86::COND_NP;      break;
  case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
  case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;

  // Integer Predicates
  case CmpInst::ICMP_EQ:  CC = X86::COND_E;       break;
  case CmpInst::ICMP_NE:  CC = X86::COND_NE;      break;
  case CmpInst::ICMP_UGT: CC = X86::COND_A;       break;
  case CmpInst::ICMP_UGE: CC = X86::COND_AE;      break;
  case CmpInst::ICMP_ULT: CC = X86::COND_B;       break;
  case CmpInst::ICMP_ULE: CC = X86::COND_BE;      break;
  case CmpInst::ICMP_SGT: CC = X86::COND_G;       break;
  case CmpInst::ICMP_SGE: CC = X86::COND_GE;      break;
  case CmpInst::ICMP_SLT: CC = X86::COND_L;       break;
  case CmpInst::ICMP_SLE: CC = X86::COND_LE;      break;
  }

  return std::make_pair(CC, NeedSwap);
}

/// Return a setcc opcode based on whether it has a memory operand.
unsigned X86::getSETOpc(bool HasMemoryOperand) {
  return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
}

/// Return a cmov opcode for the given register size in bytes, and operand type.
unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
  switch (RegBytes) {
  default: llvm_unreachable("Illegal register size!");
  case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
  case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
  case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
  }
}

/// Get the VPCMP immediate for the given condition.
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETNE:  return 4;
  case ISD::SETEQ:  return 0;
  case ISD::SETULT:
  case ISD::SETLT:  return 1;
  case ISD::SETUGT:
  case ISD::SETGT:  return 6;
  case ISD::SETUGE:
  case ISD::SETGE:  return 5;
  case ISD::SETULE:
  case ISD::SETLE:  return 2;
  }
}

/// Get the VPCMP immediate if the opcodes are swapped.
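/// VPCMP encodes EQ = 0, LT = 1, LE = 2, FALSE = 3, NE = 4, NLT = 5,
/// NLE = 6 and TRUE = 7. Swapping the compared operands exchanges LT with
/// NLE and LE with NLT (a < b iff b > a); the remaining predicates are
/// symmetric and stay unchanged.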
2219unsigned X86::getSwappedVPCMPImm(unsigned Imm) { 2220 switch (Imm) { 2221 default: llvm_unreachable("Unreachable!"); 2222 case 0x01: Imm = 0x06; break; // LT -> NLE 2223 case 0x02: Imm = 0x05; break; // LE -> NLT 2224 case 0x05: Imm = 0x02; break; // NLT -> LE 2225 case 0x06: Imm = 0x01; break; // NLE -> LT 2226 case 0x00: // EQ 2227 case 0x03: // FALSE 2228 case 0x04: // NE 2229 case 0x07: // TRUE 2230 break; 2231 } 2232 2233 return Imm; 2234} 2235 2236/// Get the VPCOM immediate if the opcodes are swapped. 2237unsigned X86::getSwappedVPCOMImm(unsigned Imm) { 2238 switch (Imm) { 2239 default: llvm_unreachable("Unreachable!"); 2240 case 0x00: Imm = 0x02; break; // LT -> GT 2241 case 0x01: Imm = 0x03; break; // LE -> GE 2242 case 0x02: Imm = 0x00; break; // GT -> LT 2243 case 0x03: Imm = 0x01; break; // GE -> LE 2244 case 0x04: // EQ 2245 case 0x05: // NE 2246 case 0x06: // FALSE 2247 case 0x07: // TRUE 2248 break; 2249 } 2250 2251 return Imm; 2252} 2253 2254bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const { 2255 if (!MI.isTerminator()) return false; 2256 2257 // Conditional branch is a special case. 2258 if (MI.isBranch() && !MI.isBarrier()) 2259 return true; 2260 if (!MI.isPredicable()) 2261 return true; 2262 return !isPredicated(MI); 2263} 2264 2265bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { 2266 switch (MI.getOpcode()) { 2267 case X86::TCRETURNdi: 2268 case X86::TCRETURNri: 2269 case X86::TCRETURNmi: 2270 case X86::TCRETURNdi64: 2271 case X86::TCRETURNri64: 2272 case X86::TCRETURNmi64: 2273 return true; 2274 default: 2275 return false; 2276 } 2277} 2278 2279bool X86InstrInfo::canMakeTailCallConditional( 2280 SmallVectorImpl<MachineOperand> &BranchCond, 2281 const MachineInstr &TailCall) const { 2282 if (TailCall.getOpcode() != X86::TCRETURNdi && 2283 TailCall.getOpcode() != X86::TCRETURNdi64) { 2284 // Only direct calls can be done with a conditional branch. 2285 return false; 2286 } 2287 2288 const MachineFunction *MF = TailCall.getParent()->getParent(); 2289 if (Subtarget.isTargetWin64() && MF->hasWinCFI()) { 2290 // Conditional tail calls confuse the Win64 unwinder. 2291 return false; 2292 } 2293 2294 assert(BranchCond.size() == 1); 2295 if (BranchCond[0].getImm() > X86::LAST_VALID_COND) { 2296 // Can't make a conditional tail call with this condition. 2297 return false; 2298 } 2299 2300 const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); 2301 if (X86FI->getTCReturnAddrDelta() != 0 || 2302 TailCall.getOperand(1).getImm() != 0) { 2303 // A conditional tail call cannot do any stack adjustment. 2304 return false; 2305 } 2306 2307 return true; 2308} 2309 2310void X86InstrInfo::replaceBranchWithTailCall( 2311 MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond, 2312 const MachineInstr &TailCall) const { 2313 assert(canMakeTailCallConditional(BranchCond, TailCall)); 2314 2315 MachineBasicBlock::iterator I = MBB.end(); 2316 while (I != MBB.begin()) { 2317 --I; 2318 if (I->isDebugInstr()) 2319 continue; 2320 if (!I->isBranch()) 2321 assert(0 && "Can't find the branch to replace!"); 2322 2323 X86::CondCode CC = X86::getCondFromBranch(*I); 2324 assert(BranchCond.size() == 1); 2325 if (CC != BranchCond[0].getImm()) 2326 continue; 2327 2328 break; 2329 } 2330 2331 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc 2332 : X86::TCRETURNdi64cc; 2333 2334 auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc)); 2335 MIB->addOperand(TailCall.getOperand(0)); // Destination. 
2336 MIB.addImm(0); // Stack offset (not used). 2337 MIB->addOperand(BranchCond[0]); // Condition. 2338 MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters. 2339 2340 // Add implicit uses and defs of all live regs potentially clobbered by the 2341 // call. This way they still appear live across the call. 2342 LivePhysRegs LiveRegs(getRegisterInfo()); 2343 LiveRegs.addLiveOuts(MBB); 2344 SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers; 2345 LiveRegs.stepForward(*MIB, Clobbers); 2346 for (const auto &C : Clobbers) { 2347 MIB.addReg(C.first, RegState::Implicit); 2348 MIB.addReg(C.first, RegState::Implicit | RegState::Define); 2349 } 2350 2351 I->eraseFromParent(); 2352} 2353 2354// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may 2355// not be a fallthrough MBB now due to layout changes). Return nullptr if the 2356// fallthrough MBB cannot be identified. 2357static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB, 2358 MachineBasicBlock *TBB) { 2359 // Look for non-EHPad successors other than TBB. If we find exactly one, it 2360 // is the fallthrough MBB. If we find zero, then TBB is both the target MBB 2361 // and fallthrough MBB. If we find more than one, we cannot identify the 2362 // fallthrough MBB and should return nullptr. 2363 MachineBasicBlock *FallthroughBB = nullptr; 2364 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) { 2365 if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB)) 2366 continue; 2367 // Return a nullptr if we found more than one fallthrough successor. 2368 if (FallthroughBB && FallthroughBB != TBB) 2369 return nullptr; 2370 FallthroughBB = *SI; 2371 } 2372 return FallthroughBB; 2373} 2374 2375bool X86InstrInfo::AnalyzeBranchImpl( 2376 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, 2377 SmallVectorImpl<MachineOperand> &Cond, 2378 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { 2379 2380 // Start from the bottom of the block and work up, examining the 2381 // terminator instructions. 2382 MachineBasicBlock::iterator I = MBB.end(); 2383 MachineBasicBlock::iterator UnCondBrIter = MBB.end(); 2384 while (I != MBB.begin()) { 2385 --I; 2386 if (I->isDebugInstr()) 2387 continue; 2388 2389 // Working from the bottom, when we see a non-terminator instruction, we're 2390 // done. 2391 if (!isUnpredicatedTerminator(*I)) 2392 break; 2393 2394 // A terminator that isn't a branch can't easily be handled by this 2395 // analysis. 2396 if (!I->isBranch()) 2397 return true; 2398 2399 // Handle unconditional branches. 2400 if (I->getOpcode() == X86::JMP_1) { 2401 UnCondBrIter = I; 2402 2403 if (!AllowModify) { 2404 TBB = I->getOperand(0).getMBB(); 2405 continue; 2406 } 2407 2408 // If the block has any instructions after a JMP, delete them. 2409 while (std::next(I) != MBB.end()) 2410 std::next(I)->eraseFromParent(); 2411 2412 Cond.clear(); 2413 FBB = nullptr; 2414 2415 // Delete the JMP if it's equivalent to a fall-through. 2416 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { 2417 TBB = nullptr; 2418 I->eraseFromParent(); 2419 I = MBB.end(); 2420 UnCondBrIter = MBB.end(); 2421 continue; 2422 } 2423 2424 // TBB is used to indicate the unconditional destination. 2425 TBB = I->getOperand(0).getMBB(); 2426 continue; 2427 } 2428 2429 // Handle conditional branches. 2430 X86::CondCode BranchCode = X86::getCondFromBranch(*I); 2431 if (BranchCode == X86::COND_INVALID) 2432 return true; // Can't handle indirect branch. 
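    // (Note: following the TargetInstrInfo::analyzeBranch convention, a
    // "true" return value here means the branch could not be analyzed.)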
2433 2434 // In practice we should never have an undef eflags operand, if we do 2435 // abort here as we are not prepared to preserve the flag. 2436 if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef()) 2437 return true; 2438 2439 // Working from the bottom, handle the first conditional branch. 2440 if (Cond.empty()) { 2441 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); 2442 if (AllowModify && UnCondBrIter != MBB.end() && 2443 MBB.isLayoutSuccessor(TargetBB)) { 2444 // If we can modify the code and it ends in something like: 2445 // 2446 // jCC L1 2447 // jmp L2 2448 // L1: 2449 // ... 2450 // L2: 2451 // 2452 // Then we can change this to: 2453 // 2454 // jnCC L2 2455 // L1: 2456 // ... 2457 // L2: 2458 // 2459 // Which is a bit more efficient. 2460 // We conditionally jump to the fall-through block. 2461 BranchCode = GetOppositeBranchCondition(BranchCode); 2462 MachineBasicBlock::iterator OldInst = I; 2463 2464 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1)) 2465 .addMBB(UnCondBrIter->getOperand(0).getMBB()) 2466 .addImm(BranchCode); 2467 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) 2468 .addMBB(TargetBB); 2469 2470 OldInst->eraseFromParent(); 2471 UnCondBrIter->eraseFromParent(); 2472 2473 // Restart the analysis. 2474 UnCondBrIter = MBB.end(); 2475 I = MBB.end(); 2476 continue; 2477 } 2478 2479 FBB = TBB; 2480 TBB = I->getOperand(0).getMBB(); 2481 Cond.push_back(MachineOperand::CreateImm(BranchCode)); 2482 CondBranches.push_back(&*I); 2483 continue; 2484 } 2485 2486 // Handle subsequent conditional branches. Only handle the case where all 2487 // conditional branches branch to the same destination and their condition 2488 // opcodes fit one of the special multi-branch idioms. 2489 assert(Cond.size() == 1); 2490 assert(TBB); 2491 2492 // If the conditions are the same, we can leave them alone. 2493 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); 2494 auto NewTBB = I->getOperand(0).getMBB(); 2495 if (OldBranchCode == BranchCode && TBB == NewTBB) 2496 continue; 2497 2498 // If they differ, see if they fit one of the known patterns. Theoretically, 2499 // we could handle more patterns here, but we shouldn't expect to see them 2500 // if instruction selection has done a reasonable job. 2501 if (TBB == NewTBB && 2502 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) || 2503 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) { 2504 BranchCode = X86::COND_NE_OR_P; 2505 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) || 2506 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) { 2507 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB))) 2508 return true; 2509 2510 // X86::COND_E_AND_NP usually has two different branch destinations. 2511 // 2512 // JP B1 2513 // JE B2 2514 // JMP B1 2515 // B1: 2516 // B2: 2517 // 2518 // Here this condition branches to B2 only if NP && E. It has another 2519 // equivalent form: 2520 // 2521 // JNE B1 2522 // JNP B2 2523 // JMP B1 2524 // B1: 2525 // B2: 2526 // 2527 // Similarly it branches to B2 only if E && NP. That is why this condition 2528 // is named with COND_E_AND_NP. 2529 BranchCode = X86::COND_E_AND_NP; 2530 } else 2531 return true; 2532 2533 // Update the MachineOperand. 
2534 Cond[0].setImm(BranchCode); 2535 CondBranches.push_back(&*I); 2536 } 2537 2538 return false; 2539} 2540 2541bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB, 2542 MachineBasicBlock *&TBB, 2543 MachineBasicBlock *&FBB, 2544 SmallVectorImpl<MachineOperand> &Cond, 2545 bool AllowModify) const { 2546 SmallVector<MachineInstr *, 4> CondBranches; 2547 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); 2548} 2549 2550bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, 2551 MachineBranchPredicate &MBP, 2552 bool AllowModify) const { 2553 using namespace std::placeholders; 2554 2555 SmallVector<MachineOperand, 4> Cond; 2556 SmallVector<MachineInstr *, 4> CondBranches; 2557 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, 2558 AllowModify)) 2559 return true; 2560 2561 if (Cond.size() != 1) 2562 return true; 2563 2564 assert(MBP.TrueDest && "expected!"); 2565 2566 if (!MBP.FalseDest) 2567 MBP.FalseDest = MBB.getNextNode(); 2568 2569 const TargetRegisterInfo *TRI = &getRegisterInfo(); 2570 2571 MachineInstr *ConditionDef = nullptr; 2572 bool SingleUseCondition = true; 2573 2574 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) { 2575 if (I->modifiesRegister(X86::EFLAGS, TRI)) { 2576 ConditionDef = &*I; 2577 break; 2578 } 2579 2580 if (I->readsRegister(X86::EFLAGS, TRI)) 2581 SingleUseCondition = false; 2582 } 2583 2584 if (!ConditionDef) 2585 return true; 2586 2587 if (SingleUseCondition) { 2588 for (auto *Succ : MBB.successors()) 2589 if (Succ->isLiveIn(X86::EFLAGS)) 2590 SingleUseCondition = false; 2591 } 2592 2593 MBP.ConditionDef = ConditionDef; 2594 MBP.SingleUseCondition = SingleUseCondition; 2595 2596 // Currently we only recognize the simple pattern: 2597 // 2598 // test %reg, %reg 2599 // je %label 2600 // 2601 const unsigned TestOpcode = 2602 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; 2603 2604 if (ConditionDef->getOpcode() == TestOpcode && 2605 ConditionDef->getNumOperands() == 3 && 2606 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && 2607 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { 2608 MBP.LHS = ConditionDef->getOperand(0); 2609 MBP.RHS = MachineOperand::CreateImm(0); 2610 MBP.Predicate = Cond[0].getImm() == X86::COND_NE 2611 ? MachineBranchPredicate::PRED_NE 2612 : MachineBranchPredicate::PRED_EQ; 2613 return false; 2614 } 2615 2616 return true; 2617} 2618 2619unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB, 2620 int *BytesRemoved) const { 2621 assert(!BytesRemoved && "code size not handled"); 2622 2623 MachineBasicBlock::iterator I = MBB.end(); 2624 unsigned Count = 0; 2625 2626 while (I != MBB.begin()) { 2627 --I; 2628 if (I->isDebugInstr()) 2629 continue; 2630 if (I->getOpcode() != X86::JMP_1 && 2631 X86::getCondFromBranch(*I) == X86::COND_INVALID) 2632 break; 2633 // Remove the branch. 2634 I->eraseFromParent(); 2635 I = MBB.end(); 2636 ++Count; 2637 } 2638 2639 return Count; 2640} 2641 2642unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB, 2643 MachineBasicBlock *TBB, 2644 MachineBasicBlock *FBB, 2645 ArrayRef<MachineOperand> Cond, 2646 const DebugLoc &DL, 2647 int *BytesAdded) const { 2648 // Shouldn't be a fall through. 
2649 assert(TBB && "insertBranch must not be told to insert a fallthrough"); 2650 assert((Cond.size() == 1 || Cond.size() == 0) && 2651 "X86 branch conditions have one component!"); 2652 assert(!BytesAdded && "code size not handled"); 2653 2654 if (Cond.empty()) { 2655 // Unconditional branch? 2656 assert(!FBB && "Unconditional branch with multiple successors!"); 2657 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); 2658 return 1; 2659 } 2660 2661 // If FBB is null, it is implied to be a fall-through block. 2662 bool FallThru = FBB == nullptr; 2663 2664 // Conditional branch. 2665 unsigned Count = 0; 2666 X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); 2667 switch (CC) { 2668 case X86::COND_NE_OR_P: 2669 // Synthesize NE_OR_P with two branches. 2670 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE); 2671 ++Count; 2672 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P); 2673 ++Count; 2674 break; 2675 case X86::COND_E_AND_NP: 2676 // Use the next block of MBB as FBB if it is null. 2677 if (FBB == nullptr) { 2678 FBB = getFallThroughMBB(&MBB, TBB); 2679 assert(FBB && "MBB cannot be the last block in function when the false " 2680 "body is a fall-through."); 2681 } 2682 // Synthesize COND_E_AND_NP with two branches. 2683 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE); 2684 ++Count; 2685 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP); 2686 ++Count; 2687 break; 2688 default: { 2689 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC); 2690 ++Count; 2691 } 2692 } 2693 if (!FallThru) { 2694 // Two-way Conditional branch. Insert the second branch. 2695 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); 2696 ++Count; 2697 } 2698 return Count; 2699} 2700 2701bool X86InstrInfo:: 2702canInsertSelect(const MachineBasicBlock &MBB, 2703 ArrayRef<MachineOperand> Cond, 2704 unsigned TrueReg, unsigned FalseReg, 2705 int &CondCycles, int &TrueCycles, int &FalseCycles) const { 2706 // Not all subtargets have cmov instructions. 2707 if (!Subtarget.hasCMov()) 2708 return false; 2709 if (Cond.size() != 1) 2710 return false; 2711 // We cannot do the composite conditions, at least not in SSA form. 2712 if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND) 2713 return false; 2714 2715 // Check register classes. 2716 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2717 const TargetRegisterClass *RC = 2718 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); 2719 if (!RC) 2720 return false; 2721 2722 // We have cmov instructions for 16, 32, and 64 bit general purpose registers. 2723 if (X86::GR16RegClass.hasSubClassEq(RC) || 2724 X86::GR32RegClass.hasSubClassEq(RC) || 2725 X86::GR64RegClass.hasSubClassEq(RC)) { 2726 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy 2727 // Bridge. Probably Ivy Bridge as well. 2728 CondCycles = 2; 2729 TrueCycles = 2; 2730 FalseCycles = 2; 2731 return true; 2732 } 2733 2734 // Can't do vectors. 
  return false;
}

void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I,
                                const DebugLoc &DL, unsigned DstReg,
                                ArrayRef<MachineOperand> Cond, unsigned TrueReg,
                                unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
  assert(Cond.size() == 1 && "Invalid Cond array");
  unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
                                    false /*HasMemoryOperand*/);
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(FalseReg)
      .addReg(TrueReg)
      .addImm(Cond[0].getImm());
}

/// Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
  return X86::GR8_ABCD_HRegClass.contains(Reg);
}

// Try and copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
                                        const X86Subtarget &Subtarget) {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  // SrcReg(MaskReg) -> DestReg(GR64)
  // SrcReg(MaskReg) -> DestReg(GR32)

  // All KMASK RegClasses hold the same k registers, so a register can be
  // tested against any one of them.
  if (X86::VK16RegClass.contains(SrcReg)) {
    if (X86::GR64RegClass.contains(DestReg)) {
      assert(Subtarget.hasBWI());
      return X86::KMOVQrk;
    }
    if (X86::GR32RegClass.contains(DestReg))
      return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
  }

  // SrcReg(GR64) -> DestReg(MaskReg)
  // SrcReg(GR32) -> DestReg(MaskReg)

  // All KMASK RegClasses hold the same k registers, so a register can be
  // tested against any one of them.
  if (X86::VK16RegClass.contains(DestReg)) {
    if (X86::GR64RegClass.contains(SrcReg)) {
      assert(Subtarget.hasBWI());
      return X86::KMOVQkr;
    }
    if (X86::GR32RegClass.contains(SrcReg))
      return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
  }

  // SrcReg(VR128) -> DestReg(GR64)
  // SrcReg(VR64)  -> DestReg(GR64)
  // SrcReg(GR64)  -> DestReg(VR128)
  // SrcReg(GR64)  -> DestReg(VR64)

  if (X86::GR64RegClass.contains(DestReg)) {
    if (X86::VR128XRegClass.contains(SrcReg))
      // Copy from a VR128 register to a GR64 register.
      return HasAVX512 ? X86::VMOVPQIto64Zrr :
             HasAVX    ? X86::VMOVPQIto64rr  :
                         X86::MOVPQIto64rr;
    if (X86::VR64RegClass.contains(SrcReg))
      // Copy from a VR64 register to a GR64 register.
      return X86::MMX_MOVD64from64rr;
  } else if (X86::GR64RegClass.contains(SrcReg)) {
    // Copy from a GR64 register to a VR128 register.
    if (X86::VR128XRegClass.contains(DestReg))
      return HasAVX512 ? X86::VMOV64toPQIZrr :
             HasAVX    ? X86::VMOV64toPQIrr  :
                         X86::MOV64toPQIrr;
    // Copy from a GR64 register to a VR64 register.
    if (X86::VR64RegClass.contains(DestReg))
      return X86::MMX_MOVD64to64rr;
  }

  // SrcReg(VR128) -> DestReg(GR32)
  // SrcReg(GR32)  -> DestReg(VR128)

  if (X86::GR32RegClass.contains(DestReg) &&
      X86::VR128XRegClass.contains(SrcReg))
    // Copy from a VR128 register to a GR32 register.
    return HasAVX512 ? X86::VMOVPDI2DIZrr :
           HasAVX    ? X86::VMOVPDI2DIrr  :
                       X86::MOVPDI2DIrr;

  if (X86::VR128XRegClass.contains(DestReg) &&
      X86::GR32RegClass.contains(SrcReg))
    // Copy from a GR32 register to a VR128 register.
    return HasAVX512 ? X86::VMOVDI2PDIZrr :
           HasAVX    ? X86::VMOVDI2PDIrr  :
                       X86::MOVDI2PDIrr;
  return 0;
}

void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, unsigned DestReg,
                               unsigned SrcReg, bool KillSrc) const {
  // First deal with the normal symmetric copies.
  bool HasAVX = Subtarget.hasAVX();
  bool HasVLX = Subtarget.hasVLX();
  unsigned Opc = 0;
  if (X86::GR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV64rr;
  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV32rr;
  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV16rr;
  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
    // Copying to or from a physical H register on x86-64 requires a NOREX
    // move. Otherwise use a normal move.
    if ((isHReg(DestReg) || isHReg(SrcReg)) &&
        Subtarget.is64Bit()) {
      Opc = X86::MOV8rr_NOREX;
      // Both operands must be encodable without a REX prefix.
      assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
             "8-bit H register can not be copied outside GR8_NOREX");
    } else
      Opc = X86::MOV8rr;
  } else if (X86::VR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MMX_MOVQ64rr;
  else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ128rr;
    else if (X86::VR128RegClass.contains(DestReg, SrcReg))
      Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
    else {
      // If this is an extended register and we don't have VLX we need to
      // use a 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
                                         &X86::VR512RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
                                        &X86::VR512RegClass);
    }
  } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
    if (HasVLX)
      Opc = X86::VMOVAPSZ256rr;
    else if (X86::VR256RegClass.contains(DestReg, SrcReg))
      Opc = X86::VMOVAPSYrr;
    else {
      // If this is an extended register and we don't have VLX we need to
      // use a 512-bit move.
      Opc = X86::VMOVAPSZrr;
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
                                         &X86::VR512RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
                                        &X86::VR512RegClass);
    }
  } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
    Opc = X86::VMOVAPSZrr;
  // All KMASK RegClasses hold the same k registers, so a register can be
  // tested against any one of them.
  else if (X86::VK16RegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
  if (!Opc)
    Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);

  if (Opc) {
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
    // FIXME: We use a fatal error here because historically LLVM has tried
    // to lower some of these physreg copies and we want to ensure we get
    // reasonable bug reports if someone encounters a case no other testing
    // found. This path should be removed after the LLVM 7 release.
2914 report_fatal_error("Unable to copy EFLAGS physical register!"); 2915 } 2916 2917 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to " 2918 << RI.getName(DestReg) << '\n'); 2919 report_fatal_error("Cannot emit physreg copy instruction"); 2920} 2921 2922bool X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI, 2923 const MachineOperand *&Src, 2924 const MachineOperand *&Dest) const { 2925 if (MI.isMoveReg()) { 2926 Dest = &MI.getOperand(0); 2927 Src = &MI.getOperand(1); 2928 return true; 2929 } 2930 return false; 2931} 2932 2933static unsigned getLoadStoreRegOpcode(unsigned Reg, 2934 const TargetRegisterClass *RC, 2935 bool isStackAligned, 2936 const X86Subtarget &STI, 2937 bool load) { 2938 bool HasAVX = STI.hasAVX(); 2939 bool HasAVX512 = STI.hasAVX512(); 2940 bool HasVLX = STI.hasVLX(); 2941 2942 switch (STI.getRegisterInfo()->getSpillSize(*RC)) { 2943 default: 2944 llvm_unreachable("Unknown spill size"); 2945 case 1: 2946 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); 2947 if (STI.is64Bit()) 2948 // Copying to or from a physical H register on x86-64 requires a NOREX 2949 // move. Otherwise use a normal move. 2950 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) 2951 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; 2952 return load ? X86::MOV8rm : X86::MOV8mr; 2953 case 2: 2954 if (X86::VK16RegClass.hasSubClassEq(RC)) 2955 return load ? X86::KMOVWkm : X86::KMOVWmk; 2956 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); 2957 return load ? X86::MOV16rm : X86::MOV16mr; 2958 case 4: 2959 if (X86::GR32RegClass.hasSubClassEq(RC)) 2960 return load ? X86::MOV32rm : X86::MOV32mr; 2961 if (X86::FR32XRegClass.hasSubClassEq(RC)) 2962 return load ? 2963 (HasAVX512 ? X86::VMOVSSZrm_alt : 2964 HasAVX ? X86::VMOVSSrm_alt : 2965 X86::MOVSSrm_alt) : 2966 (HasAVX512 ? X86::VMOVSSZmr : 2967 HasAVX ? X86::VMOVSSmr : 2968 X86::MOVSSmr); 2969 if (X86::RFP32RegClass.hasSubClassEq(RC)) 2970 return load ? X86::LD_Fp32m : X86::ST_Fp32m; 2971 if (X86::VK32RegClass.hasSubClassEq(RC)) { 2972 assert(STI.hasBWI() && "KMOVD requires BWI"); 2973 return load ? X86::KMOVDkm : X86::KMOVDmk; 2974 } 2975 // All of these mask pair classes have the same spill size, the same kind 2976 // of kmov instructions can be used with all of them. 2977 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) || 2978 X86::VK2PAIRRegClass.hasSubClassEq(RC) || 2979 X86::VK4PAIRRegClass.hasSubClassEq(RC) || 2980 X86::VK8PAIRRegClass.hasSubClassEq(RC) || 2981 X86::VK16PAIRRegClass.hasSubClassEq(RC)) 2982 return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE; 2983 llvm_unreachable("Unknown 4-byte regclass"); 2984 case 8: 2985 if (X86::GR64RegClass.hasSubClassEq(RC)) 2986 return load ? X86::MOV64rm : X86::MOV64mr; 2987 if (X86::FR64XRegClass.hasSubClassEq(RC)) 2988 return load ? 2989 (HasAVX512 ? X86::VMOVSDZrm_alt : 2990 HasAVX ? X86::VMOVSDrm_alt : 2991 X86::MOVSDrm_alt) : 2992 (HasAVX512 ? X86::VMOVSDZmr : 2993 HasAVX ? X86::VMOVSDmr : 2994 X86::MOVSDmr); 2995 if (X86::VR64RegClass.hasSubClassEq(RC)) 2996 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; 2997 if (X86::RFP64RegClass.hasSubClassEq(RC)) 2998 return load ? X86::LD_Fp64m : X86::ST_Fp64m; 2999 if (X86::VK64RegClass.hasSubClassEq(RC)) { 3000 assert(STI.hasBWI() && "KMOVQ requires BWI"); 3001 return load ? X86::KMOVQkm : X86::KMOVQmk; 3002 } 3003 llvm_unreachable("Unknown 8-byte regclass"); 3004 case 10: 3005 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); 3006 return load ? 
X86::LD_Fp80m : X86::ST_FpP80m; 3007 case 16: { 3008 if (X86::VR128XRegClass.hasSubClassEq(RC)) { 3009 // If stack is realigned we can use aligned stores. 3010 if (isStackAligned) 3011 return load ? 3012 (HasVLX ? X86::VMOVAPSZ128rm : 3013 HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX : 3014 HasAVX ? X86::VMOVAPSrm : 3015 X86::MOVAPSrm): 3016 (HasVLX ? X86::VMOVAPSZ128mr : 3017 HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX : 3018 HasAVX ? X86::VMOVAPSmr : 3019 X86::MOVAPSmr); 3020 else 3021 return load ? 3022 (HasVLX ? X86::VMOVUPSZ128rm : 3023 HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX : 3024 HasAVX ? X86::VMOVUPSrm : 3025 X86::MOVUPSrm): 3026 (HasVLX ? X86::VMOVUPSZ128mr : 3027 HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX : 3028 HasAVX ? X86::VMOVUPSmr : 3029 X86::MOVUPSmr); 3030 } 3031 if (X86::BNDRRegClass.hasSubClassEq(RC)) { 3032 if (STI.is64Bit()) 3033 return load ? X86::BNDMOV64rm : X86::BNDMOV64mr; 3034 else 3035 return load ? X86::BNDMOV32rm : X86::BNDMOV32mr; 3036 } 3037 llvm_unreachable("Unknown 16-byte regclass"); 3038 } 3039 case 32: 3040 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass"); 3041 // If stack is realigned we can use aligned stores. 3042 if (isStackAligned) 3043 return load ? 3044 (HasVLX ? X86::VMOVAPSZ256rm : 3045 HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX : 3046 X86::VMOVAPSYrm) : 3047 (HasVLX ? X86::VMOVAPSZ256mr : 3048 HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX : 3049 X86::VMOVAPSYmr); 3050 else 3051 return load ? 3052 (HasVLX ? X86::VMOVUPSZ256rm : 3053 HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX : 3054 X86::VMOVUPSYrm) : 3055 (HasVLX ? X86::VMOVUPSZ256mr : 3056 HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX : 3057 X86::VMOVUPSYmr); 3058 case 64: 3059 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); 3060 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512"); 3061 if (isStackAligned) 3062 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; 3063 else 3064 return load ? 
X86::VMOVUPSZrm : X86::VMOVUPSZmr; 3065 } 3066} 3067 3068bool X86InstrInfo::getMemOperandWithOffset( 3069 const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset, 3070 const TargetRegisterInfo *TRI) const { 3071 const MCInstrDesc &Desc = MemOp.getDesc(); 3072 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); 3073 if (MemRefBegin < 0) 3074 return false; 3075 3076 MemRefBegin += X86II::getOperandBias(Desc); 3077 3078 BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg); 3079 if (!BaseOp->isReg()) // Can be an MO_FrameIndex 3080 return false; 3081 3082 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) 3083 return false; 3084 3085 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != 3086 X86::NoRegister) 3087 return false; 3088 3089 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp); 3090 3091 // Displacement can be symbolic 3092 if (!DispMO.isImm()) 3093 return false; 3094 3095 Offset = DispMO.getImm(); 3096 3097 assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base " 3098 "operands of type register."); 3099 return true; 3100} 3101 3102static unsigned getStoreRegOpcode(unsigned SrcReg, 3103 const TargetRegisterClass *RC, 3104 bool isStackAligned, 3105 const X86Subtarget &STI) { 3106 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false); 3107} 3108 3109 3110static unsigned getLoadRegOpcode(unsigned DestReg, 3111 const TargetRegisterClass *RC, 3112 bool isStackAligned, 3113 const X86Subtarget &STI) { 3114 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true); 3115} 3116 3117void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 3118 MachineBasicBlock::iterator MI, 3119 unsigned SrcReg, bool isKill, int FrameIdx, 3120 const TargetRegisterClass *RC, 3121 const TargetRegisterInfo *TRI) const { 3122 const MachineFunction &MF = *MBB.getParent(); 3123 assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) && 3124 "Stack slot too small for store"); 3125 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); 3126 bool isAligned = 3127 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 3128 RI.canRealignStack(MF); 3129 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 3130 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) 3131 .addReg(SrcReg, getKillRegState(isKill)); 3132} 3133 3134void X86InstrInfo::storeRegToAddr( 3135 MachineFunction &MF, unsigned SrcReg, bool isKill, 3136 SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC, 3137 ArrayRef<MachineMemOperand *> MMOs, 3138 SmallVectorImpl<MachineInstr *> &NewMIs) const { 3139 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 3140 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 3141 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; 3142 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 3143 DebugLoc DL; 3144 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); 3145 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 3146 MIB.add(Addr[i]); 3147 MIB.addReg(SrcReg, getKillRegState(isKill)); 3148 MIB.setMemRefs(MMOs); 3149 NewMIs.push_back(MIB); 3150} 3151 3152 3153void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 3154 MachineBasicBlock::iterator MI, 3155 unsigned DestReg, int FrameIdx, 3156 const TargetRegisterClass *RC, 3157 const TargetRegisterInfo *TRI) const { 3158 const MachineFunction &MF = *MBB.getParent(); 
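  // As with storeRegToStackSlot above: the required slot alignment is the
  // spill size, but at least 16 bytes, and the aligned load opcode is only
  // used when the incoming stack alignment guarantees that or the frame can
  // be realigned (see getLoadStoreRegOpcode).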
3159 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); 3160 bool isAligned = 3161 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 3162 RI.canRealignStack(MF); 3163 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 3164 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx); 3165} 3166 3167void X86InstrInfo::loadRegFromAddr( 3168 MachineFunction &MF, unsigned DestReg, 3169 SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC, 3170 ArrayRef<MachineMemOperand *> MMOs, 3171 SmallVectorImpl<MachineInstr *> &NewMIs) const { 3172 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 3173 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 3174 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; 3175 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 3176 DebugLoc DL; 3177 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 3178 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 3179 MIB.add(Addr[i]); 3180 MIB.setMemRefs(MMOs); 3181 NewMIs.push_back(MIB); 3182} 3183 3184bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, 3185 unsigned &SrcReg2, int &CmpMask, 3186 int &CmpValue) const { 3187 switch (MI.getOpcode()) { 3188 default: break; 3189 case X86::CMP64ri32: 3190 case X86::CMP64ri8: 3191 case X86::CMP32ri: 3192 case X86::CMP32ri8: 3193 case X86::CMP16ri: 3194 case X86::CMP16ri8: 3195 case X86::CMP8ri: 3196 SrcReg = MI.getOperand(0).getReg(); 3197 SrcReg2 = 0; 3198 if (MI.getOperand(1).isImm()) { 3199 CmpMask = ~0; 3200 CmpValue = MI.getOperand(1).getImm(); 3201 } else { 3202 CmpMask = CmpValue = 0; 3203 } 3204 return true; 3205 // A SUB can be used to perform comparison. 3206 case X86::SUB64rm: 3207 case X86::SUB32rm: 3208 case X86::SUB16rm: 3209 case X86::SUB8rm: 3210 SrcReg = MI.getOperand(1).getReg(); 3211 SrcReg2 = 0; 3212 CmpMask = 0; 3213 CmpValue = 0; 3214 return true; 3215 case X86::SUB64rr: 3216 case X86::SUB32rr: 3217 case X86::SUB16rr: 3218 case X86::SUB8rr: 3219 SrcReg = MI.getOperand(1).getReg(); 3220 SrcReg2 = MI.getOperand(2).getReg(); 3221 CmpMask = 0; 3222 CmpValue = 0; 3223 return true; 3224 case X86::SUB64ri32: 3225 case X86::SUB64ri8: 3226 case X86::SUB32ri: 3227 case X86::SUB32ri8: 3228 case X86::SUB16ri: 3229 case X86::SUB16ri8: 3230 case X86::SUB8ri: 3231 SrcReg = MI.getOperand(1).getReg(); 3232 SrcReg2 = 0; 3233 if (MI.getOperand(2).isImm()) { 3234 CmpMask = ~0; 3235 CmpValue = MI.getOperand(2).getImm(); 3236 } else { 3237 CmpMask = CmpValue = 0; 3238 } 3239 return true; 3240 case X86::CMP64rr: 3241 case X86::CMP32rr: 3242 case X86::CMP16rr: 3243 case X86::CMP8rr: 3244 SrcReg = MI.getOperand(0).getReg(); 3245 SrcReg2 = MI.getOperand(1).getReg(); 3246 CmpMask = 0; 3247 CmpValue = 0; 3248 return true; 3249 case X86::TEST8rr: 3250 case X86::TEST16rr: 3251 case X86::TEST32rr: 3252 case X86::TEST64rr: 3253 SrcReg = MI.getOperand(0).getReg(); 3254 if (MI.getOperand(1).getReg() != SrcReg) 3255 return false; 3256 // Compare against zero. 3257 SrcReg2 = 0; 3258 CmpMask = ~0; 3259 CmpValue = 0; 3260 return true; 3261 } 3262 return false; 3263} 3264 3265/// Check whether the first instruction, whose only 3266/// purpose is to update flags, can be made redundant. 3267/// CMPrr can be made redundant by SUBrr if the operands are the same. 3268/// This function can be extended later on. 3269/// SrcReg, SrcRegs: register operands for FlagI. 
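/// ImmMask: nonzero (~0) when FlagI compares against an immediate,
/// zero otherwise.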
/// ImmMask, ImmValue: immediate mask and value for FlagI if it takes an
/// immediate.
inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
                                        unsigned SrcReg, unsigned SrcReg2,
                                        int ImmMask, int ImmValue,
                                        const MachineInstr &OI) {
  if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
       (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
       (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
       (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
      ((OI.getOperand(1).getReg() == SrcReg &&
        OI.getOperand(2).getReg() == SrcReg2) ||
       (OI.getOperand(1).getReg() == SrcReg2 &&
        OI.getOperand(2).getReg() == SrcReg)))
    return true;

  if (ImmMask != 0 &&
      ((FlagI.getOpcode() == X86::CMP64ri32 &&
        OI.getOpcode() == X86::SUB64ri32) ||
       (FlagI.getOpcode() == X86::CMP64ri8 &&
        OI.getOpcode() == X86::SUB64ri8) ||
       (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
       (FlagI.getOpcode() == X86::CMP32ri8 &&
        OI.getOpcode() == X86::SUB32ri8) ||
       (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
       (FlagI.getOpcode() == X86::CMP16ri8 &&
        OI.getOpcode() == X86::SUB16ri8) ||
       (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
      OI.getOperand(1).getReg() == SrcReg &&
      OI.getOperand(2).getImm() == ImmValue)
    return true;
  return false;
}

/// Check whether the definition can be converted
/// to remove a comparison against zero.
inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
  NoSignFlag = false;

  switch (MI.getOpcode()) {
  default: return false;

  // The shift instructions only modify ZF if their shift count is non-zero.
  // N.B.: The processor truncates the shift count depending on the encoding.
  case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
  case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
    return getTruncatedShiftCount(MI, 2) != 0;

  // Some left shift instructions can be turned into LEA instructions but only
  // if their flags aren't used. Avoid transforming such instructions.
3319 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ 3320 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 3321 if (isTruncatedShiftCountForLEA(ShAmt)) return false; 3322 return ShAmt != 0; 3323 } 3324 3325 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: 3326 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: 3327 return getTruncatedShiftCount(MI, 3) != 0; 3328 3329 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: 3330 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: 3331 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: 3332 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: 3333 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: 3334 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: 3335 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: 3336 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: 3337 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: 3338 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: 3339 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: 3340 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: 3341 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: 3342 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: 3343 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: 3344 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: 3345 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: 3346 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: 3347 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: 3348 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: 3349 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: 3350 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: 3351 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: 3352 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: 3353 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: 3354 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: 3355 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: 3356 case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri: 3357 case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8: 3358 case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr: 3359 case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm: 3360 case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm: 3361 case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri: 3362 case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8: 3363 case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr: 3364 case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm: 3365 case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm: 3366 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: 3367 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: 3368 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: 3369 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: 3370 case X86::ANDN32rr: case X86::ANDN32rm: 3371 case X86::ANDN64rr: case X86::ANDN64rm: 3372 case X86::BLSI32rr: case X86::BLSI32rm: 3373 case X86::BLSI64rr: case X86::BLSI64rm: 3374 case X86::BLSMSK32rr:case X86::BLSMSK32rm: 3375 case X86::BLSMSK64rr:case X86::BLSMSK64rm: 3376 case X86::BLSR32rr: case X86::BLSR32rm: 3377 case X86::BLSR64rr: case X86::BLSR64rm: 3378 case X86::BZHI32rr: case X86::BZHI32rm: 3379 case X86::BZHI64rr: case X86::BZHI64rm: 3380 case X86::LZCNT16rr: case 
X86::LZCNT16rm: 3381 case X86::LZCNT32rr: case X86::LZCNT32rm: 3382 case X86::LZCNT64rr: case X86::LZCNT64rm: 3383 case X86::POPCNT16rr:case X86::POPCNT16rm: 3384 case X86::POPCNT32rr:case X86::POPCNT32rm: 3385 case X86::POPCNT64rr:case X86::POPCNT64rm: 3386 case X86::TZCNT16rr: case X86::TZCNT16rm: 3387 case X86::TZCNT32rr: case X86::TZCNT32rm: 3388 case X86::TZCNT64rr: case X86::TZCNT64rm: 3389 case X86::BLCFILL32rr: case X86::BLCFILL32rm: 3390 case X86::BLCFILL64rr: case X86::BLCFILL64rm: 3391 case X86::BLCI32rr: case X86::BLCI32rm: 3392 case X86::BLCI64rr: case X86::BLCI64rm: 3393 case X86::BLCIC32rr: case X86::BLCIC32rm: 3394 case X86::BLCIC64rr: case X86::BLCIC64rm: 3395 case X86::BLCMSK32rr: case X86::BLCMSK32rm: 3396 case X86::BLCMSK64rr: case X86::BLCMSK64rm: 3397 case X86::BLCS32rr: case X86::BLCS32rm: 3398 case X86::BLCS64rr: case X86::BLCS64rm: 3399 case X86::BLSFILL32rr: case X86::BLSFILL32rm: 3400 case X86::BLSFILL64rr: case X86::BLSFILL64rm: 3401 case X86::BLSIC32rr: case X86::BLSIC32rm: 3402 case X86::BLSIC64rr: case X86::BLSIC64rm: 3403 case X86::T1MSKC32rr: case X86::T1MSKC32rm: 3404 case X86::T1MSKC64rr: case X86::T1MSKC64rm: 3405 case X86::TZMSK32rr: case X86::TZMSK32rm: 3406 case X86::TZMSK64rr: case X86::TZMSK64rm: 3407 return true; 3408 case X86::BEXTR32rr: case X86::BEXTR64rr: 3409 case X86::BEXTR32rm: case X86::BEXTR64rm: 3410 case X86::BEXTRI32ri: case X86::BEXTRI32mi: 3411 case X86::BEXTRI64ri: case X86::BEXTRI64mi: 3412 // BEXTR doesn't update the sign flag so we can't use it. 3413 NoSignFlag = true; 3414 return true; 3415 } 3416} 3417 3418/// Check whether the use can be converted to remove a comparison against zero. 3419static X86::CondCode isUseDefConvertible(const MachineInstr &MI) { 3420 switch (MI.getOpcode()) { 3421 default: return X86::COND_INVALID; 3422 case X86::NEG8r: 3423 case X86::NEG16r: 3424 case X86::NEG32r: 3425 case X86::NEG64r: 3426 return X86::COND_AE; 3427 case X86::LZCNT16rr: 3428 case X86::LZCNT32rr: 3429 case X86::LZCNT64rr: 3430 return X86::COND_B; 3431 case X86::POPCNT16rr: 3432 case X86::POPCNT32rr: 3433 case X86::POPCNT64rr: 3434 return X86::COND_E; 3435 case X86::TZCNT16rr: 3436 case X86::TZCNT32rr: 3437 case X86::TZCNT64rr: 3438 return X86::COND_B; 3439 case X86::BSF16rr: 3440 case X86::BSF32rr: 3441 case X86::BSF64rr: 3442 case X86::BSR16rr: 3443 case X86::BSR32rr: 3444 case X86::BSR64rr: 3445 return X86::COND_E; 3446 case X86::BLSI32rr: 3447 case X86::BLSI64rr: 3448 return X86::COND_AE; 3449 case X86::BLSR32rr: 3450 case X86::BLSR64rr: 3451 case X86::BLSMSK32rr: 3452 case X86::BLSMSK64rr: 3453 return X86::COND_B; 3454 // TODO: TBM instructions. 3455 } 3456} 3457 3458/// Check if there exists an earlier instruction that 3459/// operates on the same source operands and sets flags in the same way as 3460/// Compare; remove Compare if possible. 3461bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, 3462 unsigned SrcReg2, int CmpMask, 3463 int CmpValue, 3464 const MachineRegisterInfo *MRI) const { 3465 // Check whether we can replace SUB with CMP. 
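  // For example (illustrative MIR, not from a test case): if the result %2 of
  //   %2 = SUB32rr %0, %1, implicit-def $eflags
  // has no remaining non-debug uses, the SUB can be turned into
  //   CMP32rr %0, %1, implicit-def $eflags
  // and then be optimized further like any other compare.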
  switch (CmpInstr.getOpcode()) {
  default: break;
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB8ri:
  case X86::SUB64rm:
  case X86::SUB32rm:
  case X86::SUB16rm:
  case X86::SUB8rm:
  case X86::SUB64rr:
  case X86::SUB32rr:
  case X86::SUB16rr:
  case X86::SUB8rr: {
    if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
      return false;
    // There is no use of the destination register, so we can replace SUB with
    // CMP.
    unsigned NewOpcode = 0;
    switch (CmpInstr.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
    case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
    case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
    case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
    case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
    case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
    case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
    case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
    case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
    case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
    case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
    case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
    case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
    case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
    case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
    }
    CmpInstr.setDesc(get(NewOpcode));
    CmpInstr.RemoveOperand(0);
    // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
    if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
        NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
      return false;
  }
  }

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // CmpInstr is the first instruction of the BB.
  MachineBasicBlock::iterator I = CmpInstr, Def = MI;

  // If we are comparing against zero, check whether we can use MI to update
  // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
  bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
  if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
    return false;

  // If we have a use of the source register between the def and our compare
  // instruction we can eliminate the compare iff the use sets EFLAGS in the
  // right way.
  bool ShouldUpdateCC = false;
  bool NoSignFlag = false;
  X86::CondCode NewCC = X86::COND_INVALID;
  if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
    // Scan forward from the def until we hit the use we're looking for or the
    // compare instruction.
    for (MachineBasicBlock::iterator J = MI;; ++J) {
      // Do we have a convertible instruction?
      NewCC = isUseDefConvertible(*J);
      if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
          J->getOperand(1).getReg() == SrcReg) {
        assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
        ShouldUpdateCC = true; // Update CC later on.
        // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
        // with the new def.
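        // For example (illustrative MIR), with
        //   %1 = NEG32r %0, implicit-def $eflags
        //   ...
        //   TEST32rr %0, %0, implicit-def $eflags   <- CmpInstr
        // the NEG can serve as the flag def: COND_E users of the TEST become
        // COND_AE (NewCC) and COND_NE users its opposite, COND_B.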
3545 Def = J; 3546 MI = &*Def; 3547 break; 3548 } 3549 3550 if (J == I) 3551 return false; 3552 } 3553 } 3554 3555 // We are searching for an earlier instruction that can make CmpInstr 3556 // redundant and that instruction will be saved in Sub. 3557 MachineInstr *Sub = nullptr; 3558 const TargetRegisterInfo *TRI = &getRegisterInfo(); 3559 3560 // We iterate backward, starting from the instruction before CmpInstr and 3561 // stop when reaching the definition of a source register or done with the BB. 3562 // RI points to the instruction before CmpInstr. 3563 // If the definition is in this basic block, RE points to the definition; 3564 // otherwise, RE is the rend of the basic block. 3565 MachineBasicBlock::reverse_iterator 3566 RI = ++I.getReverse(), 3567 RE = CmpInstr.getParent() == MI->getParent() 3568 ? Def.getReverse() /* points to MI */ 3569 : CmpInstr.getParent()->rend(); 3570 MachineInstr *Movr0Inst = nullptr; 3571 for (; RI != RE; ++RI) { 3572 MachineInstr &Instr = *RI; 3573 // Check whether CmpInstr can be made redundant by the current instruction. 3574 if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, 3575 CmpValue, Instr)) { 3576 Sub = &Instr; 3577 break; 3578 } 3579 3580 if (Instr.modifiesRegister(X86::EFLAGS, TRI) || 3581 Instr.readsRegister(X86::EFLAGS, TRI)) { 3582 // This instruction modifies or uses EFLAGS. 3583 3584 // MOV32r0 etc. are implemented with xor which clobbers condition code. 3585 // They are safe to move up, if the definition to EFLAGS is dead and 3586 // earlier instructions do not read or write EFLAGS. 3587 if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 && 3588 Instr.registerDefIsDead(X86::EFLAGS, TRI)) { 3589 Movr0Inst = &Instr; 3590 continue; 3591 } 3592 3593 // We can't remove CmpInstr. 3594 return false; 3595 } 3596 } 3597 3598 // Return false if no candidates exist. 3599 if (!IsCmpZero && !Sub) 3600 return false; 3601 3602 bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && 3603 Sub->getOperand(2).getReg() == SrcReg); 3604 3605 // Scan forward from the instruction after CmpInstr for uses of EFLAGS. 3606 // It is safe to remove CmpInstr if EFLAGS is redefined or killed. 3607 // If we are done with the basic block, we need to check whether EFLAGS is 3608 // live-out. 3609 bool IsSafe = false; 3610 SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate; 3611 MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); 3612 for (++I; I != E; ++I) { 3613 const MachineInstr &Instr = *I; 3614 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); 3615 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); 3616 // We should check the usage if this instruction uses and updates EFLAGS. 3617 if (!UseEFLAGS && ModifyEFLAGS) { 3618 // It is safe to remove CmpInstr if EFLAGS is updated again. 3619 IsSafe = true; 3620 break; 3621 } 3622 if (!UseEFLAGS && !ModifyEFLAGS) 3623 continue; 3624 3625 // EFLAGS is used by this instruction. 3626 X86::CondCode OldCC = X86::COND_INVALID; 3627 if (IsCmpZero || IsSwapped) { 3628 // We decode the condition code from opcode. 
3629 if (Instr.isBranch()) 3630 OldCC = X86::getCondFromBranch(Instr); 3631 else { 3632 OldCC = X86::getCondFromSETCC(Instr); 3633 if (OldCC == X86::COND_INVALID) 3634 OldCC = X86::getCondFromCMov(Instr); 3635 } 3636 if (OldCC == X86::COND_INVALID) return false; 3637 } 3638 X86::CondCode ReplacementCC = X86::COND_INVALID; 3639 if (IsCmpZero) { 3640 switch (OldCC) { 3641 default: break; 3642 case X86::COND_A: case X86::COND_AE: 3643 case X86::COND_B: case X86::COND_BE: 3644 case X86::COND_G: case X86::COND_GE: 3645 case X86::COND_L: case X86::COND_LE: 3646 case X86::COND_O: case X86::COND_NO: 3647 // CF and OF are used, we can't perform this optimization. 3648 return false; 3649 case X86::COND_S: case X86::COND_NS: 3650 // If SF is used, but the instruction doesn't update the SF, then we 3651 // can't do the optimization. 3652 if (NoSignFlag) 3653 return false; 3654 break; 3655 } 3656 3657 // If we're updating the condition code check if we have to reverse the 3658 // condition. 3659 if (ShouldUpdateCC) 3660 switch (OldCC) { 3661 default: 3662 return false; 3663 case X86::COND_E: 3664 ReplacementCC = NewCC; 3665 break; 3666 case X86::COND_NE: 3667 ReplacementCC = GetOppositeBranchCondition(NewCC); 3668 break; 3669 } 3670 } else if (IsSwapped) { 3671 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs 3672 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 3673 // We swap the condition code and synthesize the new opcode. 3674 ReplacementCC = getSwappedCondition(OldCC); 3675 if (ReplacementCC == X86::COND_INVALID) return false; 3676 } 3677 3678 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) { 3679 // Push the MachineInstr to OpsToUpdate. 3680 // If it is safe to remove CmpInstr, the condition code of these 3681 // instructions will be modified. 3682 OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC)); 3683 } 3684 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { 3685 // It is safe to remove CmpInstr if EFLAGS is updated again or killed. 3686 IsSafe = true; 3687 break; 3688 } 3689 } 3690 3691 // If EFLAGS is not killed nor re-defined, we should check whether it is 3692 // live-out. If it is live-out, do not optimize. 3693 if ((IsCmpZero || IsSwapped) && !IsSafe) { 3694 MachineBasicBlock *MBB = CmpInstr.getParent(); 3695 for (MachineBasicBlock *Successor : MBB->successors()) 3696 if (Successor->isLiveIn(X86::EFLAGS)) 3697 return false; 3698 } 3699 3700 // The instruction to be updated is either Sub or MI. 3701 Sub = IsCmpZero ? MI : Sub; 3702 // Move Movr0Inst to the appropriate place before Sub. 3703 if (Movr0Inst) { 3704 // Look backwards until we find a def that doesn't use the current EFLAGS. 3705 Def = Sub; 3706 MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(), 3707 InsertE = Sub->getParent()->rend(); 3708 for (; InsertI != InsertE; ++InsertI) { 3709 MachineInstr *Instr = &*InsertI; 3710 if (!Instr->readsRegister(X86::EFLAGS, TRI) && 3711 Instr->modifiesRegister(X86::EFLAGS, TRI)) { 3712 Sub->getParent()->remove(Movr0Inst); 3713 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), 3714 Movr0Inst); 3715 break; 3716 } 3717 } 3718 if (InsertI == InsertE) 3719 return false; 3720 } 3721 3722 // Make sure Sub instruction defines EFLAGS and mark the def live. 
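  // The EFLAGS def on Sub may currently be marked dead (e.g. a SUB whose
  // flags had no consumer until now), so clear the dead flag before rewiring
  // CmpInstr's users to it.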
  MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
  assert(FlagDef && "Unable to locate a def EFLAGS operand");
  FlagDef->setIsDead(false);

  CmpInstr.eraseFromParent();

  // Modify the condition code of instructions in OpsToUpdate.
  for (auto &Op : OpsToUpdate) {
    Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
        .setImm(Op.second);
  }
  return true;
}

/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load if it defines a virtual register, the virtual register
/// is used once in the same BB, and the instructions in between do not load
/// or store and have no side effects.
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                              const MachineRegisterInfo *MRI,
                                              unsigned &FoldAsLoadDefReg,
                                              MachineInstr *&DefMI) const {
  // Check whether we can move DefMI here.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(nullptr, SawStore))
    return nullptr;

  // Collect information about virtual register operands of MI.
  SmallVector<unsigned, 1> SrcOperandIds;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg != FoldAsLoadDefReg)
      continue;
    // Do not fold if we have a subreg use or a def.
    if (MO.getSubReg() || MO.isDef())
      return nullptr;
    SrcOperandIds.push_back(i);
  }
  if (SrcOperandIds.empty())
    return nullptr;

  // Check whether we can fold the def into SrcOperandIds.
  if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:
///   %xmm4 = V_SET0
/// to:
///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
///
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
                             const MCInstrDesc &Desc) {
  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
  unsigned Reg = MIB->getOperand(0).getReg();
  MIB->setDesc(Desc);

  // MachineInstr::addOperand() will insert explicit operands before any
  // implicit operands.
  MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
  // But we don't trust that.
  assert(MIB->getOperand(1).getReg() == Reg &&
         MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
  return true;
}

/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two %k0 reads.
/// This is used for mapping:
///   %k4 = K_SET1
/// to:
///   %k4 = KXNORrr %k0, %k0
static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
                            const MCInstrDesc &Desc, unsigned Reg) {
  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
  MIB->setDesc(Desc);
  MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
  return true;
}

static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
                          bool MinusOne) {
  MachineBasicBlock &MBB = *MIB->getParent();
  DebugLoc DL = MIB->getDebugLoc();
  unsigned Reg = MIB->getOperand(0).getReg();

  // Insert the XOR.
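  // (The complete expansion is the classic short idiom, e.g. for MOV32r1:
  //    xorl %eax, %eax
  //    incl %eax
  //  illustrative asm; it avoids encoding a 4-byte immediate.)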
3821 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) 3822 .addReg(Reg, RegState::Undef) 3823 .addReg(Reg, RegState::Undef); 3824 3825 // Turn the pseudo into an INC or DEC. 3826 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r)); 3827 MIB.addReg(Reg); 3828 3829 return true; 3830} 3831 3832static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, 3833 const TargetInstrInfo &TII, 3834 const X86Subtarget &Subtarget) { 3835 MachineBasicBlock &MBB = *MIB->getParent(); 3836 DebugLoc DL = MIB->getDebugLoc(); 3837 int64_t Imm = MIB->getOperand(1).getImm(); 3838 assert(Imm != 0 && "Using push/pop for 0 is not efficient."); 3839 MachineBasicBlock::iterator I = MIB.getInstr(); 3840 3841 int StackAdjustment; 3842 3843 if (Subtarget.is64Bit()) { 3844 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 || 3845 MIB->getOpcode() == X86::MOV32ImmSExti8); 3846 3847 // Can't use push/pop lowering if the function might write to the red zone. 3848 X86MachineFunctionInfo *X86FI = 3849 MBB.getParent()->getInfo<X86MachineFunctionInfo>(); 3850 if (X86FI->getUsesRedZone()) { 3851 MIB->setDesc(TII.get(MIB->getOpcode() == 3852 X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri)); 3853 return true; 3854 } 3855 3856 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and 3857 // widen the register if necessary. 3858 StackAdjustment = 8; 3859 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm); 3860 MIB->setDesc(TII.get(X86::POP64r)); 3861 MIB->getOperand(0) 3862 .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64)); 3863 } else { 3864 assert(MIB->getOpcode() == X86::MOV32ImmSExti8); 3865 StackAdjustment = 4; 3866 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); 3867 MIB->setDesc(TII.get(X86::POP32r)); 3868 } 3869 3870 // Build CFI if necessary. 3871 MachineFunction &MF = *MBB.getParent(); 3872 const X86FrameLowering *TFL = Subtarget.getFrameLowering(); 3873 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 3874 bool NeedsDwarfCFI = 3875 !IsWin64Prologue && 3876 (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry()); 3877 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI; 3878 if (EmitCFI) { 3879 TFL->BuildCFI(MBB, I, DL, 3880 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment)); 3881 TFL->BuildCFI(MBB, std::next(I), DL, 3882 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment)); 3883 } 3884 3885 return true; 3886} 3887 3888// LoadStackGuard has so far only been implemented for 64-bit MachO. Different 3889// code sequence is needed for other targets. 
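// The expansion below produces, roughly (illustrative asm; the actual guard
// symbol comes from the pseudo's memory operand):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg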
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
                                 const TargetInstrInfo &TII) {
  MachineBasicBlock &MBB = *MIB->getParent();
  DebugLoc DL = MIB->getDebugLoc();
  unsigned Reg = MIB->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
  auto Flags = MachineMemOperand::MOLoad |
               MachineMemOperand::MODereferenceable |
               MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
      MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
  MachineBasicBlock::iterator I = MIB.getInstr();

  BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
      .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
      .addMemOperand(MMO);
  MIB->setDebugLoc(DL);
  MIB->setDesc(TII.get(X86::MOV64rm));
  MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}

static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
  MachineBasicBlock &MBB = *MIB->getParent();
  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  unsigned XorOp =
      MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
  MIB->setDesc(TII.get(XorOp));
  MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
  return true;
}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that loads the lower 128/256 bits, but is available with only AVX512F.
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
                            const TargetRegisterInfo *TRI,
                            const MCInstrDesc &LoadDesc,
                            const MCInstrDesc &BroadcastDesc,
                            unsigned SubIdx) {
  unsigned DestReg = MIB->getOperand(0).getReg();
  // Check if DestReg is XMM16-31 or YMM16-31.
  if (TRI->getEncodingValue(DestReg) < 16) {
    // We can use a normal VEX encoded load.
    MIB->setDesc(LoadDesc);
  } else {
    // Use a 128/256-bit VBROADCAST instruction.
    MIB->setDesc(BroadcastDesc);
    // Change the destination to a 512-bit register.
    DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
    MIB->getOperand(0).setReg(DestReg);
  }
  return true;
}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that stores the lower 128/256 bits, but is available with only AVX512F.
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
                             const TargetRegisterInfo *TRI,
                             const MCInstrDesc &StoreDesc,
                             const MCInstrDesc &ExtractDesc,
                             unsigned SubIdx) {
  unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg();
  // Check if SrcReg is XMM16-31 or YMM16-31.
  if (TRI->getEncodingValue(SrcReg) < 16) {
    // We can use a normal VEX encoded store.
    MIB->setDesc(StoreDesc);
  } else {
    // Use a VEXTRACTF instruction.
    MIB->setDesc(ExtractDesc);
    // Change the source to a 512-bit register.
    SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
    MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
    MIB.addImm(0x0); // Append immediate to extract from the lower bits.
3967 } 3968 3969 return true; 3970} 3971 3972static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { 3973 MIB->setDesc(Desc); 3974 int64_t ShiftAmt = MIB->getOperand(2).getImm(); 3975 // Temporarily remove the immediate so we can add another source register. 3976 MIB->RemoveOperand(2); 3977 // Add the register. Don't copy the kill flag if there is one. 3978 MIB.addReg(MIB->getOperand(1).getReg(), 3979 getUndefRegState(MIB->getOperand(1).isUndef())); 3980 // Add back the immediate. 3981 MIB.addImm(ShiftAmt); 3982 return true; 3983} 3984 3985bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 3986 bool HasAVX = Subtarget.hasAVX(); 3987 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 3988 switch (MI.getOpcode()) { 3989 case X86::MOV32r0: 3990 return Expand2AddrUndef(MIB, get(X86::XOR32rr)); 3991 case X86::MOV32r1: 3992 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false); 3993 case X86::MOV32r_1: 3994 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true); 3995 case X86::MOV32ImmSExti8: 3996 case X86::MOV64ImmSExti8: 3997 return ExpandMOVImmSExti8(MIB, *this, Subtarget); 3998 case X86::SETB_C8r: 3999 return Expand2AddrUndef(MIB, get(X86::SBB8rr)); 4000 case X86::SETB_C16r: 4001 return Expand2AddrUndef(MIB, get(X86::SBB16rr)); 4002 case X86::SETB_C32r: 4003 return Expand2AddrUndef(MIB, get(X86::SBB32rr)); 4004 case X86::SETB_C64r: 4005 return Expand2AddrUndef(MIB, get(X86::SBB64rr)); 4006 case X86::MMX_SET0: 4007 return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr)); 4008 case X86::V_SET0: 4009 case X86::FsFLD0SS: 4010 case X86::FsFLD0SD: 4011 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); 4012 case X86::AVX_SET0: { 4013 assert(HasAVX && "AVX not supported"); 4014 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4015 unsigned SrcReg = MIB->getOperand(0).getReg(); 4016 unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); 4017 MIB->getOperand(0).setReg(XReg); 4018 Expand2AddrUndef(MIB, get(X86::VXORPSrr)); 4019 MIB.addReg(SrcReg, RegState::ImplicitDefine); 4020 return true; 4021 } 4022 case X86::AVX512_128_SET0: 4023 case X86::AVX512_FsFLD0SS: 4024 case X86::AVX512_FsFLD0SD: { 4025 bool HasVLX = Subtarget.hasVLX(); 4026 unsigned SrcReg = MIB->getOperand(0).getReg(); 4027 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4028 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) 4029 return Expand2AddrUndef(MIB, 4030 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); 4031 // Extended register without VLX. Use a larger XOR. 4032 SrcReg = 4033 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass); 4034 MIB->getOperand(0).setReg(SrcReg); 4035 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); 4036 } 4037 case X86::AVX512_256_SET0: 4038 case X86::AVX512_512_SET0: { 4039 bool HasVLX = Subtarget.hasVLX(); 4040 unsigned SrcReg = MIB->getOperand(0).getReg(); 4041 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4042 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { 4043 unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); 4044 MIB->getOperand(0).setReg(XReg); 4045 Expand2AddrUndef(MIB, 4046 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); 4047 MIB.addReg(SrcReg, RegState::ImplicitDefine); 4048 return true; 4049 } 4050 if (MI.getOpcode() == X86::AVX512_256_SET0) { 4051 // No VLX so we must reference a zmm. 
4052 unsigned ZReg = 4053 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass); 4054 MIB->getOperand(0).setReg(ZReg); 4055 } 4056 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); 4057 } 4058 case X86::V_SETALLONES: 4059 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); 4060 case X86::AVX2_SETALLONES: 4061 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); 4062 case X86::AVX1_SETALLONES: { 4063 unsigned Reg = MIB->getOperand(0).getReg(); 4064 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS. 4065 MIB->setDesc(get(X86::VCMPPSYrri)); 4066 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); 4067 return true; 4068 } 4069 case X86::AVX512_512_SETALLONES: { 4070 unsigned Reg = MIB->getOperand(0).getReg(); 4071 MIB->setDesc(get(X86::VPTERNLOGDZrri)); 4072 // VPTERNLOGD needs 3 register inputs and an immediate. 4073 // 0xff will return 1s for any input. 4074 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef) 4075 .addReg(Reg, RegState::Undef).addImm(0xff); 4076 return true; 4077 } 4078 case X86::AVX512_512_SEXT_MASK_32: 4079 case X86::AVX512_512_SEXT_MASK_64: { 4080 unsigned Reg = MIB->getOperand(0).getReg(); 4081 unsigned MaskReg = MIB->getOperand(1).getReg(); 4082 unsigned MaskState = getRegState(MIB->getOperand(1)); 4083 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? 4084 X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; 4085 MI.RemoveOperand(1); 4086 MIB->setDesc(get(Opc)); 4087 // VPTERNLOG needs 3 register inputs and an immediate. 4088 // 0xff will return 1s for any input. 4089 MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState) 4090 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff); 4091 return true; 4092 } 4093 case X86::VMOVAPSZ128rm_NOVLX: 4094 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm), 4095 get(X86::VBROADCASTF32X4rm), X86::sub_xmm); 4096 case X86::VMOVUPSZ128rm_NOVLX: 4097 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm), 4098 get(X86::VBROADCASTF32X4rm), X86::sub_xmm); 4099 case X86::VMOVAPSZ256rm_NOVLX: 4100 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm), 4101 get(X86::VBROADCASTF64X4rm), X86::sub_ymm); 4102 case X86::VMOVUPSZ256rm_NOVLX: 4103 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm), 4104 get(X86::VBROADCASTF64X4rm), X86::sub_ymm); 4105 case X86::VMOVAPSZ128mr_NOVLX: 4106 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr), 4107 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); 4108 case X86::VMOVUPSZ128mr_NOVLX: 4109 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr), 4110 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); 4111 case X86::VMOVAPSZ256mr_NOVLX: 4112 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr), 4113 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); 4114 case X86::VMOVUPSZ256mr_NOVLX: 4115 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), 4116 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); 4117 case X86::MOV32ri64: { 4118 unsigned Reg = MIB->getOperand(0).getReg(); 4119 unsigned Reg32 = RI.getSubReg(Reg, X86::sub_32bit); 4120 MI.setDesc(get(X86::MOV32ri)); 4121 MIB->getOperand(0).setReg(Reg32); 4122 MIB.addReg(Reg, RegState::ImplicitDefine); 4123 return true; 4124 } 4125 4126 // KNL does not recognize dependency-breaking idioms for mask registers, 4127 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1. 
  // Using %k0 as the undef input register is a performance heuristic based
  // on the assumption that %k0 is used less frequently than the other mask
  // registers, since it is not usable as a write mask.
  // FIXME: A more advanced approach would be to choose the best input mask
  // register based on context.
  case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
  case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
  case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
  case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
  case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
  case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(MIB, *this);
    return true;
  case X86::XOR64_FP:
  case X86::XOR32_FP:
    return expandXorFP(MIB, *this);
  case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
  case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
  case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
  case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
  case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break;
  case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break;
  case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break;
  case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break;
  case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break;
  case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
  case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
  case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
  case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
  case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
  case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
  }
  return false;
}

/// Return true for all instructions that only update
/// the first 32 or 64 bits of the destination register and leave the rest
/// unmodified. This can be used to avoid folding loads if the instructions
/// only update part of the destination register, and the non-updated part is
/// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
/// instructions breaks the partial register dependency and it can improve
/// performance. e.g.:
///
///   movss (%rdi), %xmm0
///   cvtss2sd %xmm0, %xmm0
///
/// Instead of
///   cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags bit.
///
static bool hasPartialRegUpdate(unsigned Opcode,
                                const X86Subtarget &Subtarget,
                                bool ForLoadFold = false) {
  switch (Opcode) {
  case X86::CVTSI2SSrr:
  case X86::CVTSI2SSrm:
  case X86::CVTSI642SSrr:
  case X86::CVTSI642SSrm:
  case X86::CVTSI2SDrr:
  case X86::CVTSI2SDrm:
  case X86::CVTSI642SDrr:
  case X86::CVTSI642SDrm:
    // Load folding won't affect the undef register update since the input is
    // a GPR.
    return !ForLoadFold;
  case X86::CVTSD2SSrr:
  case X86::CVTSD2SSrm:
  case X86::CVTSS2SDrr:
  case X86::CVTSS2SDrm:
  case X86::MOVHPDrm:
  case X86::MOVHPSrm:
  case X86::MOVLPDrm:
  case X86::MOVLPSrm:
  case X86::RCPSSr:
  case X86::RCPSSm:
  case X86::RCPSSr_Int:
  case X86::RCPSSm_Int:
  case X86::ROUNDSDr:
  case X86::ROUNDSDm:
  case X86::ROUNDSSr:
  case X86::ROUNDSSm:
  case X86::RSQRTSSr:
  case X86::RSQRTSSm:
  case X86::RSQRTSSr_Int:
  case X86::RSQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSm:
  case X86::SQRTSSr_Int:
  case X86::SQRTSSm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDm:
  case X86::SQRTSDr_Int:
  case X86::SQRTSDm_Int:
    return true;
  // GPR
  case X86::POPCNT32rm:
  case X86::POPCNT32rr:
  case X86::POPCNT64rm:
  case X86::POPCNT64rr:
    return Subtarget.hasPOPCNTFalseDeps();
  case X86::LZCNT32rm:
  case X86::LZCNT32rr:
  case X86::LZCNT64rm:
  case X86::LZCNT64rr:
  case X86::TZCNT32rm:
  case X86::TZCNT32rr:
  case X86::TZCNT64rm:
  case X86::TZCNT64rr:
    return Subtarget.hasLZCNTFalseDeps();
  }

  return false;
}

/// Inform the BreakFalseDeps pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {
  if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
    return 0;

  // If MI is marked as reading Reg, the partial register update is wanted.
  const MachineOperand &MO = MI.getOperand(0);
  unsigned Reg = MO.getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    if (MO.readsReg() || MI.readsVirtualRegister(Reg))
      return 0;
  } else {
    if (MI.readsRegister(Reg, TRI))
      return 0;
  }

  // If any instructions in the clearance range are reading Reg, insert a
  // dependency breaking instruction, which is inexpensive and is likely to
  // be hidden in other instructions' cycles.
  return PartialRegUpdateClearance;
}

// Return true for any instruction that copies the high bits of the first
// source operand into the unused high bits of the destination operand.
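// For example (illustrative asm):
//   vcvtsi2sdq %rax, %xmm1, %xmm0
// writes the converted value into %xmm0[63:0] but passes %xmm1[127:64]
// through to %xmm0[127:64], so %xmm0 inherits a dependence on %xmm1 even
// when those upper bits are never used.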
static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) {
  switch (Opcode) {
  case X86::VCVTSI2SSrr:
  case X86::VCVTSI2SSrm:
  case X86::VCVTSI2SSrr_Int:
  case X86::VCVTSI2SSrm_Int:
  case X86::VCVTSI642SSrr:
  case X86::VCVTSI642SSrm:
  case X86::VCVTSI642SSrr_Int:
  case X86::VCVTSI642SSrm_Int:
  case X86::VCVTSI2SDrr:
  case X86::VCVTSI2SDrm:
  case X86::VCVTSI2SDrr_Int:
  case X86::VCVTSI2SDrm_Int:
  case X86::VCVTSI642SDrr:
  case X86::VCVTSI642SDrm:
  case X86::VCVTSI642SDrr_Int:
  case X86::VCVTSI642SDrm_Int:
  // AVX-512
  case X86::VCVTSI2SSZrr:
  case X86::VCVTSI2SSZrm:
  case X86::VCVTSI2SSZrr_Int:
  case X86::VCVTSI2SSZrrb_Int:
  case X86::VCVTSI2SSZrm_Int:
  case X86::VCVTSI642SSZrr:
  case X86::VCVTSI642SSZrm:
  case X86::VCVTSI642SSZrr_Int:
  case X86::VCVTSI642SSZrrb_Int:
  case X86::VCVTSI642SSZrm_Int:
  case X86::VCVTSI2SDZrr:
  case X86::VCVTSI2SDZrm:
  case X86::VCVTSI2SDZrr_Int:
  case X86::VCVTSI2SDZrm_Int:
  case X86::VCVTSI642SDZrr:
  case X86::VCVTSI642SDZrm:
  case X86::VCVTSI642SDZrr_Int:
  case X86::VCVTSI642SDZrrb_Int:
  case X86::VCVTSI642SDZrm_Int:
  case X86::VCVTUSI2SSZrr:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI2SSZrr_Int:
  case X86::VCVTUSI2SSZrrb_Int:
  case X86::VCVTUSI2SSZrm_Int:
  case X86::VCVTUSI642SSZrr:
  case X86::VCVTUSI642SSZrm:
  case X86::VCVTUSI642SSZrr_Int:
  case X86::VCVTUSI642SSZrrb_Int:
  case X86::VCVTUSI642SSZrm_Int:
  case X86::VCVTUSI2SDZrr:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI2SDZrr_Int:
  case X86::VCVTUSI2SDZrm_Int:
  case X86::VCVTUSI642SDZrr:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI642SDZrr_Int:
  case X86::VCVTUSI642SDZrrb_Int:
  case X86::VCVTUSI642SDZrm_Int:
    // Load folding won't affect the undef register update since the input is
    // a GPR.
    return !ForLoadFold;
  case X86::VCVTSD2SSrr:
  case X86::VCVTSD2SSrm:
  case X86::VCVTSD2SSrr_Int:
  case X86::VCVTSD2SSrm_Int:
  case X86::VCVTSS2SDrr:
  case X86::VCVTSS2SDrm:
  case X86::VCVTSS2SDrr_Int:
  case X86::VCVTSS2SDrm_Int:
  case X86::VRCPSSr:
  case X86::VRCPSSr_Int:
  case X86::VRCPSSm:
  case X86::VRCPSSm_Int:
  case X86::VROUNDSDr:
  case X86::VROUNDSDm:
  case X86::VROUNDSDr_Int:
  case X86::VROUNDSDm_Int:
  case X86::VROUNDSSr:
  case X86::VROUNDSSm:
  case X86::VROUNDSSr_Int:
  case X86::VROUNDSSm_Int:
  case X86::VRSQRTSSr:
  case X86::VRSQRTSSr_Int:
  case X86::VRSQRTSSm:
  case X86::VRSQRTSSm_Int:
  case X86::VSQRTSSr:
  case X86::VSQRTSSr_Int:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSDr_Int:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  // AVX-512
  case X86::VCVTSD2SSZrr:
  case X86::VCVTSD2SSZrr_Int:
  case X86::VCVTSD2SSZrrb_Int:
  case X86::VCVTSD2SSZrm:
  case X86::VCVTSD2SSZrm_Int:
  case X86::VCVTSS2SDZrr:
  case X86::VCVTSS2SDZrr_Int:
  case X86::VCVTSS2SDZrrb_Int:
  case X86::VCVTSS2SDZrm:
  case X86::VCVTSS2SDZrm_Int:
  case X86::VGETEXPSDZr:
  case X86::VGETEXPSDZrb:
  case X86::VGETEXPSDZm:
  case X86::VGETEXPSSZr:
  case X86::VGETEXPSSZrb:
  case X86::VGETEXPSSZm:
  case X86::VGETMANTSDZrri:
  case X86::VGETMANTSDZrrib:
  case X86::VGETMANTSDZrmi:
  case X86::VGETMANTSSZrri:
  case X86::VGETMANTSSZrrib:
  case X86::VGETMANTSSZrmi:
  case X86::VRNDSCALESDZr:
  case X86::VRNDSCALESDZr_Int:
  case X86::VRNDSCALESDZrb_Int:
  case X86::VRNDSCALESDZm:
  case X86::VRNDSCALESDZm_Int:
  case X86::VRNDSCALESSZr:
  case X86::VRNDSCALESSZr_Int:
  case X86::VRNDSCALESSZrb_Int:
  case X86::VRNDSCALESSZm:
  case X86::VRNDSCALESSZm_Int:
  case X86::VRCP14SDZrr:
  case X86::VRCP14SDZrm:
  case X86::VRCP14SSZrr:
  case X86::VRCP14SSZrm:
  case X86::VRCP28SDZr:
  case X86::VRCP28SDZrb:
  case X86::VRCP28SDZm:
  case X86::VRCP28SSZr:
  case X86::VRCP28SSZrb:
  case X86::VRCP28SSZm:
  case X86::VREDUCESSZrmi:
  case X86::VREDUCESSZrri:
  case X86::VREDUCESSZrrib:
  case X86::VRSQRT14SDZrr:
  case X86::VRSQRT14SDZrm:
  case X86::VRSQRT14SSZrr:
  case X86::VRSQRT14SSZrm:
  case X86::VRSQRT28SDZr:
  case X86::VRSQRT28SDZrb:
  case X86::VRSQRT28SDZm:
  case X86::VRSQRT28SSZr:
  case X86::VRSQRT28SSZrb:
  case X86::VRSQRT28SSZm:
  case X86::VSQRTSSZr:
  case X86::VSQRTSSZr_Int:
  case X86::VSQRTSSZrb_Int:
  case X86::VSQRTSSZm:
  case X86::VSQRTSSZm_Int:
  case X86::VSQRTSDZr:
  case X86::VSQRTSDZr_Int:
  case X86::VSQRTSDZrb_Int:
  case X86::VSQRTSDZm:
  case X86::VSQRTSDZm_Int:
    return true;
  }

  return false;
}

/// Inform the BreakFalseDeps pass how many idle instructions we would like
/// before certain undef register reads.
///
/// This catches the VCVTSI2SD family of instructions:
///
///   vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// We should be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
///   vxorps undef %xmm1, undef %xmm1, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
unsigned
X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
                                   const TargetRegisterInfo *TRI) const {
  if (!hasUndefRegUpdate(MI.getOpcode()))
    return 0;

  // Set the OpNum parameter to the first source operand.
  OpNum = 1;

  const MachineOperand &MO = MI.getOperand(OpNum);
  if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
    return UndefRegClearance;
  }
  return 0;
}

void X86InstrInfo::breakPartialRegDependency(
    MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
  unsigned Reg = MI.getOperand(OpNum).getReg();
  // If MI kills this register, the false dependence is already broken.
  if (MI.killsRegister(Reg, TRI))
    return;

  if (X86::VR128RegClass.contains(Reg)) {
    // These instructions are all floating point domain, so xorps is the best
    // choice.
    unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
        .addReg(Reg, RegState::Undef)
        .addReg(Reg, RegState::Undef);
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::VR256RegClass.contains(Reg)) {
    // Use vxorps to clear the full ymm register.
    // It wants to read and write the xmm sub-register.
    unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
        .addReg(XReg, RegState::Undef)
        .addReg(XReg, RegState::Undef)
        .addReg(Reg, RegState::ImplicitDefine);
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::GR64RegClass.contains(Reg)) {
    // Using XOR32rr because it has a shorter encoding and zeros the upper
    // bits as well.
    unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit);
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
        .addReg(XReg, RegState::Undef)
        .addReg(XReg, RegState::Undef)
        .addReg(Reg, RegState::ImplicitDefine);
    MI.addRegisterKilled(Reg, TRI, true);
  } else if (X86::GR32RegClass.contains(Reg)) {
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
        .addReg(Reg, RegState::Undef)
        .addReg(Reg, RegState::Undef);
    MI.addRegisterKilled(Reg, TRI, true);
  }
}

static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
                        int PtrOffset = 0) {
  unsigned NumAddrOps = MOs.size();

  if (NumAddrOps < 4) {
    // FrameIndex only - add an immediate offset (whether it's zero or not).
    for (unsigned i = 0; i != NumAddrOps; ++i)
      MIB.add(MOs[i]);
    addOffset(MIB, PtrOffset);
  } else {
    // General Memory Addressing - we need to add any offset to an existing
    // offset.
    assert(MOs.size() == 5 && "Unexpected memory operand list length");
    for (unsigned i = 0; i != NumAddrOps; ++i) {
      const MachineOperand &MO = MOs[i];
      if (i == 3 && PtrOffset != 0) {
        MIB.addDisp(MO, PtrOffset);
      } else {
        MIB.add(MO);
      }
    }
  }
}

static void updateOperandRegConstraints(MachineFunction &MF,
                                        MachineInstr &NewMI,
                                        const TargetInstrInfo &TII) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();

  for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
    MachineOperand &MO = NewMI.getOperand(Idx);
    // We only need to update constraints on virtual register operands.
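    // (The folded opcode can impose a narrower register class than the vreg
    // currently has -- e.g. a form that only encodes xmm0-xmm15 -- so each
    // operand is re-constrained against the new instruction's descriptor;
    // in the common case this is a no-op.)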
4539 if (!MO.isReg()) 4540 continue; 4541 unsigned Reg = MO.getReg(); 4542 if (!TRI.isVirtualRegister(Reg)) 4543 continue; 4544 4545 auto *NewRC = MRI.constrainRegClass( 4546 Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF)); 4547 if (!NewRC) { 4548 LLVM_DEBUG( 4549 dbgs() << "WARNING: Unable to update register constraint for operand " 4550 << Idx << " of instruction:\n"; 4551 NewMI.dump(); dbgs() << "\n"); 4552 } 4553 } 4554} 4555 4556static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, 4557 ArrayRef<MachineOperand> MOs, 4558 MachineBasicBlock::iterator InsertPt, 4559 MachineInstr &MI, 4560 const TargetInstrInfo &TII) { 4561 // Create the base instruction with the memory operand as the first part. 4562 // Omit the implicit operands, something BuildMI can't do. 4563 MachineInstr *NewMI = 4564 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); 4565 MachineInstrBuilder MIB(MF, NewMI); 4566 addOperands(MIB, MOs); 4567 4568 // Loop over the rest of the ri operands, converting them over. 4569 unsigned NumOps = MI.getDesc().getNumOperands() - 2; 4570 for (unsigned i = 0; i != NumOps; ++i) { 4571 MachineOperand &MO = MI.getOperand(i + 2); 4572 MIB.add(MO); 4573 } 4574 for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) { 4575 MachineOperand &MO = MI.getOperand(i); 4576 MIB.add(MO); 4577 } 4578 4579 updateOperandRegConstraints(MF, *NewMI, TII); 4580 4581 MachineBasicBlock *MBB = InsertPt->getParent(); 4582 MBB->insert(InsertPt, NewMI); 4583 4584 return MIB; 4585} 4586 4587static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, 4588 unsigned OpNo, ArrayRef<MachineOperand> MOs, 4589 MachineBasicBlock::iterator InsertPt, 4590 MachineInstr &MI, const TargetInstrInfo &TII, 4591 int PtrOffset = 0) { 4592 // Omit the implicit operands, something BuildMI can't do. 4593 MachineInstr *NewMI = 4594 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); 4595 MachineInstrBuilder MIB(MF, NewMI); 4596 4597 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4598 MachineOperand &MO = MI.getOperand(i); 4599 if (i == OpNo) { 4600 assert(MO.isReg() && "Expected to fold into reg operand!"); 4601 addOperands(MIB, MOs, PtrOffset); 4602 } else { 4603 MIB.add(MO); 4604 } 4605 } 4606 4607 updateOperandRegConstraints(MF, *NewMI, TII); 4608 4609 MachineBasicBlock *MBB = InsertPt->getParent(); 4610 MBB->insert(InsertPt, NewMI); 4611 4612 return MIB; 4613} 4614 4615static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, 4616 ArrayRef<MachineOperand> MOs, 4617 MachineBasicBlock::iterator InsertPt, 4618 MachineInstr &MI) { 4619 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, 4620 MI.getDebugLoc(), TII.get(Opcode)); 4621 addOperands(MIB, MOs); 4622 return MIB.addImm(0); 4623} 4624 4625MachineInstr *X86InstrInfo::foldMemoryOperandCustom( 4626 MachineFunction &MF, MachineInstr &MI, unsigned OpNum, 4627 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, 4628 unsigned Size, unsigned Align) const { 4629 switch (MI.getOpcode()) { 4630 case X86::INSERTPSrr: 4631 case X86::VINSERTPSrr: 4632 case X86::VINSERTPSZrr: 4633 // Attempt to convert the load of inserted vector into a fold load 4634 // of a single float. 
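    // For example (illustrative asm), when %xmm1 is reloaded from a stack
    // slot:
    //   insertps $0x40, %xmm1, %xmm0    ; SrcIdx = 1, DstIdx = 0, ZMask = 0
    // can instead load just the selected lane:
    //   insertps $0x00, 4(%rsp), %xmm0  ; offset = SrcIdx * 4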
    if (OpNum == 2) {
      unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
      unsigned ZMask = Imm & 15;
      unsigned DstIdx = (Imm >> 4) & 3;
      unsigned SrcIdx = (Imm >> 6) & 3;

      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
      unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
      if ((Size == 0 || Size >= 16) && RCSize >= 16 && 4 <= Align) {
        int PtrOffset = SrcIdx * 4;
        unsigned NewImm = (DstIdx << 4) | ZMask;
        unsigned NewOpCode =
            (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
            (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
            X86::INSERTPSrm;
        MachineInstr *NewMI =
            FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
        NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
        return NewMI;
      }
    }
    break;
  case X86::MOVHLPSrr:
  case X86::VMOVHLPSrr:
  case X86::VMOVHLPSZrr:
    // Move the upper 64 bits of the second operand to the lower 64 bits.
    // To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
    // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
    if (OpNum == 2) {
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
      unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
      if ((Size == 0 || Size >= 16) && RCSize >= 16 && 8 <= Align) {
        unsigned NewOpCode =
            (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
            (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
            X86::MOVLPSrm;
        MachineInstr *NewMI =
            FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
        return NewMI;
      }
    }
    break;
  case X86::UNPCKLPDrr:
    // If we won't be able to fold this to the memory form of UNPCKL, use
    // MOVHPD instead. Done as custom because we can't have this in the load
    // table twice.
    if (OpNum == 2) {
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
      unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
      if ((Size == 0 || Size >= 16) && RCSize >= 16 && Align < 16) {
        MachineInstr *NewMI =
            FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
        return NewMI;
      }
    }
    break;
  }

  return nullptr;
}

static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
                                               MachineInstr &MI) {
  if (!hasUndefRegUpdate(MI.getOpcode(), /*ForLoadFold*/true) ||
      !MI.getOperand(1).isReg())
    return false;

  // There are two cases we need to handle depending on where in the pipeline
  // the folding attempt is being made.
  // -Register has the undef flag set.
  // -Register is produced by the IMPLICIT_DEF instruction.
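  // Illustrative MIR for the two cases:
  //   %0 = VCVTSD2SSrr undef %1, %2   ; case 1: undef flag on the use
  // and
  //   %1 = IMPLICIT_DEF
  //   %0 = VCVTSD2SSrr %1, %2         ; case 2: pass-through is an IMPLICIT_DEF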
4709 4710 if (MI.getOperand(1).isUndef()) 4711 return true; 4712 4713 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4714 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg()); 4715 return VRegDef && VRegDef->isImplicitDef(); 4716} 4717 4718 4719MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 4720 MachineFunction &MF, MachineInstr &MI, unsigned OpNum, 4721 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, 4722 unsigned Size, unsigned Align, bool AllowCommute) const { 4723 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps(); 4724 bool isTwoAddrFold = false; 4725 4726 // For CPUs that favor the register form of a call or push, 4727 // do not fold loads into calls or pushes, unless optimizing for size 4728 // aggressively. 4729 if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && 4730 (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || 4731 MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || 4732 MI.getOpcode() == X86::PUSH64r)) 4733 return nullptr; 4734 4735 // Avoid partial and undef register update stalls unless optimizing for size. 4736 if (!MF.getFunction().hasOptSize() && 4737 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || 4738 shouldPreventUndefRegUpdateMemFold(MF, MI))) 4739 return nullptr; 4740 4741 unsigned NumOps = MI.getDesc().getNumOperands(); 4742 bool isTwoAddr = 4743 NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; 4744 4745 // FIXME: AsmPrinter doesn't know how to handle 4746 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. 4747 if (MI.getOpcode() == X86::ADD32ri && 4748 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) 4749 return nullptr; 4750 4751 // GOTTPOFF relocation loads can only be folded into add instructions. 4752 // FIXME: Need to exclude other relocations that only support specific 4753 // instructions. 4754 if (MOs.size() == X86::AddrNumOperands && 4755 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF && 4756 MI.getOpcode() != X86::ADD64rr) 4757 return nullptr; 4758 4759 MachineInstr *NewMI = nullptr; 4760 4761 // Attempt to fold any custom cases we have. 4762 if (MachineInstr *CustomMI = 4763 foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align)) 4764 return CustomMI; 4765 4766 const X86MemoryFoldTableEntry *I = nullptr; 4767 4768 // Folding a memory location into the two-address part of a two-address 4769 // instruction is different than folding it other places. It requires 4770 // replacing the *two* registers with the memory location. 
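  // For example (illustrative MIR), folding a reload of %0 into
  //   %0 = ADD32rr %0 (tied), %1, implicit-def $eflags
  // replaces both tied operands at once, giving a memory-destination add:
  //   ADD32mr %stack.0, 1, $noreg, 0, $noreg, %1, implicit-def $eflags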
4771 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() && 4772 MI.getOperand(1).isReg() && 4773 MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { 4774 I = lookupTwoAddrFoldTable(MI.getOpcode()); 4775 isTwoAddrFold = true; 4776 } else { 4777 if (OpNum == 0) { 4778 if (MI.getOpcode() == X86::MOV32r0) { 4779 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); 4780 if (NewMI) 4781 return NewMI; 4782 } 4783 } 4784 4785 I = lookupFoldTable(MI.getOpcode(), OpNum); 4786 } 4787 4788 if (I != nullptr) { 4789 unsigned Opcode = I->DstOp; 4790 unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; 4791 if (Align < MinAlign) 4792 return nullptr; 4793 bool NarrowToMOV32rm = false; 4794 if (Size) { 4795 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 4796 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, 4797 &RI, MF); 4798 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; 4799 if (Size < RCSize) { 4800 // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int. 4801 // Check if it's safe to fold the load. If the size of the object is 4802 // narrower than the load width, then it's not. 4803 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) 4804 return nullptr; 4805 // If this is a 64-bit load, but the spill slot is only 32 bits, then we can do 4806 // a 32-bit load, which is implicitly zero-extended. This likely is 4807 // due to live interval analysis remat'ing a load from stack slot. 4808 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) 4809 return nullptr; 4810 Opcode = X86::MOV32rm; 4811 NarrowToMOV32rm = true; 4812 } 4813 } 4814 4815 if (isTwoAddrFold) 4816 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); 4817 else 4818 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); 4819 4820 if (NarrowToMOV32rm) { 4821 // This is the special case where we use a MOV32rm to load a 32-bit 4822 // value and zero-extend the top bits; change the destination register 4823 // to a 32-bit one. 4824 unsigned DstReg = NewMI->getOperand(0).getReg(); 4825 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 4826 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); 4827 else 4828 NewMI->getOperand(0).setSubReg(X86::sub_32bit); 4829 } 4830 return NewMI; 4831 } 4832 4833 // If the instruction and target operand are commutable, commute the 4834 // instruction and try again. 4835 if (AllowCommute) { 4836 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; 4837 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { 4838 bool HasDef = MI.getDesc().getNumDefs(); 4839 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register(); 4840 Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); 4841 Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); 4842 bool Tied1 = 4843 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); 4844 bool Tied2 = 4845 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); 4846 4847 // If either of the commutable operands is tied to the destination, 4848 // then we cannot commute + fold. 4849 if ((HasDef && Reg0 == Reg1 && Tied1) || 4850 (HasDef && Reg0 == Reg2 && Tied2)) 4851 return nullptr; 4852 4853 MachineInstr *CommutedMI = 4854 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 4855 if (!CommutedMI) { 4856 // Unable to commute. 4857 return nullptr; 4858 } 4859 if (CommutedMI != &MI) { 4860 // New instruction. We can't fold from this.
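// (commuteInstruction was asked to commute MI in place (NewMI == false),
// so it normally hands back &MI; a distinct instruction here is the
// unexpected case, cannot be used for the fold, and is discarded.)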
4861 CommutedMI->eraseFromParent(); 4862 return nullptr; 4863 } 4864 4865 // Attempt to fold with the commuted version of the instruction. 4866 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, 4867 Size, Align, /*AllowCommute=*/false); 4868 if (NewMI) 4869 return NewMI; 4870 4871 // Folding failed again - undo the commute before returning. 4872 MachineInstr *UncommutedMI = 4873 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 4874 if (!UncommutedMI) { 4875 // Unable to commute. 4876 return nullptr; 4877 } 4878 if (UncommutedMI != &MI) { 4879 // New instruction. It doesn't need to be kept. 4880 UncommutedMI->eraseFromParent(); 4881 return nullptr; 4882 } 4883 4884 // Return here to prevent duplicate fuse failure report. 4885 return nullptr; 4886 } 4887 } 4888 4889 // No fusion 4890 if (PrintFailedFusing && !MI.isCopy()) 4891 dbgs() << "We failed to fuse operand " << OpNum << " in " << MI; 4892 return nullptr; 4893} 4894 4895MachineInstr * 4896X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, 4897 ArrayRef<unsigned> Ops, 4898 MachineBasicBlock::iterator InsertPt, 4899 int FrameIndex, LiveIntervals *LIS, 4900 VirtRegMap *VRM) const { 4901 // Check switch flag 4902 if (NoFusing) 4903 return nullptr; 4904 4905 // Avoid partial and undef register update stalls unless optimizing for size. 4906 if (!MF.getFunction().hasOptSize() && 4907 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || 4908 shouldPreventUndefRegUpdateMemFold(MF, MI))) 4909 return nullptr; 4910 4911 // Don't fold subreg spills, or reloads that use a high subreg. 4912 for (auto Op : Ops) { 4913 MachineOperand &MO = MI.getOperand(Op); 4914 auto SubReg = MO.getSubReg(); 4915 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi)) 4916 return nullptr; 4917 } 4918 4919 const MachineFrameInfo &MFI = MF.getFrameInfo(); 4920 unsigned Size = MFI.getObjectSize(FrameIndex); 4921 unsigned Alignment = MFI.getObjectAlignment(FrameIndex); 4922 // If the function stack isn't realigned we don't want to fold instructions 4923 // that need increased alignment. 4924 if (!RI.needsStackRealignment(MF)) 4925 Alignment = 4926 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment()); 4927 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 4928 unsigned NewOpc = 0; 4929 unsigned RCSize = 0; 4930 switch (MI.getOpcode()) { 4931 default: return nullptr; 4932 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; 4933 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; 4934 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; 4935 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; 4936 } 4937 // Check if it's safe to fold the load. If the size of the object is 4938 // narrower than the load width, then it's not. 4939 if (Size < RCSize) 4940 return nullptr; 4941 // Change to CMPXXri r, 0 first. 4942 MI.setDesc(get(NewOpc)); 4943 MI.getOperand(1).ChangeToImmediate(0); 4944 } else if (Ops.size() != 1) 4945 return nullptr; 4946 4947 return foldMemoryOperandImpl(MF, MI, Ops[0], 4948 MachineOperand::CreateFI(FrameIndex), InsertPt, 4949 Size, Alignment, /*AllowCommute=*/true); 4950} 4951 4952/// Check if \p LoadMI is a partial register load that we can't fold into \p MI 4953/// because the latter uses contents that wouldn't be defined in the folded 4954/// version. 
For instance, this transformation isn't legal: 4955 /// movss (%rdi), %xmm0 4956 /// addps %xmm0, %xmm0 4957 /// -> 4958 /// addps (%rdi), %xmm0 4959 /// 4960 /// But this one is: 4961 /// movss (%rdi), %xmm0 4962 /// addss %xmm0, %xmm0 4963 /// -> 4964 /// addss (%rdi), %xmm0 4965 /// 4966 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, 4967 const MachineInstr &UserMI, 4968 const MachineFunction &MF) { 4969 unsigned Opc = LoadMI.getOpcode(); 4970 unsigned UserOpc = UserMI.getOpcode(); 4971 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 4972 const TargetRegisterClass *RC = 4973 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); 4974 unsigned RegSize = TRI.getRegSizeInBits(*RC); 4975 4976 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm || 4977 Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt || 4978 Opc == X86::VMOVSSZrm_alt) && 4979 RegSize > 32) { 4980 // These instructions only load 32 bits, so we can't fold them if the 4981 // destination register is wider than 32 bits (4 bytes) and the user 4982 // instruction isn't scalar (SS). 4983 switch (UserOpc) { 4984 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int: 4985 case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int: 4986 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int: 4987 case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int: 4988 case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int: 4989 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int: 4990 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int: 4991 case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz: 4992 case X86::VCMPSSZrr_Intk: 4993 case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz: 4994 case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz: 4995 case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz: 4996 case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz: 4997 case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz: 4998 case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int: 4999 case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int: 5000 case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int: 5001 case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int: 5002 case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int: 5003 case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int: 5004 case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int: 5005 case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int: 5006 case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int: 5007 case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int: 5008 case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int: 5009 case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int: 5010 case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int: 5011 case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int: 5012 case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk: 5013 case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk: 5014 case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk: 5015 case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk: 5016 case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk: 5017 case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk: 5018 case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz: 5019 case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz: 5020 case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz: 5021 case
X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz: 5022 case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz: 5023 case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz: 5024 return false; 5025 default: 5026 return true; 5027 } 5028 } 5029 5030 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm || 5031 Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt || 5032 Opc == X86::VMOVSDZrm_alt) && 5033 RegSize > 64) { 5034 // These instructions only load 64 bits, so we can't fold them if the 5035 // destination register is wider than 64 bits (8 bytes) and the user 5036 // instruction isn't scalar (SD). 5037 switch (UserOpc) { 5038 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int: 5039 case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int: 5040 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int: 5041 case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int: 5042 case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int: 5043 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int: 5044 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int: 5045 case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz: 5046 case X86::VCMPSDZrr_Intk: 5047 case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz: 5048 case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz: 5049 case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz: 5050 case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz: 5051 case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz: 5052 case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int: 5053 case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int: 5054 case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int: 5055 case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int: 5056 case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int: 5057 case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int: 5058 case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int: 5059 case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int: 5060 case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int: 5061 case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int: 5062 case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int: 5063 case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int: 5064 case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int: 5065 case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int: 5066 case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk: 5067 case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk: 5068 case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk: 5069 case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk: 5070 case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk: 5071 case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk: 5072 case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz: 5073 case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz: 5074 case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz: 5075 case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz: 5076 case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz: 5077 case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz: 5078 return false; 5079 default: 5080 return true; 5081 } 5082 } 5083 5084 return false; 5085} 5086 5087MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 5088 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 5089 MachineBasicBlock::iterator InsertPt, MachineInstr
&LoadMI, 5090 LiveIntervals *LIS) const { 5091 5092 // TODO: Support the case where LoadMI loads a wide register, but MI 5093 // only uses a subreg. 5094 for (auto Op : Ops) { 5095 if (MI.getOperand(Op).getSubReg()) 5096 return nullptr; 5097 } 5098 5099 // If loading from a FrameIndex, fold directly from the FrameIndex. 5100 unsigned NumOps = LoadMI.getDesc().getNumOperands(); 5101 int FrameIndex; 5102 if (isLoadFromStackSlot(LoadMI, FrameIndex)) { 5103 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) 5104 return nullptr; 5105 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS); 5106 } 5107 5108 // Check switch flag 5109 if (NoFusing) return nullptr; 5110 5111 // Avoid partial and undef register update stalls unless optimizing for size. 5112 if (!MF.getFunction().hasOptSize() && 5113 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || 5114 shouldPreventUndefRegUpdateMemFold(MF, MI))) 5115 return nullptr; 5116 5117 // Determine the alignment of the load. 5118 unsigned Alignment = 0; 5119 if (LoadMI.hasOneMemOperand()) 5120 Alignment = (*LoadMI.memoperands_begin())->getAlignment(); 5121 else 5122 switch (LoadMI.getOpcode()) { 5123 case X86::AVX512_512_SET0: 5124 case X86::AVX512_512_SETALLONES: 5125 Alignment = 64; 5126 break; 5127 case X86::AVX2_SETALLONES: 5128 case X86::AVX1_SETALLONES: 5129 case X86::AVX_SET0: 5130 case X86::AVX512_256_SET0: 5131 Alignment = 32; 5132 break; 5133 case X86::V_SET0: 5134 case X86::V_SETALLONES: 5135 case X86::AVX512_128_SET0: 5136 Alignment = 16; 5137 break; 5138 case X86::MMX_SET0: 5139 case X86::FsFLD0SD: 5140 case X86::AVX512_FsFLD0SD: 5141 Alignment = 8; 5142 break; 5143 case X86::FsFLD0SS: 5144 case X86::AVX512_FsFLD0SS: 5145 Alignment = 4; 5146 break; 5147 default: 5148 return nullptr; 5149 } 5150 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 5151 unsigned NewOpc = 0; 5152 switch (MI.getOpcode()) { 5153 default: return nullptr; 5154 case X86::TEST8rr: NewOpc = X86::CMP8ri; break; 5155 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; 5156 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; 5157 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; 5158 } 5159 // Change to CMPXXri r, 0 first. 5160 MI.setDesc(get(NewOpc)); 5161 MI.getOperand(1).ChangeToImmediate(0); 5162 } else if (Ops.size() != 1) 5163 return nullptr; 5164 5165 // Make sure the subregisters match. 5166 // Otherwise we risk changing the size of the load. 5167 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg()) 5168 return nullptr; 5169 5170 SmallVector<MachineOperand,X86::AddrNumOperands> MOs; 5171 switch (LoadMI.getOpcode()) { 5172 case X86::MMX_SET0: 5173 case X86::V_SET0: 5174 case X86::V_SETALLONES: 5175 case X86::AVX2_SETALLONES: 5176 case X86::AVX1_SETALLONES: 5177 case X86::AVX_SET0: 5178 case X86::AVX512_128_SET0: 5179 case X86::AVX512_256_SET0: 5180 case X86::AVX512_512_SET0: 5181 case X86::AVX512_512_SETALLONES: 5182 case X86::FsFLD0SD: 5183 case X86::AVX512_FsFLD0SD: 5184 case X86::FsFLD0SS: 5185 case X86::AVX512_FsFLD0SS: { 5186 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. 5187 // Create a constant-pool entry and operands to load from it. 5188 5189 // Medium and large mode can't fold loads this way. 5190 if (MF.getTarget().getCodeModel() != CodeModel::Small && 5191 MF.getTarget().getCodeModel() != CodeModel::Kernel) 5192 return nullptr; 5193 5194 // x86-32 PIC requires a PIC base register for constant pools. 
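// On x86-64, by contrast, the constant-pool entry is reached
// RIP-relatively, e.g. (a sketch of the eventual assembly):
//   xorps .LCPI0_0(%rip), %xmm0
// which is also why only the small and kernel code models are allowed
// above: the folded address must fit in a 32-bit displacement.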
5195 unsigned PICBase = 0; 5196 if (MF.getTarget().isPositionIndependent()) { 5197 if (Subtarget.is64Bit()) 5198 PICBase = X86::RIP; 5199 else 5200 // FIXME: PICBase = getGlobalBaseReg(&MF); 5201 // This doesn't work for several reasons. 5202 // 1. GlobalBaseReg may have been spilled. 5203 // 2. It may not be live at MI. 5204 return nullptr; 5205 } 5206 5207 // Create a constant-pool entry. 5208 MachineConstantPool &MCP = *MF.getConstantPool(); 5209 Type *Ty; 5210 unsigned Opc = LoadMI.getOpcode(); 5211 if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS) 5212 Ty = Type::getFloatTy(MF.getFunction().getContext()); 5213 else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD) 5214 Ty = Type::getDoubleTy(MF.getFunction().getContext()); 5215 else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES) 5216 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16); 5217 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 || 5218 Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES) 5219 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8); 5220 else if (Opc == X86::MMX_SET0) 5221 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2); 5222 else 5223 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4); 5224 5225 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES || 5226 Opc == X86::AVX512_512_SETALLONES || 5227 Opc == X86::AVX1_SETALLONES); 5228 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : 5229 Constant::getNullValue(Ty); 5230 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); 5231 5232 // Create operands to load from the constant pool entry. 5233 MOs.push_back(MachineOperand::CreateReg(PICBase, false)); 5234 MOs.push_back(MachineOperand::CreateImm(1)); 5235 MOs.push_back(MachineOperand::CreateReg(0, false)); 5236 MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); 5237 MOs.push_back(MachineOperand::CreateReg(0, false)); 5238 break; 5239 } 5240 default: { 5241 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) 5242 return nullptr; 5243 5244 // Folding a normal load. Just copy the load's address operands. 5245 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, 5246 LoadMI.operands_begin() + NumOps); 5247 break; 5248 } 5249 } 5250 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, 5251 /*Size=*/0, Alignment, /*AllowCommute=*/true); 5252} 5253 5254static SmallVector<MachineMemOperand *, 2> 5255extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { 5256 SmallVector<MachineMemOperand *, 2> LoadMMOs; 5257 5258 for (MachineMemOperand *MMO : MMOs) { 5259 if (!MMO->isLoad()) 5260 continue; 5261 5262 if (!MMO->isStore()) { 5263 // Reuse the MMO. 5264 LoadMMOs.push_back(MMO); 5265 } else { 5266 // Clone the MMO and unset the store flag. 5267 LoadMMOs.push_back(MF.getMachineMemOperand( 5268 MMO, MMO->getFlags() & ~MachineMemOperand::MOStore)); 5269 } 5270 } 5271 5272 return LoadMMOs; 5273} 5274 5275static SmallVector<MachineMemOperand *, 2> 5276extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { 5277 SmallVector<MachineMemOperand *, 2> StoreMMOs; 5278 5279 for (MachineMemOperand *MMO : MMOs) { 5280 if (!MMO->isStore()) 5281 continue; 5282 5283 if (!MMO->isLoad()) { 5284 // Reuse the MMO. 5285 StoreMMOs.push_back(MMO); 5286 } else { 5287 // Clone the MMO and unset the load flag. 
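// (This getMachineMemOperand overload copies the operand with the same
// value, offset, and size and replaces only the flags, so the clone is
// identical apart from no longer claiming to load.)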
5288 StoreMMOs.push_back(MF.getMachineMemOperand( 5289 MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad)); 5290 } 5291 } 5292 5293 return StoreMMOs; 5294} 5295 5296bool X86InstrInfo::unfoldMemoryOperand( 5297 MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, 5298 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { 5299 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode()); 5300 if (I == nullptr) 5301 return false; 5302 unsigned Opc = I->DstOp; 5303 unsigned Index = I->Flags & TB_INDEX_MASK; 5304 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5305 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5306 if (UnfoldLoad && !FoldedLoad) 5307 return false; 5308 UnfoldLoad &= FoldedLoad; 5309 if (UnfoldStore && !FoldedStore) 5310 return false; 5311 UnfoldStore &= FoldedStore; 5312 5313 const MCInstrDesc &MCID = get(Opc); 5314 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 5315 // TODO: Check if 32-byte or greater accesses are slow too? 5316 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass && 5317 Subtarget.isUnalignedMem16Slow()) 5318 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will 5319 // conservatively assume the address is unaligned. That's bad for 5320 // performance. 5321 return false; 5322 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; 5323 SmallVector<MachineOperand,2> BeforeOps; 5324 SmallVector<MachineOperand,2> AfterOps; 5325 SmallVector<MachineOperand,4> ImpOps; 5326 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 5327 MachineOperand &Op = MI.getOperand(i); 5328 if (i >= Index && i < Index + X86::AddrNumOperands) 5329 AddrOps.push_back(Op); 5330 else if (Op.isReg() && Op.isImplicit()) 5331 ImpOps.push_back(Op); 5332 else if (i < Index) 5333 BeforeOps.push_back(Op); 5334 else if (i > Index) 5335 AfterOps.push_back(Op); 5336 } 5337 5338 // Emit the load instruction. 5339 if (UnfoldLoad) { 5340 auto MMOs = extractLoadMMOs(MI.memoperands(), MF); 5341 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs); 5342 if (UnfoldStore) { 5343 // Address operands cannot be marked isKill. 5344 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { 5345 MachineOperand &MO = NewMIs[0]->getOperand(i); 5346 if (MO.isReg()) 5347 MO.setIsKill(false); 5348 } 5349 } 5350 } 5351 5352 // Emit the data processing instruction. 5353 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true); 5354 MachineInstrBuilder MIB(MF, DataMI); 5355 5356 if (FoldedStore) 5357 MIB.addReg(Reg, RegState::Define); 5358 for (MachineOperand &BeforeOp : BeforeOps) 5359 MIB.add(BeforeOp); 5360 if (FoldedLoad) 5361 MIB.addReg(Reg); 5362 for (MachineOperand &AfterOp : AfterOps) 5363 MIB.add(AfterOp); 5364 for (MachineOperand &ImpOp : ImpOps) { 5365 MIB.addReg(ImpOp.getReg(), 5366 getDefRegState(ImpOp.isDef()) | 5367 RegState::Implicit | 5368 getKillRegState(ImpOp.isKill()) | 5369 getDeadRegState(ImpOp.isDead()) | 5370 getUndefRegState(ImpOp.isUndef())); 5371 } 5372 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 
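// (This reverses the rewrite done at fold time; a sketch of the round
// trip:  TEST32rr %eax, %eax  --fold-->  CMP32mi8 <mem>, 0
//        --unfold-->  CMP32ri8 %eax, 0  --here-->  TEST32rr %eax, %eax.)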
5373 switch (DataMI->getOpcode()) { 5374 default: break; 5375 case X86::CMP64ri32: 5376 case X86::CMP64ri8: 5377 case X86::CMP32ri: 5378 case X86::CMP32ri8: 5379 case X86::CMP16ri: 5380 case X86::CMP16ri8: 5381 case X86::CMP8ri: { 5382 MachineOperand &MO0 = DataMI->getOperand(0); 5383 MachineOperand &MO1 = DataMI->getOperand(1); 5384 if (MO1.getImm() == 0) { 5385 unsigned NewOpc; 5386 switch (DataMI->getOpcode()) { 5387 default: llvm_unreachable("Unreachable!"); 5388 case X86::CMP64ri8: 5389 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; 5390 case X86::CMP32ri8: 5391 case X86::CMP32ri: NewOpc = X86::TEST32rr; break; 5392 case X86::CMP16ri8: 5393 case X86::CMP16ri: NewOpc = X86::TEST16rr; break; 5394 case X86::CMP8ri: NewOpc = X86::TEST8rr; break; 5395 } 5396 DataMI->setDesc(get(NewOpc)); 5397 MO1.ChangeToRegister(MO0.getReg(), false); 5398 } 5399 } 5400 } 5401 NewMIs.push_back(DataMI); 5402 5403 // Emit the store instruction. 5404 if (UnfoldStore) { 5405 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); 5406 auto MMOs = extractStoreMMOs(MI.memoperands(), MF); 5407 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs); 5408 } 5409 5410 return true; 5411} 5412 5413bool 5414X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 5415 SmallVectorImpl<SDNode*> &NewNodes) const { 5416 if (!N->isMachineOpcode()) 5417 return false; 5418 5419 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode()); 5420 if (I == nullptr) 5421 return false; 5422 unsigned Opc = I->DstOp; 5423 unsigned Index = I->Flags & TB_INDEX_MASK; 5424 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5425 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5426 const MCInstrDesc &MCID = get(Opc); 5427 MachineFunction &MF = DAG.getMachineFunction(); 5428 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 5429 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 5430 unsigned NumDefs = MCID.NumDefs; 5431 std::vector<SDValue> AddrOps; 5432 std::vector<SDValue> BeforeOps; 5433 std::vector<SDValue> AfterOps; 5434 SDLoc dl(N); 5435 unsigned NumOps = N->getNumOperands(); 5436 for (unsigned i = 0; i != NumOps-1; ++i) { 5437 SDValue Op = N->getOperand(i); 5438 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) 5439 AddrOps.push_back(Op); 5440 else if (i < Index-NumDefs) 5441 BeforeOps.push_back(Op); 5442 else if (i > Index-NumDefs) 5443 AfterOps.push_back(Op); 5444 } 5445 SDValue Chain = N->getOperand(NumOps-1); 5446 AddrOps.push_back(Chain); 5447 5448 // Emit the load instruction. 5449 SDNode *Load = nullptr; 5450 if (FoldedLoad) { 5451 EVT VT = *TRI.legalclasstypes_begin(*RC); 5452 auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF); 5453 if (MMOs.empty() && RC == &X86::VR128RegClass && 5454 Subtarget.isUnalignedMem16Slow()) 5455 // Do not introduce a slow unaligned load. 5456 return false; 5457 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 5458 // memory access is slow above. 5459 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 5460 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; 5461 Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl, 5462 VT, MVT::Other, AddrOps); 5463 NewNodes.push_back(Load); 5464 5465 // Preserve memory reference information. 5466 DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs); 5467 } 5468 5469 // Emit the data processing instruction. 
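// Its result types are the def's register-class type first (when the
// unfolded opcode defines a register), followed by every remaining
// non-chain value the original node produced.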
5470 std::vector<EVT> VTs; 5471 const TargetRegisterClass *DstRC = nullptr; 5472 if (MCID.getNumDefs() > 0) { 5473 DstRC = getRegClass(MCID, 0, &RI, MF); 5474 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC)); 5475 } 5476 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { 5477 EVT VT = N->getValueType(i); 5478 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) 5479 VTs.push_back(VT); 5480 } 5481 if (Load) 5482 BeforeOps.push_back(SDValue(Load, 0)); 5483 BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end()); 5484 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 5485 switch (Opc) { 5486 default: break; 5487 case X86::CMP64ri32: 5488 case X86::CMP64ri8: 5489 case X86::CMP32ri: 5490 case X86::CMP32ri8: 5491 case X86::CMP16ri: 5492 case X86::CMP16ri8: 5493 case X86::CMP8ri: 5494 if (isNullConstant(BeforeOps[1])) { 5495 switch (Opc) { 5496 default: llvm_unreachable("Unreachable!"); 5497 case X86::CMP64ri8: 5498 case X86::CMP64ri32: Opc = X86::TEST64rr; break; 5499 case X86::CMP32ri8: 5500 case X86::CMP32ri: Opc = X86::TEST32rr; break; 5501 case X86::CMP16ri8: 5502 case X86::CMP16ri: Opc = X86::TEST16rr; break; 5503 case X86::CMP8ri: Opc = X86::TEST8rr; break; 5504 } 5505 BeforeOps[1] = BeforeOps[0]; 5506 } 5507 } 5508 SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps); 5509 NewNodes.push_back(NewNode); 5510 5511 // Emit the store instruction. 5512 if (FoldedStore) { 5513 AddrOps.pop_back(); 5514 AddrOps.push_back(SDValue(NewNode, 0)); 5515 AddrOps.push_back(Chain); 5516 auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF); 5517 if (MMOs.empty() && RC == &X86::VR128RegClass && 5518 Subtarget.isUnalignedMem16Slow()) 5519 // Do not introduce a slow unaligned store. 5520 return false; 5521 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 5522 // memory access is slow above. 5523 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); 5524 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; 5525 SDNode *Store = 5526 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), 5527 dl, MVT::Other, AddrOps); 5528 NewNodes.push_back(Store); 5529 5530 // Preserve memory reference information.
5531 DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs); 5532 } 5533 5534 return true; 5535} 5536 5537unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, 5538 bool UnfoldLoad, bool UnfoldStore, 5539 unsigned *LoadRegIndex) const { 5540 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc); 5541 if (I == nullptr) 5542 return 0; 5543 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; 5544 bool FoldedStore = I->Flags & TB_FOLDED_STORE; 5545 if (UnfoldLoad && !FoldedLoad) 5546 return 0; 5547 if (UnfoldStore && !FoldedStore) 5548 return 0; 5549 if (LoadRegIndex) 5550 *LoadRegIndex = I->Flags & TB_INDEX_MASK; 5551 return I->DstOp; 5552} 5553 5554bool 5555X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 5556 int64_t &Offset1, int64_t &Offset2) const { 5557 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 5558 return false; 5559 unsigned Opc1 = Load1->getMachineOpcode(); 5560 unsigned Opc2 = Load2->getMachineOpcode(); 5561 switch (Opc1) { 5562 default: return false; 5563 case X86::MOV8rm: 5564 case X86::MOV16rm: 5565 case X86::MOV32rm: 5566 case X86::MOV64rm: 5567 case X86::LD_Fp32m: 5568 case X86::LD_Fp64m: 5569 case X86::LD_Fp80m: 5570 case X86::MOVSSrm: 5571 case X86::MOVSSrm_alt: 5572 case X86::MOVSDrm: 5573 case X86::MOVSDrm_alt: 5574 case X86::MMX_MOVD64rm: 5575 case X86::MMX_MOVQ64rm: 5576 case X86::MOVAPSrm: 5577 case X86::MOVUPSrm: 5578 case X86::MOVAPDrm: 5579 case X86::MOVUPDrm: 5580 case X86::MOVDQArm: 5581 case X86::MOVDQUrm: 5582 // AVX load instructions 5583 case X86::VMOVSSrm: 5584 case X86::VMOVSSrm_alt: 5585 case X86::VMOVSDrm: 5586 case X86::VMOVSDrm_alt: 5587 case X86::VMOVAPSrm: 5588 case X86::VMOVUPSrm: 5589 case X86::VMOVAPDrm: 5590 case X86::VMOVUPDrm: 5591 case X86::VMOVDQArm: 5592 case X86::VMOVDQUrm: 5593 case X86::VMOVAPSYrm: 5594 case X86::VMOVUPSYrm: 5595 case X86::VMOVAPDYrm: 5596 case X86::VMOVUPDYrm: 5597 case X86::VMOVDQAYrm: 5598 case X86::VMOVDQUYrm: 5599 // AVX512 load instructions 5600 case X86::VMOVSSZrm: 5601 case X86::VMOVSSZrm_alt: 5602 case X86::VMOVSDZrm: 5603 case X86::VMOVSDZrm_alt: 5604 case X86::VMOVAPSZ128rm: 5605 case X86::VMOVUPSZ128rm: 5606 case X86::VMOVAPSZ128rm_NOVLX: 5607 case X86::VMOVUPSZ128rm_NOVLX: 5608 case X86::VMOVAPDZ128rm: 5609 case X86::VMOVUPDZ128rm: 5610 case X86::VMOVDQU8Z128rm: 5611 case X86::VMOVDQU16Z128rm: 5612 case X86::VMOVDQA32Z128rm: 5613 case X86::VMOVDQU32Z128rm: 5614 case X86::VMOVDQA64Z128rm: 5615 case X86::VMOVDQU64Z128rm: 5616 case X86::VMOVAPSZ256rm: 5617 case X86::VMOVUPSZ256rm: 5618 case X86::VMOVAPSZ256rm_NOVLX: 5619 case X86::VMOVUPSZ256rm_NOVLX: 5620 case X86::VMOVAPDZ256rm: 5621 case X86::VMOVUPDZ256rm: 5622 case X86::VMOVDQU8Z256rm: 5623 case X86::VMOVDQU16Z256rm: 5624 case X86::VMOVDQA32Z256rm: 5625 case X86::VMOVDQU32Z256rm: 5626 case X86::VMOVDQA64Z256rm: 5627 case X86::VMOVDQU64Z256rm: 5628 case X86::VMOVAPSZrm: 5629 case X86::VMOVUPSZrm: 5630 case X86::VMOVAPDZrm: 5631 case X86::VMOVUPDZrm: 5632 case X86::VMOVDQU8Zrm: 5633 case X86::VMOVDQU16Zrm: 5634 case X86::VMOVDQA32Zrm: 5635 case X86::VMOVDQU32Zrm: 5636 case X86::VMOVDQA64Zrm: 5637 case X86::VMOVDQU64Zrm: 5638 case X86::KMOVBkm: 5639 case X86::KMOVWkm: 5640 case X86::KMOVDkm: 5641 case X86::KMOVQkm: 5642 break; 5643 } 5644 switch (Opc2) { 5645 default: return false; 5646 case X86::MOV8rm: 5647 case X86::MOV16rm: 5648 case X86::MOV32rm: 5649 case X86::MOV64rm: 5650 case X86::LD_Fp32m: 5651 case X86::LD_Fp64m: 5652 case X86::LD_Fp80m: 5653 case X86::MOVSSrm: 5654 case X86::MOVSSrm_alt: 5655 case X86::MOVSDrm: 
5656 case X86::MOVSDrm_alt: 5657 case X86::MMX_MOVD64rm: 5658 case X86::MMX_MOVQ64rm: 5659 case X86::MOVAPSrm: 5660 case X86::MOVUPSrm: 5661 case X86::MOVAPDrm: 5662 case X86::MOVUPDrm: 5663 case X86::MOVDQArm: 5664 case X86::MOVDQUrm: 5665 // AVX load instructions 5666 case X86::VMOVSSrm: 5667 case X86::VMOVSSrm_alt: 5668 case X86::VMOVSDrm: 5669 case X86::VMOVSDrm_alt: 5670 case X86::VMOVAPSrm: 5671 case X86::VMOVUPSrm: 5672 case X86::VMOVAPDrm: 5673 case X86::VMOVUPDrm: 5674 case X86::VMOVDQArm: 5675 case X86::VMOVDQUrm: 5676 case X86::VMOVAPSYrm: 5677 case X86::VMOVUPSYrm: 5678 case X86::VMOVAPDYrm: 5679 case X86::VMOVUPDYrm: 5680 case X86::VMOVDQAYrm: 5681 case X86::VMOVDQUYrm: 5682 // AVX512 load instructions 5683 case X86::VMOVSSZrm: 5684 case X86::VMOVSSZrm_alt: 5685 case X86::VMOVSDZrm: 5686 case X86::VMOVSDZrm_alt: 5687 case X86::VMOVAPSZ128rm: 5688 case X86::VMOVUPSZ128rm: 5689 case X86::VMOVAPSZ128rm_NOVLX: 5690 case X86::VMOVUPSZ128rm_NOVLX: 5691 case X86::VMOVAPDZ128rm: 5692 case X86::VMOVUPDZ128rm: 5693 case X86::VMOVDQU8Z128rm: 5694 case X86::VMOVDQU16Z128rm: 5695 case X86::VMOVDQA32Z128rm: 5696 case X86::VMOVDQU32Z128rm: 5697 case X86::VMOVDQA64Z128rm: 5698 case X86::VMOVDQU64Z128rm: 5699 case X86::VMOVAPSZ256rm: 5700 case X86::VMOVUPSZ256rm: 5701 case X86::VMOVAPSZ256rm_NOVLX: 5702 case X86::VMOVUPSZ256rm_NOVLX: 5703 case X86::VMOVAPDZ256rm: 5704 case X86::VMOVUPDZ256rm: 5705 case X86::VMOVDQU8Z256rm: 5706 case X86::VMOVDQU16Z256rm: 5707 case X86::VMOVDQA32Z256rm: 5708 case X86::VMOVDQU32Z256rm: 5709 case X86::VMOVDQA64Z256rm: 5710 case X86::VMOVDQU64Z256rm: 5711 case X86::VMOVAPSZrm: 5712 case X86::VMOVUPSZrm: 5713 case X86::VMOVAPDZrm: 5714 case X86::VMOVUPDZrm: 5715 case X86::VMOVDQU8Zrm: 5716 case X86::VMOVDQU16Zrm: 5717 case X86::VMOVDQA32Zrm: 5718 case X86::VMOVDQU32Zrm: 5719 case X86::VMOVDQA64Zrm: 5720 case X86::VMOVDQU64Zrm: 5721 case X86::KMOVBkm: 5722 case X86::KMOVWkm: 5723 case X86::KMOVDkm: 5724 case X86::KMOVQkm: 5725 break; 5726 } 5727 5728 // Lambda to check if both the loads have the same value for an operand index. 5729 auto HasSameOp = [&](int I) { 5730 return Load1->getOperand(I) == Load2->getOperand(I); 5731 }; 5732 5733 // All operands except the displacement should match. 5734 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) || 5735 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg)) 5736 return false; 5737 5738 // Chain Operand must be the same. 5739 if (!HasSameOp(5)) 5740 return false; 5741 5742 // Now let's examine if the displacements are constants. 5743 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp)); 5744 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp)); 5745 if (!Disp1 || !Disp2) 5746 return false; 5747 5748 Offset1 = Disp1->getSExtValue(); 5749 Offset2 = Disp2->getSExtValue(); 5750 return true; 5751} 5752 5753bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 5754 int64_t Offset1, int64_t Offset2, 5755 unsigned NumLoads) const { 5756 assert(Offset2 > Offset1); 5757 if ((Offset2 - Offset1) / 8 > 64) 5758 return false; 5759 5760 unsigned Opc1 = Load1->getMachineOpcode(); 5761 unsigned Opc2 = Load2->getMachineOpcode(); 5762 if (Opc1 != Opc2) 5763 return false; // FIXME: overly conservative? 
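// Worked example for the distance check above: loads at offsets 0 and
// 640 give (640 - 0) / 8 = 80 > 64 and are rejected, while offsets 0 and
// 256 give 256 / 8 = 32 <= 64 and pass on to the checks below.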
5764 5765 switch (Opc1) { 5766 default: break; 5767 case X86::LD_Fp32m: 5768 case X86::LD_Fp64m: 5769 case X86::LD_Fp80m: 5770 case X86::MMX_MOVD64rm: 5771 case X86::MMX_MOVQ64rm: 5772 return false; 5773 } 5774 5775 EVT VT = Load1->getValueType(0); 5776 switch (VT.getSimpleVT().SimpleTy) { 5777 default: 5778 // XMM registers. In 64-bit mode we can be a bit more aggressive since we 5779 // have 16 of them to play with. 5780 if (Subtarget.is64Bit()) { 5781 if (NumLoads >= 3) 5782 return false; 5783 } else if (NumLoads) { 5784 return false; 5785 } 5786 break; 5787 case MVT::i8: 5788 case MVT::i16: 5789 case MVT::i32: 5790 case MVT::i64: 5791 case MVT::f32: 5792 case MVT::f64: 5793 if (NumLoads) 5794 return false; 5795 break; 5796 } 5797 5798 return true; 5799} 5800 5801bool X86InstrInfo:: 5802reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 5803 assert(Cond.size() == 1 && "Invalid X86 branch condition!"); 5804 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); 5805 Cond[0].setImm(GetOppositeBranchCondition(CC)); 5806 return false; 5807} 5808 5809bool X86InstrInfo:: 5810isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 5811 // FIXME: Return false for x87 stack register classes for now. We can't 5812 // allow any loads of these registers before FpGet_ST0_80. 5813 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass || 5814 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass || 5815 RC == &X86::RFP80RegClass); 5816} 5817 5818/// Return a virtual register initialized with the 5819/// global base register value. Output instructions required to 5820/// initialize the register in the function entry block, if necessary. 5821/// 5822/// TODO: Eliminate this and move the code to X86MachineFunctionInfo. 5823/// 5824unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { 5825 assert((!Subtarget.is64Bit() || 5826 MF->getTarget().getCodeModel() == CodeModel::Medium || 5827 MF->getTarget().getCodeModel() == CodeModel::Large) && 5828 "X86-64 PIC uses RIP relative addressing"); 5829 5830 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); 5831 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); 5832 if (GlobalBaseReg != 0) 5833 return GlobalBaseReg; 5834 5835 // Create the register. The code to initialize it is inserted 5836 // later, by the CGBR pass (below). 5837 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 5838 GlobalBaseReg = RegInfo.createVirtualRegister( 5839 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass); 5840 X86FI->setGlobalBaseReg(GlobalBaseReg); 5841 return GlobalBaseReg; 5842} 5843 5844// These are the replaceable SSE instructions. Some of these have Int variants 5845// that we don't include here. We don't want to replace instructions selected 5846// by intrinsics.
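// A sketch of what these tables enable: an integer-domain instruction
// such as
//   %xmm0 = PANDrr %xmm0, %xmm1
// sitting among single-FP neighbours can be rewritten via its row
//   { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }
// to the PackedSingle column, i.e. %xmm0 = ANDPSrr %xmm0, %xmm1,
// avoiding a domain-crossing penalty on CPUs that have one.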
5847static const uint16_t ReplaceableInstrs[][3] = { 5848 //PackedSingle PackedDouble PackedInt 5849 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, 5850 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, 5851 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, 5852 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, 5853 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, 5854 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, 5855 { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr }, 5856 { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr }, 5857 { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm }, 5858 { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm }, 5859 { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm }, 5860 { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm }, 5861 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, 5862 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, 5863 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, 5864 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, 5865 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, 5866 { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, 5867 { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, 5868 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, 5869 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, 5870 { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm }, 5871 { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr }, 5872 { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm }, 5873 { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr }, 5874 { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm }, 5875 { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr }, 5876 { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm }, 5877 { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr }, 5878 { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr }, 5879 { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr }, 5880 // AVX 128-bit support 5881 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, 5882 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, 5883 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, 5884 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, 5885 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, 5886 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, 5887 { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr }, 5888 { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr }, 5889 { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm }, 5890 { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm }, 5891 { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm }, 5892 { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm }, 5893 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, 5894 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, 5895 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, 5896 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, 5897 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, 5898 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, 5899 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, 5900 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, 5901 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, 5902 { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm }, 5903 { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr }, 5904 { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm }, 5905 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr }, 5906 { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm }, 5907 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr }, 5908 { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm }, 5909 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, 
X86::VPUNPCKHDQrr }, 5910 { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr }, 5911 { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr }, 5912 // AVX 256-bit support 5913 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, 5914 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, 5915 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, 5916 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, 5917 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, 5918 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }, 5919 { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm }, 5920 { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr }, 5921 { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi }, 5922 { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri }, 5923 // AVX512 support 5924 { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr }, 5925 { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr }, 5926 { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr }, 5927 { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr }, 5928 { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr }, 5929 { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr }, 5930 { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm }, 5931 { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm }, 5932 { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm }, 5933 { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm }, 5934 { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r }, 5935 { X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m }, 5936 { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r }, 5937 { X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m }, 5938 { X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr }, 5939 { X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm }, 5940 { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r }, 5941 { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m }, 5942 { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r }, 5943 { X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m }, 5944 { X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr }, 5945 { X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm }, 5946 { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr }, 5947 { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm }, 5948 { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr }, 5949 { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm }, 5950 { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr }, 5951 { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm }, 5952 { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr }, 5953 { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm }, 5954 { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr }, 5955 { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm }, 5956 { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr }, 5957 { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm }, 5958 { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr }, 5959 { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr }, 5960 { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr }, 5961 { 
X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr }, 5962 { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr }, 5963 { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr }, 5964 { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr }, 5965 { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr }, 5966 { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr }, 5967 { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr }, 5968 { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr }, 5969 { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr }, 5970 { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi }, 5971 { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri }, 5972 { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi }, 5973 { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri }, 5974 { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi }, 5975 { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri }, 5976 { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi }, 5977 { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri }, 5978 { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm }, 5979 { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr }, 5980 { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi }, 5981 { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri }, 5982 { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm }, 5983 { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr }, 5984 { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm }, 5985 { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr }, 5986 { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi }, 5987 { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri }, 5988 { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm }, 5989 { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr }, 5990 { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm }, 5991 { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr }, 5992 { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm }, 5993 { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr }, 5994 { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm }, 5995 { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr }, 5996 { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm }, 5997 { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr }, 5998 { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm }, 5999 { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr }, 6000 { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm }, 6001 { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr }, 6002 { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm }, 6003 { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr }, 6004 { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm }, 6005 { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr }, 6006 { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm }, 6007 { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr }, 6008 { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm }, 6009 { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr }, 6010 { X86::VUNPCKLPSZrm, 
X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm }, 6011 { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr }, 6012 { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm }, 6013 { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr }, 6014 { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr }, 6015 { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr }, 6016}; 6017 6018static const uint16_t ReplaceableInstrsAVX2[][3] = { 6019 //PackedSingle PackedDouble PackedInt 6020 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, 6021 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, 6022 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, 6023 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, 6024 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, 6025 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, 6026 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, 6027 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, 6028 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, 6029 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, 6030 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, 6031 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, 6032 { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm}, 6033 { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr}, 6034 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, 6035 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, 6036 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, 6037 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}, 6038 { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 }, 6039 { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri }, 6040 { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi }, 6041 { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi }, 6042 { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri }, 6043 { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm }, 6044 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr }, 6045 { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm }, 6046 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr }, 6047 { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm }, 6048 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr }, 6049 { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm }, 6050 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr }, 6051}; 6052 6053static const uint16_t ReplaceableInstrsFP[][3] = { 6054 //PackedSingle PackedDouble 6055 { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END }, 6056 { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END }, 6057 { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END }, 6058 { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END }, 6059 { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END }, 6060 { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END }, 6061 { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END }, 6062 { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END }, 6063 { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END }, 6064}; 6065 6066static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = { 6067 //PackedSingle PackedDouble PackedInt 6068 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, 6069 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, 6070 { X86::VINSERTF128rm, X86::VINSERTF128rm, 
X86::VINSERTI128rm }, 6071 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, 6072}; 6073 6074static const uint16_t ReplaceableInstrsAVX512[][4] = { 6075 // Two integer columns for 64-bit and 32-bit elements. 6076 //PackedSingle PackedDouble PackedInt PackedInt 6077 { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr }, 6078 { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm }, 6079 { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr }, 6080 { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr }, 6081 { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm }, 6082 { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr }, 6083 { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm }, 6084 { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr }, 6085 { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr }, 6086 { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm }, 6087 { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr }, 6088 { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm }, 6089 { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr }, 6090 { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr }, 6091 { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm }, 6092}; 6093 6094static const uint16_t ReplaceableInstrsAVX512DQ[][4] = { 6095 // Two integer columns for 64-bit and 32-bit elements. 6096 //PackedSingle PackedDouble PackedInt PackedInt 6097 { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, 6098 { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, 6099 { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, 6100 { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, 6101 { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm }, 6102 { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr }, 6103 { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, 6104 { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, 6105 { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, 6106 { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, 6107 { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, 6108 { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, 6109 { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm }, 6110 { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr }, 6111 { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, 6112 { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, 6113 { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm }, 6114 { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr }, 6115 { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm }, 6116 { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr }, 6117 { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm }, 6118 { X86::VORPSZrr, 
static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
  // Two integer columns for 64-bit and 32-bit elements.
  //PackedSingle        PackedDouble        PackedInt          PackedInt
  { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
  { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
  { X86::VANDPSZ128rm,  X86::VANDPDZ128rm,  X86::VPANDQZ128rm,  X86::VPANDDZ128rm },
  { X86::VANDPSZ128rr,  X86::VANDPDZ128rr,  X86::VPANDQZ128rr,  X86::VPANDDZ128rr },
  { X86::VORPSZ128rm,   X86::VORPDZ128rm,   X86::VPORQZ128rm,   X86::VPORDZ128rm },
  { X86::VORPSZ128rr,   X86::VORPDZ128rr,   X86::VPORQZ128rr,   X86::VPORDZ128rr },
  { X86::VXORPSZ128rm,  X86::VXORPDZ128rm,  X86::VPXORQZ128rm,  X86::VPXORDZ128rm },
  { X86::VXORPSZ128rr,  X86::VXORPDZ128rr,  X86::VPXORQZ128rr,  X86::VPXORDZ128rr },
  { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
  { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
  { X86::VANDPSZ256rm,  X86::VANDPDZ256rm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm },
  { X86::VANDPSZ256rr,  X86::VANDPDZ256rr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr },
  { X86::VORPSZ256rm,   X86::VORPDZ256rm,   X86::VPORQZ256rm,   X86::VPORDZ256rm },
  { X86::VORPSZ256rr,   X86::VORPDZ256rr,   X86::VPORQZ256rr,   X86::VPORDZ256rr },
  { X86::VXORPSZ256rm,  X86::VXORPDZ256rm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm },
  { X86::VXORPSZ256rr,  X86::VXORPDZ256rr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr },
  { X86::VANDNPSZrm,    X86::VANDNPDZrm,    X86::VPANDNQZrm,    X86::VPANDNDZrm },
  { X86::VANDNPSZrr,    X86::VANDNPDZrr,    X86::VPANDNQZrr,    X86::VPANDNDZrr },
  { X86::VANDPSZrm,     X86::VANDPDZrm,     X86::VPANDQZrm,     X86::VPANDDZrm },
  { X86::VANDPSZrr,     X86::VANDPDZrr,     X86::VPANDQZrr,     X86::VPANDDZrr },
  { X86::VORPSZrm,      X86::VORPDZrm,      X86::VPORQZrm,      X86::VPORDZrm },
  { X86::VORPSZrr,      X86::VORPDZrr,      X86::VPORQZrr,      X86::VPORDZrr },
  { X86::VXORPSZrm,     X86::VXORPDZrm,     X86::VPXORQZrm,     X86::VPXORDZrm },
  { X86::VXORPSZrr,     X86::VXORPDZrr,     X86::VPXORQZrr,     X86::VPXORDZrr },
};

static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
  // Two integer columns for 64-bit and 32-bit elements.
  //PackedSingle          PackedDouble
  //PackedInt             PackedInt
  { X86::VANDNPSZ128rmk,  X86::VANDNPDZ128rmk,
    X86::VPANDNQZ128rmk,  X86::VPANDNDZ128rmk },
  { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
    X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
  { X86::VANDNPSZ128rrk,  X86::VANDNPDZ128rrk,
    X86::VPANDNQZ128rrk,  X86::VPANDNDZ128rrk },
  { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
    X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
  { X86::VANDPSZ128rmk,   X86::VANDPDZ128rmk,
    X86::VPANDQZ128rmk,   X86::VPANDDZ128rmk },
  { X86::VANDPSZ128rmkz,  X86::VANDPDZ128rmkz,
    X86::VPANDQZ128rmkz,  X86::VPANDDZ128rmkz },
  { X86::VANDPSZ128rrk,   X86::VANDPDZ128rrk,
    X86::VPANDQZ128rrk,   X86::VPANDDZ128rrk },
  { X86::VANDPSZ128rrkz,  X86::VANDPDZ128rrkz,
    X86::VPANDQZ128rrkz,  X86::VPANDDZ128rrkz },
  { X86::VORPSZ128rmk,    X86::VORPDZ128rmk,
    X86::VPORQZ128rmk,    X86::VPORDZ128rmk },
  { X86::VORPSZ128rmkz,   X86::VORPDZ128rmkz,
    X86::VPORQZ128rmkz,   X86::VPORDZ128rmkz },
  { X86::VORPSZ128rrk,    X86::VORPDZ128rrk,
    X86::VPORQZ128rrk,    X86::VPORDZ128rrk },
  { X86::VORPSZ128rrkz,   X86::VORPDZ128rrkz,
    X86::VPORQZ128rrkz,   X86::VPORDZ128rrkz },
  { X86::VXORPSZ128rmk,   X86::VXORPDZ128rmk,
    X86::VPXORQZ128rmk,   X86::VPXORDZ128rmk },
  { X86::VXORPSZ128rmkz,  X86::VXORPDZ128rmkz,
    X86::VPXORQZ128rmkz,  X86::VPXORDZ128rmkz },
  { X86::VXORPSZ128rrk,   X86::VXORPDZ128rrk,
    X86::VPXORQZ128rrk,   X86::VPXORDZ128rrk },
  { X86::VXORPSZ128rrkz,  X86::VXORPDZ128rrkz,
    X86::VPXORQZ128rrkz,  X86::VPXORDZ128rrkz },
  { X86::VANDNPSZ256rmk,  X86::VANDNPDZ256rmk,
    X86::VPANDNQZ256rmk,  X86::VPANDNDZ256rmk },
  { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
    X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
  { X86::VANDNPSZ256rrk,  X86::VANDNPDZ256rrk,
    X86::VPANDNQZ256rrk,  X86::VPANDNDZ256rrk },
  { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
    X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
  { X86::VANDPSZ256rmk,   X86::VANDPDZ256rmk,
    X86::VPANDQZ256rmk,   X86::VPANDDZ256rmk },
  { X86::VANDPSZ256rmkz,  X86::VANDPDZ256rmkz,
    X86::VPANDQZ256rmkz,  X86::VPANDDZ256rmkz },
  { X86::VANDPSZ256rrk,   X86::VANDPDZ256rrk,
    X86::VPANDQZ256rrk,   X86::VPANDDZ256rrk },
  { X86::VANDPSZ256rrkz,  X86::VANDPDZ256rrkz,
    X86::VPANDQZ256rrkz,  X86::VPANDDZ256rrkz },
  { X86::VORPSZ256rmk,    X86::VORPDZ256rmk,
    X86::VPORQZ256rmk,    X86::VPORDZ256rmk },
  { X86::VORPSZ256rmkz,   X86::VORPDZ256rmkz,
    X86::VPORQZ256rmkz,   X86::VPORDZ256rmkz },
  { X86::VORPSZ256rrk,    X86::VORPDZ256rrk,
    X86::VPORQZ256rrk,    X86::VPORDZ256rrk },
  { X86::VORPSZ256rrkz,   X86::VORPDZ256rrkz,
    X86::VPORQZ256rrkz,   X86::VPORDZ256rrkz },
  { X86::VXORPSZ256rmk,   X86::VXORPDZ256rmk,
    X86::VPXORQZ256rmk,   X86::VPXORDZ256rmk },
  { X86::VXORPSZ256rmkz,  X86::VXORPDZ256rmkz,
    X86::VPXORQZ256rmkz,  X86::VPXORDZ256rmkz },
  { X86::VXORPSZ256rrk,   X86::VXORPDZ256rrk,
    X86::VPXORQZ256rrk,   X86::VPXORDZ256rrk },
  { X86::VXORPSZ256rrkz,  X86::VXORPDZ256rrkz,
    X86::VPXORQZ256rrkz,  X86::VPXORDZ256rrkz },
  { X86::VANDNPSZrmk,     X86::VANDNPDZrmk,
    X86::VPANDNQZrmk,     X86::VPANDNDZrmk },
  { X86::VANDNPSZrmkz,    X86::VANDNPDZrmkz,
    X86::VPANDNQZrmkz,    X86::VPANDNDZrmkz },
  { X86::VANDNPSZrrk,     X86::VANDNPDZrrk,
    X86::VPANDNQZrrk,     X86::VPANDNDZrrk },
  { X86::VANDNPSZrrkz,    X86::VANDNPDZrrkz,
    X86::VPANDNQZrrkz,    X86::VPANDNDZrrkz },
  { X86::VANDPSZrmk,      X86::VANDPDZrmk,
    X86::VPANDQZrmk,      X86::VPANDDZrmk },
  { X86::VANDPSZrmkz,     X86::VANDPDZrmkz,
    X86::VPANDQZrmkz,     X86::VPANDDZrmkz },
  { X86::VANDPSZrrk,      X86::VANDPDZrrk,
    X86::VPANDQZrrk,      X86::VPANDDZrrk },
  { X86::VANDPSZrrkz,     X86::VANDPDZrrkz,
    X86::VPANDQZrrkz,     X86::VPANDDZrrkz },
  { X86::VORPSZrmk,       X86::VORPDZrmk,
    X86::VPORQZrmk,       X86::VPORDZrmk },
  { X86::VORPSZrmkz,      X86::VORPDZrmkz,
    X86::VPORQZrmkz,      X86::VPORDZrmkz },
  { X86::VORPSZrrk,       X86::VORPDZrrk,
    X86::VPORQZrrk,       X86::VPORDZrrk },
  { X86::VORPSZrrkz,      X86::VORPDZrrkz,
    X86::VPORQZrrkz,      X86::VPORDZrrkz },
  { X86::VXORPSZrmk,      X86::VXORPDZrmk,
    X86::VPXORQZrmk,      X86::VPXORDZrmk },
  { X86::VXORPSZrmkz,     X86::VXORPDZrmkz,
    X86::VPXORQZrmkz,     X86::VPXORDZrmkz },
  { X86::VXORPSZrrk,      X86::VXORPDZrrk,
    X86::VPXORQZrrk,      X86::VPXORDZrrk },
  { X86::VXORPSZrrkz,     X86::VXORPDZrrkz,
    X86::VPXORQZrrkz,     X86::VPXORDZrrkz },
  // Broadcast loads can be handled the same as masked operations to avoid
  // changing element size.
  { X86::VANDNPSZ128rmb,  X86::VANDNPDZ128rmb,
    X86::VPANDNQZ128rmb,  X86::VPANDNDZ128rmb },
  { X86::VANDPSZ128rmb,   X86::VANDPDZ128rmb,
    X86::VPANDQZ128rmb,   X86::VPANDDZ128rmb },
  { X86::VORPSZ128rmb,    X86::VORPDZ128rmb,
    X86::VPORQZ128rmb,    X86::VPORDZ128rmb },
  { X86::VXORPSZ128rmb,   X86::VXORPDZ128rmb,
    X86::VPXORQZ128rmb,   X86::VPXORDZ128rmb },
  { X86::VANDNPSZ256rmb,  X86::VANDNPDZ256rmb,
    X86::VPANDNQZ256rmb,  X86::VPANDNDZ256rmb },
  { X86::VANDPSZ256rmb,   X86::VANDPDZ256rmb,
    X86::VPANDQZ256rmb,   X86::VPANDDZ256rmb },
  { X86::VORPSZ256rmb,    X86::VORPDZ256rmb,
    X86::VPORQZ256rmb,    X86::VPORDZ256rmb },
  { X86::VXORPSZ256rmb,   X86::VXORPDZ256rmb,
    X86::VPXORQZ256rmb,   X86::VPXORDZ256rmb },
  { X86::VANDNPSZrmb,     X86::VANDNPDZrmb,
    X86::VPANDNQZrmb,     X86::VPANDNDZrmb },
  { X86::VANDPSZrmb,      X86::VANDPDZrmb,
    X86::VPANDQZrmb,      X86::VPANDDZrmb },
  { X86::VORPSZrmb,       X86::VORPDZrmb,
    X86::VPORQZrmb,       X86::VPORDZrmb },
  { X86::VXORPSZrmb,      X86::VXORPDZrmb,
    X86::VPXORQZrmb,      X86::VPXORDZrmb },
  { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
    X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
  { X86::VANDPSZ128rmbk,  X86::VANDPDZ128rmbk,
    X86::VPANDQZ128rmbk,  X86::VPANDDZ128rmbk },
  { X86::VORPSZ128rmbk,   X86::VORPDZ128rmbk,
    X86::VPORQZ128rmbk,   X86::VPORDZ128rmbk },
  { X86::VXORPSZ128rmbk,  X86::VXORPDZ128rmbk,
    X86::VPXORQZ128rmbk,  X86::VPXORDZ128rmbk },
  { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
    X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
  { X86::VANDPSZ256rmbk,  X86::VANDPDZ256rmbk,
    X86::VPANDQZ256rmbk,  X86::VPANDDZ256rmbk },
  { X86::VORPSZ256rmbk,   X86::VORPDZ256rmbk,
    X86::VPORQZ256rmbk,   X86::VPORDZ256rmbk },
  { X86::VXORPSZ256rmbk,  X86::VXORPDZ256rmbk,
    X86::VPXORQZ256rmbk,  X86::VPXORDZ256rmbk },
  { X86::VANDNPSZrmbk,    X86::VANDNPDZrmbk,
    X86::VPANDNQZrmbk,    X86::VPANDNDZrmbk },
  { X86::VANDPSZrmbk,     X86::VANDPDZrmbk,
    X86::VPANDQZrmbk,     X86::VPANDDZrmbk },
  { X86::VORPSZrmbk,      X86::VORPDZrmbk,
    X86::VPORQZrmbk,      X86::VPORDZrmbk },
  { X86::VXORPSZrmbk,     X86::VXORPDZrmbk,
    X86::VPXORQZrmbk,     X86::VPXORDZrmbk },
  { X86::VANDNPSZ128rmbkz, X86::VANDNPDZ128rmbkz,
    X86::VPANDNQZ128rmbkz, X86::VPANDNDZ128rmbkz },
  { X86::VANDPSZ128rmbkz,  X86::VANDPDZ128rmbkz,
    X86::VPANDQZ128rmbkz,  X86::VPANDDZ128rmbkz },
  { X86::VORPSZ128rmbkz,   X86::VORPDZ128rmbkz,
    X86::VPORQZ128rmbkz,   X86::VPORDZ128rmbkz },
  { X86::VXORPSZ128rmbkz,  X86::VXORPDZ128rmbkz,
    X86::VPXORQZ128rmbkz,  X86::VPXORDZ128rmbkz },
  { X86::VANDNPSZ256rmbkz, X86::VANDNPDZ256rmbkz,
    X86::VPANDNQZ256rmbkz, X86::VPANDNDZ256rmbkz },
  { X86::VANDPSZ256rmbkz,  X86::VANDPDZ256rmbkz,
    X86::VPANDQZ256rmbkz,  X86::VPANDDZ256rmbkz },
  { X86::VORPSZ256rmbkz,   X86::VORPDZ256rmbkz,
    X86::VPORQZ256rmbkz,   X86::VPORDZ256rmbkz },
  { X86::VXORPSZ256rmbkz,  X86::VXORPDZ256rmbkz,
    X86::VPXORQZ256rmbkz,  X86::VPXORDZ256rmbkz },
  { X86::VANDNPSZrmbkz,    X86::VANDNPDZrmbkz,
    X86::VPANDNQZrmbkz,    X86::VPANDNDZrmbkz },
  { X86::VANDPSZrmbkz,     X86::VANDPDZrmbkz,
    X86::VPANDQZrmbkz,     X86::VPANDDZrmbkz },
  { X86::VORPSZrmbkz,      X86::VORPDZrmbkz,
    X86::VPORQZrmbkz,      X86::VPORDZrmbkz },
  { X86::VXORPSZrmbkz,     X86::VXORPDZrmbkz,
    X86::VPXORQZrmbkz,     X86::VPXORDZrmbkz },
};

// NOTE: These should only be used by the custom domain methods.
static const uint16_t ReplaceableBlendInstrs[][3] = {
  //PackedSingle       PackedDouble       PackedInt
  { X86::BLENDPSrmi,   X86::BLENDPDrmi,   X86::PBLENDWrmi },
  { X86::BLENDPSrri,   X86::BLENDPDrri,   X86::PBLENDWrri },
  { X86::VBLENDPSrmi,  X86::VBLENDPDrmi,  X86::VPBLENDWrmi },
  { X86::VBLENDPSrri,  X86::VBLENDPDrri,  X86::VPBLENDWrri },
  { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
  { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
};
static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
  //PackedSingle       PackedDouble       PackedInt
  { X86::VBLENDPSrmi,  X86::VBLENDPDrmi,  X86::VPBLENDDrmi },
  { X86::VBLENDPSrri,  X86::VBLENDPDrri,  X86::VPBLENDDrri },
  { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
  { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
};

// Special table for changing EVEX logic instructions to VEX.
// TODO: Should we run EVEX->VEX earlier?
static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
  // Two integer columns for 64-bit and 32-bit elements.
  //PackedSingle     PackedDouble     PackedInt           PackedInt
  { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
  { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
  { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDQZ128rm,  X86::VPANDDZ128rm },
  { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDQZ128rr,  X86::VPANDDZ128rr },
  { X86::VORPSrm,    X86::VORPDrm,    X86::VPORQZ128rm,   X86::VPORDZ128rm },
  { X86::VORPSrr,    X86::VORPDrr,    X86::VPORQZ128rr,   X86::VPORDZ128rr },
  { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORQZ128rm,  X86::VPXORDZ128rm },
  { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORQZ128rr,  X86::VPXORDZ128rr },
  { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
  { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
  { X86::VANDPSYrm,  X86::VANDPDYrm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm },
  { X86::VANDPSYrr,  X86::VANDPDYrr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr },
  { X86::VORPSYrm,   X86::VORPDYrm,   X86::VPORQZ256rm,   X86::VPORDZ256rm },
  { X86::VORPSYrr,   X86::VORPDYrr,   X86::VPORQZ256rr,   X86::VPORDZ256rr },
  { X86::VXORPSYrm,  X86::VXORPDYrm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm },
  { X86::VXORPSYrr,  X86::VXORPDYrr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr },
};

// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const uint16_t *lookup(unsigned opcode, unsigned domain,
                              ArrayRef<uint16_t[3]> Table) {
  for (const uint16_t (&Row)[3] : Table)
    if (Row[domain-1] == opcode)
      return Row;
  return nullptr;
}

static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
                                    ArrayRef<uint16_t[4]> Table) {
  // If this is the integer domain make sure to check both integer columns.
  for (const uint16_t (&Row)[4] : Table)
    if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
      return Row;
  return nullptr;
}

// Helper to attempt to widen/narrow blend masks.
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
                            unsigned NewWidth, unsigned *pNewMask = nullptr) {
  assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
         "Illegal blend mask scale");
  unsigned NewMask = 0;

  if ((OldWidth % NewWidth) == 0) {
    unsigned Scale = OldWidth / NewWidth;
    unsigned SubMask = (1u << Scale) - 1;
    for (unsigned i = 0; i != NewWidth; ++i) {
      unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
      if (Sub == SubMask)
        NewMask |= (1u << i);
      else if (Sub != 0x0)
        return false;
    }
  } else {
    unsigned Scale = NewWidth / OldWidth;
    unsigned SubMask = (1u << Scale) - 1;
    for (unsigned i = 0; i != OldWidth; ++i) {
      if (OldMask & (1 << i)) {
        NewMask |= (SubMask << (i * Scale));
      }
    }
  }

  if (pNewMask)
    *pNewMask = NewMask;
  return true;
}
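
// For example, narrowing the 8-bit mask 0b00111100 to width 4 succeeds and
// produces 0b0110 because each pair of adjacent bits is uniform, while
// 0b00101100 fails because bits 4-5 would select only half of a wider
// element. Widening always succeeds: 0b0110 at width 4 becomes 0b00111100
// at width 8.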

uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  unsigned NumOperands = MI.getDesc().getNumOperands();

  auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
    uint16_t validDomains = 0;
    if (MI.getOperand(NumOperands - 1).isImm()) {
      unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
      if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
        validDomains |= 0x2; // PackedSingle
      if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
        validDomains |= 0x4; // PackedDouble
      if (!Is256 || Subtarget.hasAVX2())
        validDomains |= 0x8; // PackedInt
    }
    return validDomains;
  };

  switch (Opcode) {
  case X86::BLENDPDrmi:
  case X86::BLENDPDrri:
  case X86::VBLENDPDrmi:
  case X86::VBLENDPDrri:
    return GetBlendDomains(2, false);
  case X86::VBLENDPDYrmi:
  case X86::VBLENDPDYrri:
    return GetBlendDomains(4, true);
  case X86::BLENDPSrmi:
  case X86::BLENDPSrri:
  case X86::VBLENDPSrmi:
  case X86::VBLENDPSrri:
  case X86::VPBLENDDrmi:
  case X86::VPBLENDDrri:
    return GetBlendDomains(4, false);
  case X86::VBLENDPSYrmi:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDYrmi:
  case X86::VPBLENDDYrri:
    return GetBlendDomains(8, true);
  case X86::PBLENDWrmi:
  case X86::PBLENDWrri:
  case X86::VPBLENDWrmi:
  case X86::VPBLENDWrri:
  // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
  case X86::VPBLENDWYrmi:
  case X86::VPBLENDWYrri:
    return GetBlendDomains(8, false);
  case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
  case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
  case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
  case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
  case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
  case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
  case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
  case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
  case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
  case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
  case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
  case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
  case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
  case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
  case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
  case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm:
    // If we don't have DQI see if we can still switch from an EVEX integer
    // instruction to a VEX floating point instruction.
    if (Subtarget.hasDQI())
      return 0;

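    // The VEX encodings of the floating-point forms can only address
    // registers xmm0-xmm15; xmm16-xmm31 (encoding values >= 16) exist only
    // with EVEX, so any such operand rules out the conversion.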
    if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
      return 0;
    if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
      return 0;
    // Register forms will have 3 operands. Memory form will have more.
    if (NumOperands == 3 &&
        RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
      return 0;

    // All domains are valid.
    return 0xe;
  case X86::MOVHLPSrr:
    // We can swap domains when both inputs are the same register.
    // FIXME: This doesn't catch all the cases we would like. If the input
    // register isn't KILLed by the instruction, the two address instruction
    // pass puts a COPY on one input. The other input uses the original
    // register. This prevents the same physical register from being used by
    // both inputs.
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
        MI.getOperand(0).getSubReg() == 0 &&
        MI.getOperand(1).getSubReg() == 0 &&
        MI.getOperand(2).getSubReg() == 0)
      return 0x6;
    return 0;
  case X86::SHUFPDrri:
    return 0x6;
  }
  return 0;
}

bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
                                            unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");

  unsigned Opcode = MI.getOpcode();
  unsigned NumOperands = MI.getDesc().getNumOperands();

  auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
    if (MI.getOperand(NumOperands - 1).isImm()) {
      unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
      Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
      unsigned NewImm = Imm;

      const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
      if (!table)
        table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);

      if (Domain == 1) { // PackedSingle
        AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
      } else if (Domain == 2) { // PackedDouble
        AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
      } else if (Domain == 3) { // PackedInt
        if (Subtarget.hasAVX2()) {
          // If we are already VPBLENDW use that, else use VPBLENDD.
          if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
            table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
            AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
          }
        } else {
          assert(!Is256 && "128-bit vector expected");
          AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
        }
      }

      assert(table && table[Domain - 1] && "Unknown domain op");
      MI.setDesc(get(table[Domain - 1]));
      MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
    }
    return true;
  };

  switch (Opcode) {
  case X86::BLENDPDrmi:
  case X86::BLENDPDrri:
  case X86::VBLENDPDrmi:
  case X86::VBLENDPDrri:
    return SetBlendDomain(2, false);
  case X86::VBLENDPDYrmi:
  case X86::VBLENDPDYrri:
    return SetBlendDomain(4, true);
  case X86::BLENDPSrmi:
  case X86::BLENDPSrri:
  case X86::VBLENDPSrmi:
  case X86::VBLENDPSrri:
  case X86::VPBLENDDrmi:
  case X86::VPBLENDDrri:
    return SetBlendDomain(4, false);
  case X86::VBLENDPSYrmi:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDYrmi:
  case X86::VPBLENDDYrri:
    return SetBlendDomain(8, true);
  case X86::PBLENDWrmi:
  case X86::PBLENDWrri:
  case X86::VPBLENDWrmi:
  case X86::VPBLENDWrri:
    return SetBlendDomain(8, false);
  case X86::VPBLENDWYrmi:
  case X86::VPBLENDWYrri:
    return SetBlendDomain(16, true);
  case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
  case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
  case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
  case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
  case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
  case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
  case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
  case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
  case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
  case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
  case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
  case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
  case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
  case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
  case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
  case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm: {
    // Without DQI, convert EVEX instructions to VEX instructions.
    if (Subtarget.hasDQI())
      return false;

    const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
                                         ReplaceableCustomAVX512LogicInstrs);
    assert(table && "Instruction not found in table?");
    // Don't change integer Q instructions to D instructions and
    // use D instructions if we started with a PS instruction.
    if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
    MI.setDesc(get(table[Domain - 1]));
    return true;
  }
  case X86::UNPCKHPDrr:
  case X86::MOVHLPSrr:
    // We just need to commute the instruction which will switch the domains.
    if (Domain != dom && Domain != 3 &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
        MI.getOperand(0).getSubReg() == 0 &&
        MI.getOperand(1).getSubReg() == 0 &&
        MI.getOperand(2).getSubReg() == 0) {
      commuteInstruction(MI, false);
      return true;
    }
    // We must always return true for MOVHLPSrr.
    if (Opcode == X86::MOVHLPSrr)
      return true;
    break;
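  // SHUFPDrri selects one 64-bit element from each source, so the equivalent
  // SHUFPSrri must move the corresponding pairs of 32-bit elements. Starting
  // from 0x44 (elements 0,1 from src1 and 0,1 from src2), OR-ing in 0x0a or
  // 0xa0 bumps the low or high pair to elements 2,3. E.g. a SHUFPD immediate
  // of 0b01 becomes the SHUFPS immediate 0x4e: elements 2,3 of src1 followed
  // by elements 0,1 of src2.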
  case X86::SHUFPDrri: {
    if (Domain == 1) {
      unsigned Imm = MI.getOperand(3).getImm();
      unsigned NewImm = 0x44;
      if (Imm & 1) NewImm |= 0x0a;
      if (Imm & 2) NewImm |= 0xa0;
      MI.getOperand(3).setImm(NewImm);
      MI.setDesc(get(X86::SHUFPSrri));
    }
    return true;
  }
  }
  return false;
}
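
// The second element of the pair returned by getExecutionDomain() is a
// bitmask of reachable domains, with bit N standing for domain N:
// 0x2 = PackedSingle, 0x4 = PackedDouble, 0x8 = PackedInt. Thus 0xe means
// all three domains are available and 0x6 means only the two FP domains.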
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
  uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  unsigned opcode = MI.getOpcode();
  uint16_t validDomains = 0;
  if (domain) {
    // Attempt to match for custom instructions.
    validDomains = getExecutionDomainCustom(MI);
    if (validDomains)
      return std::make_pair(domain, validDomains);

    if (lookup(opcode, domain, ReplaceableInstrs)) {
      validDomains = 0xe;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
      validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
    } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
      validDomains = 0x6;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
      // Insert/extract instructions should only affect domain if AVX2
      // is enabled.
      if (!Subtarget.hasAVX2())
        return std::make_pair(0, 0);
      validDomains = 0xe;
    } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
                                                  ReplaceableInstrsAVX512DQ)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI()) {
      if (const uint16_t *table = lookupAVX512(opcode, domain,
                                             ReplaceableInstrsAVX512DQMasked)) {
        if (domain == 1 || (domain == 3 && table[3] == opcode))
          validDomains = 0xa;
        else
          validDomains = 0xc;
      }
    }
  }
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");

  // Attempt to match for custom instructions.
  if (setExecutionDomainCustom(MI, Domain))
    return;

  const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
  if (!table) { // try the other table
    assert((Subtarget.hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
  }
  if (!table) { // try the FP table
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
    assert((!table || Domain < 3) &&
           "Can only select PackedSingle or PackedDouble");
  }
  if (!table) { // try the other table
    assert(Subtarget.hasAVX2() &&
           "256-bit insert/extract only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
  }
  if (!table) { // try the AVX512 table
    assert(Subtarget.hasAVX512() && "Requires AVX-512");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
    // Don't change integer Q instructions to D instructions.
    if (table && Domain == 3 && table[3] == MI.getOpcode())
      Domain = 4;
  }
  if (!table) { // try the AVX512DQ table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
    // Don't change integer Q instructions to D instructions and
    // use D instructions if we started with a PS instruction.
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  if (!table) { // try the AVX512DQMasked table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  assert(table && "Cannot change domain");
  MI.setDesc(get(table[Domain - 1]));
}
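
// For example, a VPANDYrr whose inputs and result live entirely in
// floating-point code can be rewritten by setExecutionDomain(MI, 1) into
// VANDPSYrr via the ReplaceableInstrsAVX2 row
// { VANDPSYrr, VANDPDYrr, VPANDYrr }, avoiding a domain-crossing penalty on
// targets whose FP and integer bypass networks are distinct.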

/// Return the noop instruction to use for a noop.
void X86InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

bool X86InstrInfo::isHighLatencyDef(int opc) const {
  switch (opc) {
  default: return false;
  case X86::DIVPDrm:
  case X86::DIVPDrr:
  case X86::DIVPSrm:
  case X86::DIVPSrr:
  case X86::DIVSDrm:
  case X86::DIVSDrm_Int:
  case X86::DIVSDrr:
  case X86::DIVSDrr_Int:
  case X86::DIVSSrm:
  case X86::DIVSSrm_Int:
  case X86::DIVSSrr:
  case X86::DIVSSrr_Int:
  case X86::SQRTPDm:
  case X86::SQRTPDr:
  case X86::SQRTPSm:
  case X86::SQRTPSr:
  case X86::SQRTSDm:
  case X86::SQRTSDm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDr_Int:
  case X86::SQRTSSm:
  case X86::SQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSr_Int:
  // AVX instructions with high latency
  case X86::VDIVPDrm:
  case X86::VDIVPDrr:
  case X86::VDIVPDYrm:
  case X86::VDIVPDYrr:
  case X86::VDIVPSrm:
  case X86::VDIVPSrr:
  case X86::VDIVPSYrm:
  case X86::VDIVPSYrr:
  case X86::VDIVSDrm:
  case X86::VDIVSDrm_Int:
  case X86::VDIVSDrr:
  case X86::VDIVSDrr_Int:
  case X86::VDIVSSrm:
  case X86::VDIVSSrm_Int:
  case X86::VDIVSSrr:
  case X86::VDIVSSrr_Int:
  case X86::VSQRTPDm:
  case X86::VSQRTPDr:
  case X86::VSQRTPDYm:
  case X86::VSQRTPDYr:
  case X86::VSQRTPSm:
  case X86::VSQRTPSr:
  case X86::VSQRTPSYm:
  case X86::VSQRTPSYr:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSDr_Int:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSSr:
  case X86::VSQRTSSr_Int:
  // AVX512 instructions with high latency
  case X86::VDIVPDZ128rm:
  case X86::VDIVPDZ128rmb:
  case X86::VDIVPDZ128rmbk:
  case X86::VDIVPDZ128rmbkz:
  case X86::VDIVPDZ128rmk:
  case X86::VDIVPDZ128rmkz:
  case X86::VDIVPDZ128rr:
  case X86::VDIVPDZ128rrk:
  case X86::VDIVPDZ128rrkz:
  case X86::VDIVPDZ256rm:
  case X86::VDIVPDZ256rmb:
  case X86::VDIVPDZ256rmbk:
  case X86::VDIVPDZ256rmbkz:
  case X86::VDIVPDZ256rmk:
  case X86::VDIVPDZ256rmkz:
  case X86::VDIVPDZ256rr:
  case X86::VDIVPDZ256rrk:
  case X86::VDIVPDZ256rrkz:
  case X86::VDIVPDZrrb:
  case X86::VDIVPDZrrbk:
  case X86::VDIVPDZrrbkz:
  case X86::VDIVPDZrm:
  case X86::VDIVPDZrmb:
  case X86::VDIVPDZrmbk:
  case X86::VDIVPDZrmbkz:
  case X86::VDIVPDZrmk:
  case X86::VDIVPDZrmkz:
  case X86::VDIVPDZrr:
  case X86::VDIVPDZrrk:
  case X86::VDIVPDZrrkz:
  case X86::VDIVPSZ128rm:
  case X86::VDIVPSZ128rmb:
  case X86::VDIVPSZ128rmbk:
  case X86::VDIVPSZ128rmbkz:
  case X86::VDIVPSZ128rmk:
  case X86::VDIVPSZ128rmkz:
  case X86::VDIVPSZ128rr:
  case X86::VDIVPSZ128rrk:
  case X86::VDIVPSZ128rrkz:
  case X86::VDIVPSZ256rm:
  case X86::VDIVPSZ256rmb:
  case X86::VDIVPSZ256rmbk:
  case X86::VDIVPSZ256rmbkz:
  case X86::VDIVPSZ256rmk:
  case X86::VDIVPSZ256rmkz:
  case X86::VDIVPSZ256rr:
  case X86::VDIVPSZ256rrk:
  case X86::VDIVPSZ256rrkz:
  case X86::VDIVPSZrrb:
  case X86::VDIVPSZrrbk:
  case X86::VDIVPSZrrbkz:
  case X86::VDIVPSZrm:
  case X86::VDIVPSZrmb:
  case X86::VDIVPSZrmbk:
  case X86::VDIVPSZrmbkz:
  case X86::VDIVPSZrmk:
  case X86::VDIVPSZrmkz:
  case X86::VDIVPSZrr:
  case X86::VDIVPSZrrk:
  case X86::VDIVPSZrrkz:
  case X86::VDIVSDZrm:
  case X86::VDIVSDZrr:
  case X86::VDIVSDZrm_Int:
  case X86::VDIVSDZrm_Intk:
  case X86::VDIVSDZrm_Intkz:
  case X86::VDIVSDZrr_Int:
  case X86::VDIVSDZrr_Intk:
  case X86::VDIVSDZrr_Intkz:
  case X86::VDIVSDZrrb_Int:
  case X86::VDIVSDZrrb_Intk:
  case X86::VDIVSDZrrb_Intkz:
  case X86::VDIVSSZrm:
  case X86::VDIVSSZrr:
  case X86::VDIVSSZrm_Int:
  case X86::VDIVSSZrm_Intk:
  case X86::VDIVSSZrm_Intkz:
  case X86::VDIVSSZrr_Int:
  case X86::VDIVSSZrr_Intk:
  case X86::VDIVSSZrr_Intkz:
  case X86::VDIVSSZrrb_Int:
  case X86::VDIVSSZrrb_Intk:
  case X86::VDIVSSZrrb_Intkz:
  case X86::VSQRTPDZ128m:
  case X86::VSQRTPDZ128mb:
  case X86::VSQRTPDZ128mbk:
  case X86::VSQRTPDZ128mbkz:
  case X86::VSQRTPDZ128mk:
  case X86::VSQRTPDZ128mkz:
  case X86::VSQRTPDZ128r:
  case X86::VSQRTPDZ128rk:
  case X86::VSQRTPDZ128rkz:
  case X86::VSQRTPDZ256m:
  case X86::VSQRTPDZ256mb:
  case X86::VSQRTPDZ256mbk:
  case X86::VSQRTPDZ256mbkz:
  case X86::VSQRTPDZ256mk:
  case X86::VSQRTPDZ256mkz:
  case X86::VSQRTPDZ256r:
  case X86::VSQRTPDZ256rk:
  case X86::VSQRTPDZ256rkz:
  case X86::VSQRTPDZm:
  case X86::VSQRTPDZmb:
  case X86::VSQRTPDZmbk:
  case X86::VSQRTPDZmbkz:
  case X86::VSQRTPDZmk:
  case X86::VSQRTPDZmkz:
  case X86::VSQRTPDZr:
  case X86::VSQRTPDZrb:
  case X86::VSQRTPDZrbk:
  case X86::VSQRTPDZrbkz:
  case X86::VSQRTPDZrk:
  case X86::VSQRTPDZrkz:
  case X86::VSQRTPSZ128m:
  case X86::VSQRTPSZ128mb:
  case X86::VSQRTPSZ128mbk:
  case X86::VSQRTPSZ128mbkz:
  case X86::VSQRTPSZ128mk:
  case X86::VSQRTPSZ128mkz:
  case X86::VSQRTPSZ128r:
  case X86::VSQRTPSZ128rk:
  case X86::VSQRTPSZ128rkz:
  case X86::VSQRTPSZ256m:
  case X86::VSQRTPSZ256mb:
  case X86::VSQRTPSZ256mbk:
  case X86::VSQRTPSZ256mbkz:
  case X86::VSQRTPSZ256mk:
  case X86::VSQRTPSZ256mkz:
  case X86::VSQRTPSZ256r:
  case X86::VSQRTPSZ256rk:
  case X86::VSQRTPSZ256rkz:
  case X86::VSQRTPSZm:
  case X86::VSQRTPSZmb:
  case X86::VSQRTPSZmbk:
  case X86::VSQRTPSZmbkz:
  case X86::VSQRTPSZmk:
  case X86::VSQRTPSZmkz:
  case X86::VSQRTPSZr:
  case X86::VSQRTPSZrb:
  case X86::VSQRTPSZrbk:
  case X86::VSQRTPSZrbkz:
  case X86::VSQRTPSZrk:
  case X86::VSQRTPSZrkz:
  case X86::VSQRTSDZm:
  case X86::VSQRTSDZm_Int:
  case X86::VSQRTSDZm_Intk:
  case X86::VSQRTSDZm_Intkz:
  case X86::VSQRTSDZr:
  case X86::VSQRTSDZr_Int:
  case X86::VSQRTSDZr_Intk:
  case X86::VSQRTSDZr_Intkz:
  case X86::VSQRTSDZrb_Int:
  case X86::VSQRTSDZrb_Intk:
  case X86::VSQRTSDZrb_Intkz:
  case X86::VSQRTSSZm:
  case X86::VSQRTSSZm_Int:
  case X86::VSQRTSSZm_Intk:
  case X86::VSQRTSSZm_Intkz:
  case X86::VSQRTSSZr:
  case X86::VSQRTSSZr_Int:
  case X86::VSQRTSSZr_Intk:
  case X86::VSQRTSSZr_Intkz:
  case X86::VSQRTSSZrb_Int:
  case X86::VSQRTSSZrb_Intk:
  case X86::VSQRTSSZrb_Intkz:

  case X86::VGATHERDPDYrm:
  case X86::VGATHERDPDZ128rm:
  case X86::VGATHERDPDZ256rm:
  case X86::VGATHERDPDZrm:
  case X86::VGATHERDPDrm:
  case X86::VGATHERDPSYrm:
  case X86::VGATHERDPSZ128rm:
  case X86::VGATHERDPSZ256rm:
  case X86::VGATHERDPSZrm:
  case X86::VGATHERDPSrm:
  case X86::VGATHERPF0DPDm:
  case X86::VGATHERPF0DPSm:
  case X86::VGATHERPF0QPDm:
  case X86::VGATHERPF0QPSm:
  case X86::VGATHERPF1DPDm:
  case X86::VGATHERPF1DPSm:
  case X86::VGATHERPF1QPDm:
  case X86::VGATHERPF1QPSm:
  case X86::VGATHERQPDYrm:
  case X86::VGATHERQPDZ128rm:
  case X86::VGATHERQPDZ256rm:
  case X86::VGATHERQPDZrm:
  case X86::VGATHERQPDrm:
  case X86::VGATHERQPSYrm:
  case X86::VGATHERQPSZ128rm:
  case X86::VGATHERQPSZ256rm:
  case X86::VGATHERQPSZrm:
  case X86::VGATHERQPSrm:
  case X86::VPGATHERDDYrm:
  case X86::VPGATHERDDZ128rm:
  case X86::VPGATHERDDZ256rm:
  case X86::VPGATHERDDZrm:
  case X86::VPGATHERDDrm:
  case X86::VPGATHERDQYrm:
  case X86::VPGATHERDQZ128rm:
  case X86::VPGATHERDQZ256rm:
  case X86::VPGATHERDQZrm:
  case X86::VPGATHERDQrm:
  case X86::VPGATHERQDYrm:
  case X86::VPGATHERQDZ128rm:
  case X86::VPGATHERQDZ256rm:
  case X86::VPGATHERQDZrm:
  case X86::VPGATHERQDrm:
  case X86::VPGATHERQQYrm:
  case X86::VPGATHERQQZ128rm:
  case X86::VPGATHERQQZ256rm:
  case X86::VPGATHERQQZrm:
  case X86::VPGATHERQQrm:
  case X86::VSCATTERDPDZ128mr:
  case X86::VSCATTERDPDZ256mr:
  case X86::VSCATTERDPDZmr:
  case X86::VSCATTERDPSZ128mr:
  case X86::VSCATTERDPSZ256mr:
  case X86::VSCATTERDPSZmr:
  case X86::VSCATTERPF0DPDm:
  case X86::VSCATTERPF0DPSm:
  case X86::VSCATTERPF0QPDm:
  case X86::VSCATTERPF0QPSm:
  case X86::VSCATTERPF1DPDm:
  case X86::VSCATTERPF1DPSm:
  case X86::VSCATTERPF1QPDm:
  case X86::VSCATTERPF1QPSm:
  case X86::VSCATTERQPDZ128mr:
  case X86::VSCATTERQPDZ256mr:
  case X86::VSCATTERQPDZmr:
  case X86::VSCATTERQPSZ128mr:
  case X86::VSCATTERQPSZ256mr:
  case X86::VSCATTERQPSZmr:
  case X86::VPSCATTERDDZ128mr:
  case X86::VPSCATTERDDZ256mr:
  case X86::VPSCATTERDDZmr:
  case X86::VPSCATTERDQZ128mr:
  case X86::VPSCATTERDQZ256mr:
  case X86::VPSCATTERDQZmr:
  case X86::VPSCATTERQDZ128mr:
  case X86::VPSCATTERQDZ256mr:
  case X86::VPSCATTERQDZmr:
  case X86::VPSCATTERQQZ128mr:
  case X86::VPSCATTERQQZ256mr:
  case X86::VPSCATTERQQZmr:
    return true;
  }
}

bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                         const MachineRegisterInfo *MRI,
                                         const MachineInstr &DefMI,
                                         unsigned DefIdx,
                                         const MachineInstr &UseMI,
                                         unsigned UseIdx) const {
  return isHighLatencyDef(DefMI.getOpcode());
}

bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
                                           const MachineBasicBlock *MBB) const {
  assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) &&
         "Reassociation needs binary operators");

  // Integer binary math/logic instructions have a third source operand:
  // the EFLAGS register. That operand must be both defined here and never
  // used; i.e., it must be dead. If the EFLAGS operand is live, then we
  // cannot change anything because rearranging the operands could affect
  // other instructions that depend on the exact status flags (zero, sign,
  // etc.) that are set by using these particular operands with this
  // operation.
  if (Inst.getNumOperands() == 4) {
    assert(Inst.getOperand(3).isReg() &&
           Inst.getOperand(3).getReg() == X86::EFLAGS &&
           "Unexpected operand in reassociable instruction");
    if (!Inst.getOperand(3).isDead())
      return false;
  }

  return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
}
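
// For example, an 'add %ecx, %eax' followed by a conditional branch that
// reads the flags from that add cannot be reassociated: the EFLAGS def is
// live, and changing the evaluation order would change which flags the
// branch observes.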

// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case X86::AND8rr:
  case X86::AND16rr:
  case X86::AND32rr:
  case X86::AND64rr:
  case X86::OR8rr:
  case X86::OR16rr:
  case X86::OR32rr:
  case X86::OR64rr:
  case X86::XOR8rr:
  case X86::XOR16rr:
  case X86::XOR32rr:
  case X86::XOR64rr:
  case X86::IMUL16rr:
  case X86::IMUL32rr:
  case X86::IMUL64rr:
  case X86::PANDrr:
  case X86::PORrr:
  case X86::PXORrr:
  case X86::ANDPDrr:
  case X86::ANDPSrr:
  case X86::ORPDrr:
  case X86::ORPSrr:
  case X86::XORPDrr:
  case X86::XORPSrr:
  case X86::PADDBrr:
  case X86::PADDWrr:
  case X86::PADDDrr:
  case X86::PADDQrr:
  case X86::PMULLWrr:
  case X86::PMULLDrr:
  case X86::PMAXSBrr:
  case X86::PMAXSDrr:
  case X86::PMAXSWrr:
  case X86::PMAXUBrr:
  case X86::PMAXUDrr:
  case X86::PMAXUWrr:
  case X86::PMINSBrr:
  case X86::PMINSDrr:
  case X86::PMINSWrr:
  case X86::PMINUBrr:
  case X86::PMINUDrr:
  case X86::PMINUWrr:
  case X86::VPANDrr:
  case X86::VPANDYrr:
  case X86::VPANDDZ128rr:
  case X86::VPANDDZ256rr:
  case X86::VPANDDZrr:
  case X86::VPANDQZ128rr:
  case X86::VPANDQZ256rr:
  case X86::VPANDQZrr:
  case X86::VPORrr:
  case X86::VPORYrr:
  case X86::VPORDZ128rr:
  case X86::VPORDZ256rr:
  case X86::VPORDZrr:
  case X86::VPORQZ128rr:
  case X86::VPORQZ256rr:
  case X86::VPORQZrr:
  case X86::VPXORrr:
  case X86::VPXORYrr:
  case X86::VPXORDZ128rr:
  case X86::VPXORDZ256rr:
  case X86::VPXORDZrr:
  case X86::VPXORQZ128rr:
  case X86::VPXORQZ256rr:
  case X86::VPXORQZrr:
  case X86::VANDPDrr:
  case X86::VANDPSrr:
  case X86::VANDPDYrr:
  case X86::VANDPSYrr:
  case X86::VANDPDZ128rr:
  case X86::VANDPSZ128rr:
  case X86::VANDPDZ256rr:
  case X86::VANDPSZ256rr:
  case X86::VANDPDZrr:
  case X86::VANDPSZrr:
  case X86::VORPDrr:
  case X86::VORPSrr:
  case X86::VORPDYrr:
  case X86::VORPSYrr:
  case X86::VORPDZ128rr:
  case X86::VORPSZ128rr:
  case X86::VORPDZ256rr:
  case X86::VORPSZ256rr:
  case X86::VORPDZrr:
  case X86::VORPSZrr:
  case X86::VXORPDrr:
  case X86::VXORPSrr:
  case X86::VXORPDYrr:
  case X86::VXORPSYrr:
  case X86::VXORPDZ128rr:
  case X86::VXORPSZ128rr:
  case X86::VXORPDZ256rr:
  case X86::VXORPSZ256rr:
  case X86::VXORPDZrr:
  case X86::VXORPSZrr:
  case X86::KADDBrr:
  case X86::KADDWrr:
  case X86::KADDDrr:
  case X86::KADDQrr:
  case X86::KANDBrr:
  case X86::KANDWrr:
  case X86::KANDDrr:
  case X86::KANDQrr:
  case X86::KORBrr:
  case X86::KORWrr:
  case X86::KORDrr:
  case X86::KORQrr:
  case X86::KXORBrr:
  case X86::KXORWrr:
  case X86::KXORDrr:
  case X86::KXORQrr:
  case X86::VPADDBrr:
  case X86::VPADDWrr:
  case X86::VPADDDrr:
  case X86::VPADDQrr:
  case X86::VPADDBYrr:
  case X86::VPADDWYrr:
  case X86::VPADDDYrr:
  case X86::VPADDQYrr:
  case X86::VPADDBZ128rr:
  case X86::VPADDWZ128rr:
  case X86::VPADDDZ128rr:
  case X86::VPADDQZ128rr:
  case X86::VPADDBZ256rr:
  case X86::VPADDWZ256rr:
  case X86::VPADDDZ256rr:
  case X86::VPADDQZ256rr:
  case X86::VPADDBZrr:
  case X86::VPADDWZrr:
  case X86::VPADDDZrr:
  case X86::VPADDQZrr:
  case X86::VPMULLWrr:
  case X86::VPMULLWYrr:
  case X86::VPMULLWZ128rr:
  case X86::VPMULLWZ256rr:
  case X86::VPMULLWZrr:
  case X86::VPMULLDrr:
  case X86::VPMULLDYrr:
  case X86::VPMULLDZ128rr:
  case X86::VPMULLDZ256rr:
  case X86::VPMULLDZrr:
  case X86::VPMULLQZ128rr:
  case X86::VPMULLQZ256rr:
  case X86::VPMULLQZrr:
  case X86::VPMAXSBrr:
  case X86::VPMAXSBYrr:
  case X86::VPMAXSBZ128rr:
  case X86::VPMAXSBZ256rr:
  case X86::VPMAXSBZrr:
  case X86::VPMAXSDrr:
  case X86::VPMAXSDYrr:
  case X86::VPMAXSDZ128rr:
  case X86::VPMAXSDZ256rr:
  case X86::VPMAXSDZrr:
  case X86::VPMAXSQZ128rr:
  case X86::VPMAXSQZ256rr:
  case X86::VPMAXSQZrr:
  case X86::VPMAXSWrr:
  case X86::VPMAXSWYrr:
  case X86::VPMAXSWZ128rr:
  case X86::VPMAXSWZ256rr:
  case X86::VPMAXSWZrr:
  case X86::VPMAXUBrr:
  case X86::VPMAXUBYrr:
  case X86::VPMAXUBZ128rr:
  case X86::VPMAXUBZ256rr:
  case X86::VPMAXUBZrr:
  case X86::VPMAXUDrr:
  case X86::VPMAXUDYrr:
  case X86::VPMAXUDZ128rr:
  case X86::VPMAXUDZ256rr:
  case X86::VPMAXUDZrr:
  case X86::VPMAXUQZ128rr:
  case X86::VPMAXUQZ256rr:
  case X86::VPMAXUQZrr:
  case X86::VPMAXUWrr:
  case X86::VPMAXUWYrr:
  case X86::VPMAXUWZ128rr:
  case X86::VPMAXUWZ256rr:
  case X86::VPMAXUWZrr:
  case X86::VPMINSBrr:
  case X86::VPMINSBYrr:
  case X86::VPMINSBZ128rr:
  case X86::VPMINSBZ256rr:
  case X86::VPMINSBZrr:
  case X86::VPMINSDrr:
  case X86::VPMINSDYrr:
  case X86::VPMINSDZ128rr:
  case X86::VPMINSDZ256rr:
  case X86::VPMINSDZrr:
  case X86::VPMINSQZ128rr:
  case X86::VPMINSQZ256rr:
  case X86::VPMINSQZrr:
  case X86::VPMINSWrr:
  case X86::VPMINSWYrr:
  case X86::VPMINSWZ128rr:
  case X86::VPMINSWZ256rr:
  case X86::VPMINSWZrr:
  case X86::VPMINUBrr:
  case X86::VPMINUBYrr:
  case X86::VPMINUBZ128rr:
  case X86::VPMINUBZ256rr:
  case X86::VPMINUBZrr:
  case X86::VPMINUDrr:
  case X86::VPMINUDYrr:
  case X86::VPMINUDZ128rr:
  case X86::VPMINUDZ256rr:
  case X86::VPMINUDZrr:
  case X86::VPMINUQZ128rr:
  case X86::VPMINUQZ256rr:
  case X86::VPMINUQZrr:
  case X86::VPMINUWrr:
  case X86::VPMINUWYrr:
  case X86::VPMINUWZ128rr:
  case X86::VPMINUWZ256rr:
  case X86::VPMINUWZrr:
  // Normal min/max instructions are not commutative because of NaN and signed
  // zero semantics, but these are. Thus, there's no need to check for global
  // relaxed math; the instructions themselves have the properties we need.
  case X86::MAXCPDrr:
  case X86::MAXCPSrr:
  case X86::MAXCSDrr:
  case X86::MAXCSSrr:
  case X86::MINCPDrr:
  case X86::MINCPSrr:
  case X86::MINCSDrr:
  case X86::MINCSSrr:
  case X86::VMAXCPDrr:
  case X86::VMAXCPSrr:
  case X86::VMAXCPDYrr:
  case X86::VMAXCPSYrr:
  case X86::VMAXCPDZ128rr:
  case X86::VMAXCPSZ128rr:
  case X86::VMAXCPDZ256rr:
  case X86::VMAXCPSZ256rr:
  case X86::VMAXCPDZrr:
  case X86::VMAXCPSZrr:
  case X86::VMAXCSDrr:
  case X86::VMAXCSSrr:
  case X86::VMAXCSDZrr:
  case X86::VMAXCSSZrr:
  case X86::VMINCPDrr:
  case X86::VMINCPSrr:
  case X86::VMINCPDYrr:
  case X86::VMINCPSYrr:
  case X86::VMINCPDZ128rr:
  case X86::VMINCPSZ128rr:
  case X86::VMINCPDZ256rr:
  case X86::VMINCPSZ256rr:
  case X86::VMINCPDZrr:
  case X86::VMINCPSZrr:
  case X86::VMINCSDrr:
  case X86::VMINCSSrr:
  case X86::VMINCSDZrr:
  case X86::VMINCSSZrr:
    return true;
  case X86::ADDPDrr:
  case X86::ADDPSrr:
  case X86::ADDSDrr:
  case X86::ADDSSrr:
  case X86::MULPDrr:
  case X86::MULPSrr:
  case X86::MULSDrr:
  case X86::MULSSrr:
  case X86::VADDPDrr:
  case X86::VADDPSrr:
  case X86::VADDPDYrr:
  case X86::VADDPSYrr:
  case X86::VADDPDZ128rr:
  case X86::VADDPSZ128rr:
  case X86::VADDPDZ256rr:
  case X86::VADDPSZ256rr:
  case X86::VADDPDZrr:
  case X86::VADDPSZrr:
  case X86::VADDSDrr:
  case X86::VADDSSrr:
  case X86::VADDSDZrr:
  case X86::VADDSSZrr:
  case X86::VMULPDrr:
  case X86::VMULPSrr:
  case X86::VMULPDYrr:
  case X86::VMULPSYrr:
  case X86::VMULPDZ128rr:
  case X86::VMULPSZ128rr:
  case X86::VMULPDZ256rr:
  case X86::VMULPSZ256rr:
  case X86::VMULPDZrr:
  case X86::VMULPSZrr:
  case X86::VMULSDrr:
  case X86::VMULSSrr:
  case X86::VMULSDZrr:
  case X86::VMULSSZrr:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    return false;
  }
}
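
// Reassociation uses this hook to turn a serial chain like ((a + b) + c) + d
// into (a + b) + (c + d), exposing instruction-level parallelism. For the FP
// add/mul opcodes above that transform can change rounding, which is why
// they are only reported as associative under unsafe FP math.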

/// This is an architecture-specific helper function of reassociateOps.
/// Set special operand attributes for new instructions after reassociation.
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
                                         MachineInstr &OldMI2,
                                         MachineInstr &NewMI1,
                                         MachineInstr &NewMI2) const {
  // Integer instructions define an implicit EFLAGS source register operand as
  // the third source (fourth total) operand.
  if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4)
    return;

  assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 &&
         "Unexpected instruction type for reassociation");

  MachineOperand &OldOp1 = OldMI1.getOperand(3);
  MachineOperand &OldOp2 = OldMI2.getOperand(3);
  MachineOperand &NewOp1 = NewMI1.getOperand(3);
  MachineOperand &NewOp2 = NewMI2.getOperand(3);

  assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() &&
         "Must have dead EFLAGS operand in reassociable instruction");
  assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() &&
         "Must have dead EFLAGS operand in reassociable instruction");

  (void)OldOp1;
  (void)OldOp2;

  assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS &&
         "Unexpected operand in reassociable instruction");
  assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS &&
         "Unexpected operand in reassociable instruction");

  // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
  // of this pass or other passes. The EFLAGS operands must be dead in these new
  // instructions because the EFLAGS operands in the original instructions must
  // be dead in order for reassociation to occur.
  NewOp1.setIsDead();
  NewOp2.setIsDead();
}

std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF, 0u);
}

ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace X86II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
      {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
      {MO_GOT, "x86-got"},
      {MO_GOTOFF, "x86-gotoff"},
      {MO_GOTPCREL, "x86-gotpcrel"},
      {MO_PLT, "x86-plt"},
      {MO_TLSGD, "x86-tlsgd"},
      {MO_TLSLD, "x86-tlsld"},
      {MO_TLSLDM, "x86-tlsldm"},
      {MO_GOTTPOFF, "x86-gottpoff"},
      {MO_INDNTPOFF, "x86-indntpoff"},
      {MO_TPOFF, "x86-tpoff"},
      {MO_DTPOFF, "x86-dtpoff"},
      {MO_NTPOFF, "x86-ntpoff"},
      {MO_GOTNTPOFF, "x86-gotntpoff"},
      {MO_DLLIMPORT, "x86-dllimport"},
      {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
      {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
      {MO_TLVP, "x86-tlvp"},
      {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
      {MO_SECREL, "x86-secrel"},
      {MO_COFFSTUB, "x86-coffstub"}};
  return makeArrayRef(TargetFlags);
}

namespace {
  /// Create Global Base Reg pass. This initializes the PIC
  /// global base register for x86-32.
  struct CGBR : public MachineFunctionPass {
    static char ID;
    CGBR() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      const X86TargetMachine *TM =
          static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();

      // Don't do anything in the 64-bit small and kernel code models. They use
      // RIP-relative addressing for everything.
      if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
                            TM->getCodeModel() == CodeModel::Kernel))
        return false;

      // Only emit a global base reg in PIC mode.
      if (!TM->isPositionIndependent())
        return false;

      X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
      unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();

      // If we didn't need a GlobalBaseReg, don't insert code.
      if (GlobalBaseReg == 0)
        return false;

      // Insert the set of GlobalBaseReg into the first MBB of the function.
      MachineBasicBlock &FirstMBB = MF.front();
      MachineBasicBlock::iterator MBBI = FirstMBB.begin();
      DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      const X86InstrInfo *TII = STI.getInstrInfo();

      unsigned PC;
      if (STI.isPICStyleGOT())
        PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      else
        PC = GlobalBaseReg;

      if (STI.is64Bit()) {
        if (TM->getCodeModel() == CodeModel::Medium) {
          // In the medium code model, use a RIP-relative LEA to materialize
          // the GOT.
          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
              .addReg(0);
        } else if (TM->getCodeModel() == CodeModel::Large) {
          // In the large code model, we are aiming for this code, though the
          // register allocation may vary:
          //   leaq .LN$pb(%rip), %rax
          //   movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
          //   addq %rcx, %rax
          // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
          unsigned PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
          unsigned GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addSym(MF.getPICBaseSymbol())
              .addReg(0);
          std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                 X86II::MO_PIC_BASE_OFFSET);
          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
              .addReg(PBReg, RegState::Kill)
              .addReg(GOTReg, RegState::Kill);
        } else {
          llvm_unreachable("unexpected code model");
        }
      } else {
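        // MOVPC32r is a pseudo that is later expanded into the classic
        // 32-bit PIC idiom: a call to the next instruction followed by a pop
        // of the return address, which leaves the PIC base address in PC.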
        // Operand of MovePCtoStack is completely ignored by asm printer. It's
        // only used in JIT code emission as displacement to pc.
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

        // If we're using vanilla 'GOT' PIC style, we should use relative
        // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
        if (STI.isPICStyleGOT()) {
          // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
          // %some_register
          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
              .addReg(PC)
              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                 X86II::MO_GOT_ABSOLUTE_ADDRESS);
        }
      }

      return true;
    }

    StringRef getPassName() const override {
      return "X86 PIC Global Base Reg Initialization";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char CGBR::ID = 0;
FunctionPass*
llvm::createX86GlobalBaseRegPass() { return new CGBR(); }

namespace {
  struct LDTLSCleanup : public MachineFunctionPass {
    static char ID;
    LDTLSCleanup() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      if (skipFunction(MF.getFunction()))
        return false;

      X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
      if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
        // No point folding accesses if there aren't at least two.
        return false;
      }

      MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
      return VisitNode(DT->getRootNode(), 0);
    }

    // Visit the dominator subtree rooted at Node in pre-order.
    // If TLSBaseAddrReg is non-null, then use that to replace any
    // TLS_base_addr instructions. Otherwise, create the register
    // when the first such instruction is seen, and then use it
    // as we encounter more instructions.
    bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
      MachineBasicBlock *BB = Node->getBlock();
      bool Changed = false;

      // Traverse the current block.
      for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
           ++I) {
        switch (I->getOpcode()) {
        case X86::TLS_base_addr32:
        case X86::TLS_base_addr64:
          if (TLSBaseAddrReg)
            I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
          else
            I = SetRegister(*I, &TLSBaseAddrReg);
          Changed = true;
          break;
        default:
          break;
        }
      }

      // Visit the children of this block in the dominator tree.
      for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
           I != E; ++I) {
        Changed |= VisitNode(*I, TLSBaseAddrReg);
      }

      return Changed;
    }

    // Replace the TLS_base_addr instruction I with a copy from
    // TLSBaseAddrReg, returning the new instruction.
    MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
                                         unsigned TLSBaseAddrReg) {
      MachineFunction *MF = I.getParent()->getParent();
      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
      const bool is64Bit = STI.is64Bit();
      const X86InstrInfo *TII = STI.getInstrInfo();

      // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
      MachineInstr *Copy =
          BuildMI(*I.getParent(), I, I.getDebugLoc(),
                  TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
              .addReg(TLSBaseAddrReg);

      // Erase the TLS_base_addr instruction.
      I.eraseFromParent();

      return Copy;
    }

    // Create a virtual register in *TLSBaseAddrReg, and populate it by
    // inserting a copy instruction after I. Returns the new instruction.
    MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
      MachineFunction *MF = I.getParent()->getParent();
      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
      const bool is64Bit = STI.is64Bit();
      const X86InstrInfo *TII = STI.getInstrInfo();

      // Create a virtual register for the TLS base address.
      MachineRegisterInfo &RegInfo = MF->getRegInfo();
      *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
                                                      ? &X86::GR64RegClass
                                                      : &X86::GR32RegClass);

      // Insert a copy from RAX/EAX to TLSBaseAddrReg.
      MachineInstr *Next = I.getNextNode();
      MachineInstr *Copy =
          BuildMI(*I.getParent(), Next, I.getDebugLoc(),
                  TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
              .addReg(is64Bit ? X86::RAX : X86::EAX);

      return Copy;
    }

    StringRef getPassName() const override {
      return "Local Dynamic TLS Access Clean-up";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }

/// Constants defining how certain sequences should be outlined.
///
/// \p MachineOutlinerDefault implies that the function is called with a call
/// instruction, and a return must be emitted for the outlined function frame.
///
/// That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION  I1
/// I3                             I2
///                                I3
///                                ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION   I1
/// ret                            I2
///                                ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass {
  MachineOutlinerDefault,
  MachineOutlinerTailCall
};
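
// With the 1-byte-per-instruction approximation used below, outlining a
// three-instruction sequence as a tail call costs each call site 1 byte (the
// jmp) and the outlined function 3 + 0 bytes, while the default flavor costs
// 1 byte (the call) per site and 3 + 1 bytes (the appended ret).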
outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  unsigned SequenceSize =
      std::accumulate(RepeatedSequenceLocs[0].front(),
                      std::next(RepeatedSequenceLocs[0].back()), 0,
                      [](unsigned Sum, const MachineInstr &MI) {
                        // FIXME: x86 doesn't implement getInstSizeInBytes, so
                        // we can't tell the cost. Just assume each instruction
                        // is one byte.
                        if (MI.isDebugInstr() || MI.isKill())
                          return Sum;
                        return Sum + 1;
                      });

  // FIXME: Use real size in bytes for call and ret instructions.
  if (RepeatedSequenceLocs[0].back()->isTerminator()) {
    for (outliner::Candidate &C : RepeatedSequenceLocs)
      C.setCallInfo(MachineOutlinerTailCall, 1);

    return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                      0, // Number of bytes to emit frame.
                                      MachineOutlinerTailCall // Type of frame.
                                      );
  }

  for (outliner::Candidate &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, 1);

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
                                    MachineOutlinerDefault);
}

bool X86InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Does the function use a red zone? If it does, then we can't risk messing
  // with the stack.
  if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
    // It could have a red zone. If it does, then we don't want to touch it.
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    if (!X86FI || X86FI->getUsesRedZone())
      return false;
  }

  // If we *don't* want to outline from things that could potentially be
  // deduped, then return false.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // This function is viable for outlining, so return true.
  return true;
}

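// Illustrative classifications for getOutliningType below (sketched MIR, not
// taken from a real compilation): a RIP-relative load such as
//
//   $rax = MOV64rm $rip, 1, $noreg, @g, $noreg
//
// reads $rip and is rejected, since executing it behind a call would change
// the instruction pointer it observes. A stack access is rejected for a
// similar reason: the call inserted by outlining pushes a return address, so
// $rsp-relative offsets no longer line up inside the outlined body. Something
// like
//
//   $eax = MOV32ri 42
//
// trips none of the checks and is classified Legal.
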
outliner::InstrType
X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                               unsigned Flags) const {
  MachineInstr &MI = *MIT;
  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much so we can go
  // ahead and skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a tail call? If yes, we can outline as a tail call.
  if (isTailCall(MI))
    return outliner::InstrType::Legal;

  // Is this the terminator of a basic block?
  if (MI.isTerminator() || MI.isReturn()) {

    // Does its parent have any successors in its MachineFunction?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It does, so we can't tail call it.
    return outliner::InstrType::Illegal;
  }

  // Don't outline anything that modifies or reads from the stack pointer.
  //
  // FIXME: There are instructions which are being manually built without
  // explicit uses/defs so we also have to check the MCInstrDesc. We should be
  // able to remove the extra checks once those are fixed up. For example,
  // sometimes we might get something like %rax = POP64r 1. This won't be
  // caught by modifiesRegister or readsRegister even though the instruction
  // really ought to be formed so that modifiesRegister/readsRegister would
  // catch it.
  if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
    return outliner::InstrType::Illegal;

  // Outlined calls change the instruction pointer, so don't read from it.
  if (MI.readsRegister(X86::RIP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
    return outliner::InstrType::Illegal;

  // Positions (e.g. labels and CFI instructions) can't safely be outlined.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Make sure none of the operands of this instruction do anything tricky.
  for (const MachineOperand &MOP : MI.operands())
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

  return outliner::InstrType::Legal;
}

void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      const outliner::OutlinedFunction &OF)
    const {
  // If we're a tail call, we already have a return, so don't do anything.
  if (OF.FrameConstructionID == MachineOutlinerTailCall)
    return;

  // We're a normal call, so our sequence doesn't have a return instruction.
  // Add it in.
  MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
  MBB.insert(MBB.end(), retq);
}

MachineBasicBlock::iterator
X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator &It,
                                 MachineFunction &MF,
                                 const outliner::Candidate &C) const {
  // Is it a tail call?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // Yes, just insert a JMP.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  } else {
    // No, insert a call.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  }

  return It;
}

#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"