X86InstrInfo.cpp revision 296417
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);

enum {
  // Select which memory operand is being unfolded.
  // (stored in bits 0 - 3)
  TB_INDEX_0    = 0,
  TB_INDEX_1    = 1,
  TB_INDEX_2    = 2,
  TB_INDEX_3    = 3,
  TB_INDEX_4    = 4,
  TB_INDEX_MASK = 0xf,

  // Do not insert the reverse map (MemOp -> RegOp) into the table.
  // This may be needed because there is a many -> one mapping.
  TB_NO_REVERSE = 1 << 4,

  // Do not insert the forward map (RegOp -> MemOp) into the table.
  // This is needed for Native Client, which prohibits branch
  // instructions from using a memory operand.
  TB_NO_FORWARD = 1 << 5,

  TB_FOLDED_LOAD  = 1 << 6,
  TB_FOLDED_STORE = 1 << 7,

  // Minimum alignment required for load/store.
  // Used for RegOp->MemOp conversion.
  // (stored in bits 8 - 15)
  TB_ALIGN_SHIFT = 8,
  TB_ALIGN_NONE  =    0 << TB_ALIGN_SHIFT,
  TB_ALIGN_16    =   16 << TB_ALIGN_SHIFT,
  TB_ALIGN_32    =   32 << TB_ALIGN_SHIFT,
  TB_ALIGN_64    =   64 << TB_ALIGN_SHIFT,
  TB_ALIGN_MASK  = 0xff << TB_ALIGN_SHIFT
};

struct X86MemoryFoldTableEntry {
  uint16_t RegOp;
  uint16_t MemOp;
  uint16_t Flags;
};
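// A worked example of the Flags packing above (illustrative sanity checks,
// not load-bearing): the unfold index lives in bits 0-3, the boolean
// properties in bits 4-7, and the required alignment value in bits 8-15, so
// a store-folding entry built from TB_INDEX_0 | TB_FOLDED_STORE | TB_ALIGN_16
// packs to 0x0 | 0x0080 | 0x1000 = 0x1080.
static_assert((TB_INDEX_0 | TB_FOLDED_STORE | TB_ALIGN_16) == 0x1080,
              "example of the flag packing");
static_assert((TB_ALIGN_64 & TB_ALIGN_MASK) == TB_ALIGN_64 &&
              (TB_ALIGN_64 & ~TB_ALIGN_MASK) == 0,
              "alignment values fit entirely in bits 8-15");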
// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET),
      Subtarget(STI), RI(STI.getTargetTriple()) {

  static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
    { X86::ADC32ri, X86::ADC32mi, 0 },
    { X86::ADC32ri8, X86::ADC32mi8, 0 },
    { X86::ADC32rr, X86::ADC32mr, 0 },
    { X86::ADC64ri32, X86::ADC64mi32, 0 },
    { X86::ADC64ri8, X86::ADC64mi8, 0 },
    { X86::ADC64rr, X86::ADC64mr, 0 },
    { X86::ADD16ri, X86::ADD16mi, 0 },
    { X86::ADD16ri8, X86::ADD16mi8, 0 },
    { X86::ADD16ri_DB, X86::ADD16mi, TB_NO_REVERSE },
    { X86::ADD16ri8_DB, X86::ADD16mi8, TB_NO_REVERSE },
    { X86::ADD16rr, X86::ADD16mr, 0 },
    { X86::ADD16rr_DB, X86::ADD16mr, TB_NO_REVERSE },
    { X86::ADD32ri, X86::ADD32mi, 0 },
    { X86::ADD32ri8, X86::ADD32mi8, 0 },
    { X86::ADD32ri_DB, X86::ADD32mi, TB_NO_REVERSE },
    { X86::ADD32ri8_DB, X86::ADD32mi8, TB_NO_REVERSE },
    { X86::ADD32rr, X86::ADD32mr, 0 },
    { X86::ADD32rr_DB, X86::ADD32mr, TB_NO_REVERSE },
    { X86::ADD64ri32, X86::ADD64mi32, 0 },
    { X86::ADD64ri8, X86::ADD64mi8, 0 },
    { X86::ADD64ri32_DB, X86::ADD64mi32, TB_NO_REVERSE },
    { X86::ADD64ri8_DB, X86::ADD64mi8, TB_NO_REVERSE },
    { X86::ADD64rr, X86::ADD64mr, 0 },
    { X86::ADD64rr_DB, X86::ADD64mr, TB_NO_REVERSE },
    { X86::ADD8ri, X86::ADD8mi, 0 },
    { X86::ADD8rr, X86::ADD8mr, 0 },
    { X86::AND16ri, X86::AND16mi, 0 },
    { X86::AND16ri8, X86::AND16mi8, 0 },
    { X86::AND16rr, X86::AND16mr, 0 },
    { X86::AND32ri, X86::AND32mi, 0 },
    { X86::AND32ri8, X86::AND32mi8, 0 },
    { X86::AND32rr, X86::AND32mr, 0 },
    { X86::AND64ri32, X86::AND64mi32, 0 },
    { X86::AND64ri8, X86::AND64mi8, 0 },
    { X86::AND64rr, X86::AND64mr, 0 },
    { X86::AND8ri, X86::AND8mi, 0 },
    { X86::AND8rr, X86::AND8mr, 0 },
    { X86::DEC16r, X86::DEC16m, 0 },
    { X86::DEC32r, X86::DEC32m, 0 },
    { X86::DEC64r, X86::DEC64m, 0 },
    { X86::DEC8r, X86::DEC8m, 0 },
    { X86::INC16r, X86::INC16m, 0 },
    { X86::INC32r, X86::INC32m, 0 },
    { X86::INC64r, X86::INC64m, 0 },
    { X86::INC8r, X86::INC8m, 0 },
    { X86::NEG16r, X86::NEG16m, 0 },
    { X86::NEG32r, X86::NEG32m, 0 },
    { X86::NEG64r, X86::NEG64m, 0 },
    { X86::NEG8r, X86::NEG8m, 0 },
    { X86::NOT16r, X86::NOT16m, 0 },
    { X86::NOT32r, X86::NOT32m, 0 },
    { X86::NOT64r, X86::NOT64m, 0 },
    { X86::NOT8r, X86::NOT8m, 0 },
    { X86::OR16ri, X86::OR16mi, 0 },
    { X86::OR16ri8, X86::OR16mi8, 0 },
    { X86::OR16rr, X86::OR16mr, 0 },
    { X86::OR32ri, X86::OR32mi, 0 },
    { X86::OR32ri8, X86::OR32mi8, 0 },
    { X86::OR32rr, X86::OR32mr, 0 },
    { X86::OR64ri32, X86::OR64mi32, 0 },
    { X86::OR64ri8, X86::OR64mi8, 0 },
    { X86::OR64rr, X86::OR64mr, 0 },
    { X86::OR8ri, X86::OR8mi, 0 },
    { X86::OR8rr, X86::OR8mr, 0 },
    { X86::ROL16r1, X86::ROL16m1, 0 },
    { X86::ROL16rCL, X86::ROL16mCL, 0 },
    { X86::ROL16ri, X86::ROL16mi, 0 },
    { X86::ROL32r1, X86::ROL32m1, 0 },
    { X86::ROL32rCL, X86::ROL32mCL, 0 },
    { X86::ROL32ri, X86::ROL32mi, 0 },
    { X86::ROL64r1, X86::ROL64m1, 0 },
    { X86::ROL64rCL, X86::ROL64mCL, 0 },
    { X86::ROL64ri, X86::ROL64mi, 0 },
    { X86::ROL8r1, X86::ROL8m1, 0 },
    { X86::ROL8rCL, X86::ROL8mCL, 0 },
    { X86::ROL8ri, X86::ROL8mi, 0 },
    { X86::ROR16r1, X86::ROR16m1, 0 },
    { X86::ROR16rCL, X86::ROR16mCL, 0 },
    { X86::ROR16ri, X86::ROR16mi, 0 },
    { X86::ROR32r1, X86::ROR32m1, 0 },
    { X86::ROR32rCL, X86::ROR32mCL, 0 },
    { X86::ROR32ri, X86::ROR32mi, 0 },
    { X86::ROR64r1, X86::ROR64m1, 0 },
    { X86::ROR64rCL, X86::ROR64mCL, 0 },
    { X86::ROR64ri, X86::ROR64mi, 0 },
    { X86::ROR8r1, X86::ROR8m1, 0 },
    { X86::ROR8rCL, X86::ROR8mCL, 0 },
    { X86::ROR8ri, X86::ROR8mi, 0 },
    { X86::SAR16r1, X86::SAR16m1, 0 },
    { X86::SAR16rCL, X86::SAR16mCL, 0 },
    { X86::SAR16ri, X86::SAR16mi, 0 },
    { X86::SAR32r1, X86::SAR32m1, 0 },
    { X86::SAR32rCL, X86::SAR32mCL, 0 },
    { X86::SAR32ri, X86::SAR32mi, 0 },
    { X86::SAR64r1, X86::SAR64m1, 0 },
    { X86::SAR64rCL, X86::SAR64mCL, 0 },
    { X86::SAR64ri, X86::SAR64mi, 0 },
    { X86::SAR8r1, X86::SAR8m1, 0 },
    { X86::SAR8rCL, X86::SAR8mCL, 0 },
    { X86::SAR8ri, X86::SAR8mi, 0 },
    { X86::SBB32ri, X86::SBB32mi, 0 },
    { X86::SBB32ri8, X86::SBB32mi8, 0 },
    { X86::SBB32rr, X86::SBB32mr, 0 },
    { X86::SBB64ri32, X86::SBB64mi32, 0 },
    { X86::SBB64ri8, X86::SBB64mi8, 0 },
    { X86::SBB64rr, X86::SBB64mr, 0 },
    { X86::SHL16rCL, X86::SHL16mCL, 0 },
    { X86::SHL16ri, X86::SHL16mi, 0 },
    { X86::SHL32rCL, X86::SHL32mCL, 0 },
    { X86::SHL32ri, X86::SHL32mi, 0 },
    { X86::SHL64rCL, X86::SHL64mCL, 0 },
    { X86::SHL64ri, X86::SHL64mi, 0 },
    { X86::SHL8rCL, X86::SHL8mCL, 0 },
    { X86::SHL8ri, X86::SHL8mi, 0 },
    { X86::SHLD16rrCL, X86::SHLD16mrCL, 0 },
    { X86::SHLD16rri8, X86::SHLD16mri8, 0 },
    { X86::SHLD32rrCL, X86::SHLD32mrCL, 0 },
    { X86::SHLD32rri8, X86::SHLD32mri8, 0 },
    { X86::SHLD64rrCL, X86::SHLD64mrCL, 0 },
    { X86::SHLD64rri8, X86::SHLD64mri8, 0 },
    { X86::SHR16r1, X86::SHR16m1, 0 },
    { X86::SHR16rCL, X86::SHR16mCL, 0 },
    { X86::SHR16ri, X86::SHR16mi, 0 },
    { X86::SHR32r1, X86::SHR32m1, 0 },
    { X86::SHR32rCL, X86::SHR32mCL, 0 },
    { X86::SHR32ri, X86::SHR32mi, 0 },
    { X86::SHR64r1, X86::SHR64m1, 0 },
    { X86::SHR64rCL, X86::SHR64mCL, 0 },
    { X86::SHR64ri, X86::SHR64mi, 0 },
    { X86::SHR8r1, X86::SHR8m1, 0 },
    { X86::SHR8rCL, X86::SHR8mCL, 0 },
    { X86::SHR8ri, X86::SHR8mi, 0 },
    { X86::SHRD16rrCL, X86::SHRD16mrCL, 0 },
    { X86::SHRD16rri8, X86::SHRD16mri8, 0 },
    { X86::SHRD32rrCL, X86::SHRD32mrCL, 0 },
    { X86::SHRD32rri8, X86::SHRD32mri8, 0 },
    { X86::SHRD64rrCL, X86::SHRD64mrCL, 0 },
    { X86::SHRD64rri8, X86::SHRD64mri8, 0 },
    { X86::SUB16ri, X86::SUB16mi, 0 },
    { X86::SUB16ri8, X86::SUB16mi8, 0 },
    { X86::SUB16rr, X86::SUB16mr, 0 },
    { X86::SUB32ri, X86::SUB32mi, 0 },
    { X86::SUB32ri8, X86::SUB32mi8, 0 },
    { X86::SUB32rr, X86::SUB32mr, 0 },
    { X86::SUB64ri32, X86::SUB64mi32, 0 },
    { X86::SUB64ri8, X86::SUB64mi8, 0 },
    { X86::SUB64rr, X86::SUB64mr, 0 },
    { X86::SUB8ri, X86::SUB8mi, 0 },
    { X86::SUB8rr, X86::SUB8mr, 0 },
    { X86::XOR16ri, X86::XOR16mi, 0 },
    { X86::XOR16ri8, X86::XOR16mi8, 0 },
    { X86::XOR16rr, X86::XOR16mr, 0 },
    { X86::XOR32ri, X86::XOR32mi, 0 },
    { X86::XOR32ri8, X86::XOR32mi8, 0 },
    { X86::XOR32rr, X86::XOR32mr, 0 },
    { X86::XOR64ri32, X86::XOR64mi32, 0 },
    { X86::XOR64ri8, X86::XOR64mi8, 0 },
    { X86::XOR64rr, X86::XOR64mr, 0 },
    { X86::XOR8ri, X86::XOR8mi, 0 },
    { X86::XOR8rr, X86::XOR8mr, 0 }
  };

  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable2Addr) {
    AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
                  Entry.RegOp, Entry.MemOp,
                  // Index 0, folded load and store, no alignment requirement.
                  Entry.Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
  }
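  // A sketch of what each AddTableEntry call is assumed to do (the helper is
  // defined elsewhere in this file): register the forward RegOp -> MemOp
  // mapping unless TB_NO_FORWARD is set, and the reverse MemOp -> RegOp
  // mapping unless TB_NO_REVERSE is set, roughly:
  //   if ((Flags & TB_NO_FORWARD) == 0)
  //     R2MTable[RegOp] = std::make_pair(MemOp, Flags);
  //   if ((Flags & TB_NO_REVERSE) == 0)
  //     M2RTable[MemOp] = std::make_pair(RegOp, Flags);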

  static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
    { X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD },
    { X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD },
    { X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD },
    { X86::CALL32r, X86::CALL32m, TB_FOLDED_LOAD },
    { X86::CALL64r, X86::CALL64m, TB_FOLDED_LOAD },
    { X86::CMP16ri, X86::CMP16mi, TB_FOLDED_LOAD },
    { X86::CMP16ri8, X86::CMP16mi8, TB_FOLDED_LOAD },
    { X86::CMP16rr, X86::CMP16mr, TB_FOLDED_LOAD },
    { X86::CMP32ri, X86::CMP32mi, TB_FOLDED_LOAD },
    { X86::CMP32ri8, X86::CMP32mi8, TB_FOLDED_LOAD },
    { X86::CMP32rr, X86::CMP32mr, TB_FOLDED_LOAD },
    { X86::CMP64ri32, X86::CMP64mi32, TB_FOLDED_LOAD },
    { X86::CMP64ri8, X86::CMP64mi8, TB_FOLDED_LOAD },
    { X86::CMP64rr, X86::CMP64mr, TB_FOLDED_LOAD },
    { X86::CMP8ri, X86::CMP8mi, TB_FOLDED_LOAD },
    { X86::CMP8rr, X86::CMP8mr, TB_FOLDED_LOAD },
    { X86::DIV16r, X86::DIV16m, TB_FOLDED_LOAD },
    { X86::DIV32r, X86::DIV32m, TB_FOLDED_LOAD },
    { X86::DIV64r, X86::DIV64m, TB_FOLDED_LOAD },
    { X86::DIV8r, X86::DIV8m, TB_FOLDED_LOAD },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, TB_FOLDED_STORE },
    { X86::IDIV16r, X86::IDIV16m, TB_FOLDED_LOAD },
    { X86::IDIV32r, X86::IDIV32m, TB_FOLDED_LOAD },
    { X86::IDIV64r, X86::IDIV64m, TB_FOLDED_LOAD },
    { X86::IDIV8r, X86::IDIV8m, TB_FOLDED_LOAD },
    { X86::IMUL16r, X86::IMUL16m, TB_FOLDED_LOAD },
    { X86::IMUL32r, X86::IMUL32m, TB_FOLDED_LOAD },
    { X86::IMUL64r, X86::IMUL64m, TB_FOLDED_LOAD },
    { X86::IMUL8r, X86::IMUL8m, TB_FOLDED_LOAD },
    { X86::JMP32r, X86::JMP32m, TB_FOLDED_LOAD },
    { X86::JMP64r, X86::JMP64m, TB_FOLDED_LOAD },
    { X86::MOV16ri, X86::MOV16mi, TB_FOLDED_STORE },
    { X86::MOV16rr, X86::MOV16mr, TB_FOLDED_STORE },
    { X86::MOV32ri, X86::MOV32mi, TB_FOLDED_STORE },
    { X86::MOV32rr, X86::MOV32mr, TB_FOLDED_STORE },
    { X86::MOV64ri32, X86::MOV64mi32, TB_FOLDED_STORE },
    { X86::MOV64rr, X86::MOV64mr, TB_FOLDED_STORE },
    { X86::MOV8ri, X86::MOV8mi, TB_FOLDED_STORE },
    { X86::MOV8rr, X86::MOV8mr, TB_FOLDED_STORE },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, TB_FOLDED_STORE },
    { X86::MOVAPDrr, X86::MOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVAPSrr, X86::MOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVDQArr, X86::MOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, TB_FOLDED_STORE },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, TB_FOLDED_STORE },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, TB_FOLDED_STORE },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, TB_FOLDED_STORE },
    { X86::MOVUPDrr, X86::MOVUPDmr, TB_FOLDED_STORE },
    { X86::MOVUPSrr, X86::MOVUPSmr, TB_FOLDED_STORE },
    { X86::MUL16r, X86::MUL16m, TB_FOLDED_LOAD },
    { X86::MUL32r, X86::MUL32m, TB_FOLDED_LOAD },
    { X86::MUL64r, X86::MUL64m, TB_FOLDED_LOAD },
    { X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD },
    { X86::PEXTRDrr, X86::PEXTRDmr, TB_FOLDED_STORE },
    { X86::PEXTRQrr, X86::PEXTRQmr, TB_FOLDED_STORE },
    { X86::PUSH16r, X86::PUSH16rmm, TB_FOLDED_LOAD },
    { X86::PUSH32r, X86::PUSH32rmm, TB_FOLDED_LOAD },
    { X86::PUSH64r, X86::PUSH64rmm, TB_FOLDED_LOAD },
    { X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE },
    { X86::SETAr, X86::SETAm, TB_FOLDED_STORE },
    { X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE },
    { X86::SETBr, X86::SETBm, TB_FOLDED_STORE },
    { X86::SETEr, X86::SETEm, TB_FOLDED_STORE },
    { X86::SETGEr, X86::SETGEm, TB_FOLDED_STORE },
    { X86::SETGr, X86::SETGm, TB_FOLDED_STORE },
    { X86::SETLEr, X86::SETLEm, TB_FOLDED_STORE },
    { X86::SETLr, X86::SETLm, TB_FOLDED_STORE },
    { X86::SETNEr, X86::SETNEm, TB_FOLDED_STORE },
    { X86::SETNOr, X86::SETNOm, TB_FOLDED_STORE },
    { X86::SETNPr, X86::SETNPm, TB_FOLDED_STORE },
    { X86::SETNSr, X86::SETNSm, TB_FOLDED_STORE },
    { X86::SETOr, X86::SETOm, TB_FOLDED_STORE },
    { X86::SETPr, X86::SETPm, TB_FOLDED_STORE },
    { X86::SETSr, X86::SETSm, TB_FOLDED_STORE },
    { X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD },
    { X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD },
    { X86::TAILJMPr64_REX, X86::TAILJMPm64_REX, TB_FOLDED_LOAD },
    { X86::TEST16ri, X86::TEST16mi, TB_FOLDED_LOAD },
    { X86::TEST32ri, X86::TEST32mi, TB_FOLDED_LOAD },
    { X86::TEST64ri32, X86::TEST64mi32, TB_FOLDED_LOAD },
    { X86::TEST8ri, X86::TEST8mi, TB_FOLDED_LOAD },

    // AVX 128-bit versions of foldable instructions
    { X86::VEXTRACTPSrr, X86::VEXTRACTPSmr, TB_FOLDED_STORE },
    { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDrr, X86::VMOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPSrr, X86::VMOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQArr, X86::VMOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVPDI2DIrr, X86::VMOVPDI2DImr, TB_FOLDED_STORE },
    { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr, TB_FOLDED_STORE },
    { X86::VMOVSDto64rr, X86::VMOVSDto64mr, TB_FOLDED_STORE },
    { X86::VMOVSS2DIrr, X86::VMOVSS2DImr, TB_FOLDED_STORE },
    { X86::VMOVUPDrr, X86::VMOVUPDmr, TB_FOLDED_STORE },
    { X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE },
    { X86::VPEXTRDrr, X86::VPEXTRDmr, TB_FOLDED_STORE },
    { X86::VPEXTRQrr, X86::VPEXTRQmr, TB_FOLDED_STORE },

    // AVX 256-bit foldable instructions
    { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVAPSYrr, X86::VMOVAPSYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE },
    { X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions
    { X86::VMOVPDI2DIZrr, X86::VMOVPDI2DIZmr, TB_FOLDED_STORE },
    { X86::VMOVAPDZrr, X86::VMOVAPDZmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVAPSZrr, X86::VMOVAPSZmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVUPDZrr, X86::VMOVUPDZmr, TB_FOLDED_STORE },
    { X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions (256-bit versions)
    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256mr, TB_FOLDED_STORE },
    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256mr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions (128-bit versions)
    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128mr, TB_FOLDED_STORE },
    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE },

    // F16C foldable instructions
    { X86::VCVTPS2PHrr, X86::VCVTPS2PHmr, TB_FOLDED_STORE },
    { X86::VCVTPS2PHYrr, X86::VCVTPS2PHYmr, TB_FOLDED_STORE }
  };

  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable0) {
    AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
                  Entry.RegOp, Entry.MemOp, TB_INDEX_0 | Entry.Flags);
  }

  static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
    { X86::BSF16rr, X86::BSF16rm, 0 },
    { X86::BSF32rr, X86::BSF32rm, 0 },
    { X86::BSF64rr, X86::BSF64rm, 0 },
    { X86::BSR16rr, X86::BSR16rm, 0 },
    { X86::BSR32rr, X86::BSR32rm, 0 },
    { X86::BSR64rr, X86::BSR64rm, 0 },
    { X86::CMP16rr, X86::CMP16rm, 0 },
    { X86::CMP32rr, X86::CMP32rm, 0 },
    { X86::CMP64rr, X86::CMP64rm, 0 },
    { X86::CMP8rr, X86::CMP8rm, 0 },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm, 0 },
    { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm, 0 },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm, 0 },
    { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm, 0 },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm, 0 },
    { X86::CVTSS2SDrr, X86::CVTSS2SDrm, 0 },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm, 0 },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 },
    { X86::IMUL16rri, X86::IMUL16rmi, 0 },
    { X86::IMUL16rri8, X86::IMUL16rmi8, 0 },
    { X86::IMUL32rri, X86::IMUL32rmi, 0 },
    { X86::IMUL32rri8, X86::IMUL32rmi8, 0 },
    { X86::IMUL64rri32, X86::IMUL64rmi32, 0 },
    { X86::IMUL64rri8, X86::IMUL64rmi8, 0 },
    { X86::Int_COMISDrr, X86::Int_COMISDrm, 0 },
    { X86::Int_COMISSrr, X86::Int_COMISSrm, 0 },
    { X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 },
    { X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
    { X86::CVTSS2SI64rr, X86::CVTSS2SI64rm, 0 },
    { X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 },
    { X86::CVTDQ2PDrr, X86::CVTDQ2PDrm, TB_ALIGN_16 },
    { X86::CVTDQ2PSrr, X86::CVTDQ2PSrm, TB_ALIGN_16 },
    { X86::CVTPD2DQrr, X86::CVTPD2DQrm, TB_ALIGN_16 },
    { X86::CVTPD2PSrr, X86::CVTPD2PSrm, TB_ALIGN_16 },
    { X86::CVTPS2DQrr, X86::CVTPS2DQrm, TB_ALIGN_16 },
    { X86::CVTPS2PDrr, X86::CVTPS2PDrm, TB_ALIGN_16 },
    { X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 },
    { X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm, 0 },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm, 0 },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm, 0 },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm, 0 },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
    { X86::MOV16rr, X86::MOV16rm, 0 },
    { X86::MOV32rr, X86::MOV32rm, 0 },
    { X86::MOV64rr, X86::MOV64rm, 0 },
    { X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
    { X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
    { X86::MOV8rr, X86::MOV8rm, 0 },
    { X86::MOVAPDrr, X86::MOVAPDrm, TB_ALIGN_16 },
    { X86::MOVAPSrr, X86::MOVAPSrm, TB_ALIGN_16 },
    { X86::MOVDDUPrr, X86::MOVDDUPrm, 0 },
    { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 },
    { X86::MOVDQArr, X86::MOVDQArm, TB_ALIGN_16 },
    { X86::MOVSHDUPrr, X86::MOVSHDUPrm, TB_ALIGN_16 },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm, TB_ALIGN_16 },
    { X86::MOVSX16rr8, X86::MOVSX16rm8, 0 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16, 0 },
    { X86::MOVSX32rr8, X86::MOVSX32rm8, 0 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16, 0 },
    { X86::MOVSX64rr32, X86::MOVSX64rm32, 0 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8, 0 },
    { X86::MOVUPDrr, X86::MOVUPDrm, TB_ALIGN_16 },
    { X86::MOVUPSrr, X86::MOVUPSrm, 0 },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, TB_ALIGN_16 },
    { X86::MOVZX16rr8, X86::MOVZX16rm8, 0 },
    { X86::MOVZX32rr16, X86::MOVZX32rm16, 0 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8, 0 },
    { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
    { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
    { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
    { X86::PCMPESTRIrr, X86::PCMPESTRIrm, TB_ALIGN_16 },
    { X86::PCMPESTRM128rr, X86::PCMPESTRM128rm, TB_ALIGN_16 },
    { X86::PCMPISTRIrr, X86::PCMPISTRIrm, TB_ALIGN_16 },
    { X86::PCMPISTRM128rr, X86::PCMPISTRM128rm, TB_ALIGN_16 },
    { X86::PHMINPOSUWrr128, X86::PHMINPOSUWrm128, TB_ALIGN_16 },
    { X86::PMOVSXBDrr, X86::PMOVSXBDrm, TB_ALIGN_16 },
    { X86::PMOVSXBQrr, X86::PMOVSXBQrm, TB_ALIGN_16 },
    { X86::PMOVSXBWrr, X86::PMOVSXBWrm, TB_ALIGN_16 },
    { X86::PMOVSXDQrr, X86::PMOVSXDQrm, TB_ALIGN_16 },
    { X86::PMOVSXWDrr, X86::PMOVSXWDrm, TB_ALIGN_16 },
    { X86::PMOVSXWQrr, X86::PMOVSXWQrm, TB_ALIGN_16 },
    { X86::PMOVZXBDrr, X86::PMOVZXBDrm, TB_ALIGN_16 },
    { X86::PMOVZXBQrr, X86::PMOVZXBQrm, TB_ALIGN_16 },
    { X86::PMOVZXBWrr, X86::PMOVZXBWrm, TB_ALIGN_16 },
    { X86::PMOVZXDQrr, X86::PMOVZXDQrm, TB_ALIGN_16 },
    { X86::PMOVZXWDrr, X86::PMOVZXWDrm, TB_ALIGN_16 },
    { X86::PMOVZXWQrr, X86::PMOVZXWQrm, TB_ALIGN_16 },
    { X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 },
    { X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 },
    { X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
    { X86::PTESTrr, X86::PTESTrm, TB_ALIGN_16 },
    { X86::RCPPSr, X86::RCPPSm, TB_ALIGN_16 },
    { X86::RCPSSr, X86::RCPSSm, 0 },
    { X86::RCPSSr_Int, X86::RCPSSm_Int, 0 },
    { X86::ROUNDPDr, X86::ROUNDPDm, TB_ALIGN_16 },
    { X86::ROUNDPSr, X86::ROUNDPSm, TB_ALIGN_16 },
    { X86::RSQRTPSr, X86::RSQRTPSm, TB_ALIGN_16 },
    { X86::RSQRTSSr, X86::RSQRTSSm, 0 },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int, 0 },
    { X86::SQRTPDr, X86::SQRTPDm, TB_ALIGN_16 },
    { X86::SQRTPSr, X86::SQRTPSm, TB_ALIGN_16 },
    { X86::SQRTSDr, X86::SQRTSDm, 0 },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int, 0 },
    { X86::SQRTSSr, X86::SQRTSSm, 0 },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int, 0 },
    { X86::TEST16rr, X86::TEST16rm, 0 },
    { X86::TEST32rr, X86::TEST32rm, 0 },
    { X86::TEST64rr, X86::TEST64rm, 0 },
    { X86::TEST8rr, X86::TEST8rm, 0 },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm, 0 },
    { X86::UCOMISSrr, X86::UCOMISSrm, 0 },

    // MMX version of foldable instructions
    { X86::MMX_CVTPD2PIirr, X86::MMX_CVTPD2PIirm, 0 },
    { X86::MMX_CVTPI2PDirr, X86::MMX_CVTPI2PDirm, 0 },
    { X86::MMX_CVTPS2PIirr, X86::MMX_CVTPS2PIirm, 0 },
    { X86::MMX_CVTTPD2PIirr, X86::MMX_CVTTPD2PIirm, 0 },
    { X86::MMX_CVTTPS2PIirr, X86::MMX_CVTTPS2PIirm, 0 },
    { X86::MMX_MOVD64to64rr, X86::MMX_MOVQ64rm, 0 },
    { X86::MMX_PABSBrr64, X86::MMX_PABSBrm64, 0 },
    { X86::MMX_PABSDrr64, X86::MMX_PABSDrm64, 0 },
    { X86::MMX_PABSWrr64, X86::MMX_PABSWrm64, 0 },
    { X86::MMX_PSHUFWri, X86::MMX_PSHUFWmi, 0 },

    // 3DNow! version of foldable instructions
    { X86::PF2IDrr, X86::PF2IDrm, 0 },
    { X86::PF2IWrr, X86::PF2IWrm, 0 },
    { X86::PFRCPrr, X86::PFRCPrm, 0 },
    { X86::PFRSQRTrr, X86::PFRSQRTrm, 0 },
    { X86::PI2FDrr, X86::PI2FDrm, 0 },
    { X86::PI2FWrr, X86::PI2FWrm, 0 },
    { X86::PSWAPDrr, X86::PSWAPDrm, 0 },

    // AVX 128-bit versions of foldable instructions
    { X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 },
    { X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 },
    { X86::Int_VUCOMISDrr, X86::Int_VUCOMISDrm, 0 },
    { X86::Int_VUCOMISSrr, X86::Int_VUCOMISSrm, 0 },
    { X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0 },
    { X86::Int_VCVTTSD2SI64rr, X86::Int_VCVTTSD2SI64rm, 0 },
    { X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0 },
    { X86::Int_VCVTTSD2SIrr, X86::Int_VCVTTSD2SIrm, 0 },
    { X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0 },
    { X86::Int_VCVTTSS2SI64rr, X86::Int_VCVTTSS2SI64rm, 0 },
    { X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0 },
    { X86::Int_VCVTTSS2SIrr, X86::Int_VCVTTSS2SIrm, 0 },
    { X86::VCVTSD2SI64rr, X86::VCVTSD2SI64rm, 0 },
    { X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
    { X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 },
    { X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
    { X86::VCVTDQ2PDrr, X86::VCVTDQ2PDrm, 0 },
    { X86::VCVTDQ2PSrr, X86::VCVTDQ2PSrm, 0 },
    { X86::VCVTPD2DQrr, X86::VCVTPD2DQXrm, 0 },
    { X86::VCVTPD2PSrr, X86::VCVTPD2PSXrm, 0 },
    { X86::VCVTPS2DQrr, X86::VCVTPS2DQrm, 0 },
    { X86::VCVTPS2PDrr, X86::VCVTPS2PDrm, 0 },
    { X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, 0 },
    { X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, 0 },
    { X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
    { X86::VMOV64toSDrr, X86::VMOV64toSDrm, 0 },
    { X86::VMOVAPDrr, X86::VMOVAPDrm, TB_ALIGN_16 },
    { X86::VMOVAPSrr, X86::VMOVAPSrm, TB_ALIGN_16 },
    { X86::VMOVDDUPrr, X86::VMOVDDUPrm, 0 },
    { X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
    { X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 },
    { X86::VMOVDQArr, X86::VMOVDQArm, TB_ALIGN_16 },
    { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm, 0 },
    { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, 0 },
    { X86::VMOVUPDrr, X86::VMOVUPDrm, 0 },
    { X86::VMOVUPSrr, X86::VMOVUPSrm, 0 },
    { X86::VMOVZPQILo2PQIrr, X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 },
    { X86::VPABSBrr128, X86::VPABSBrm128, 0 },
    { X86::VPABSDrr128, X86::VPABSDrm128, 0 },
    { X86::VPABSWrr128, X86::VPABSWrm128, 0 },
    { X86::VPCMPESTRIrr, X86::VPCMPESTRIrm, 0 },
    { X86::VPCMPESTRM128rr, X86::VPCMPESTRM128rm, 0 },
    { X86::VPCMPISTRIrr, X86::VPCMPISTRIrm, 0 },
    { X86::VPCMPISTRM128rr, X86::VPCMPISTRM128rm, 0 },
    { X86::VPHMINPOSUWrr128, X86::VPHMINPOSUWrm128, 0 },
    { X86::VPERMILPDri, X86::VPERMILPDmi, 0 },
    { X86::VPERMILPSri, X86::VPERMILPSmi, 0 },
    { X86::VPMOVSXBDrr, X86::VPMOVSXBDrm, 0 },
    { X86::VPMOVSXBQrr, X86::VPMOVSXBQrm, 0 },
    { X86::VPMOVSXBWrr, X86::VPMOVSXBWrm, 0 },
    { X86::VPMOVSXDQrr, X86::VPMOVSXDQrm, 0 },
    { X86::VPMOVSXWDrr, X86::VPMOVSXWDrm, 0 },
    { X86::VPMOVSXWQrr, X86::VPMOVSXWQrm, 0 },
    { X86::VPMOVZXBDrr, X86::VPMOVZXBDrm, 0 },
    { X86::VPMOVZXBQrr, X86::VPMOVZXBQrm, 0 },
    { X86::VPMOVZXBWrr, X86::VPMOVZXBWrm, 0 },
    { X86::VPMOVZXDQrr, X86::VPMOVZXDQrm, 0 },
    { X86::VPMOVZXWDrr, X86::VPMOVZXWDrm, 0 },
    { X86::VPMOVZXWQrr, X86::VPMOVZXWQrm, 0 },
    { X86::VPSHUFDri, X86::VPSHUFDmi, 0 },
    { X86::VPSHUFHWri, X86::VPSHUFHWmi, 0 },
    { X86::VPSHUFLWri, X86::VPSHUFLWmi, 0 },
    { X86::VPTESTrr, X86::VPTESTrm, 0 },
    { X86::VRCPPSr, X86::VRCPPSm, 0 },
    { X86::VROUNDPDr, X86::VROUNDPDm, 0 },
    { X86::VROUNDPSr, X86::VROUNDPSm, 0 },
    { X86::VRSQRTPSr, X86::VRSQRTPSm, 0 },
    { X86::VSQRTPDr, X86::VSQRTPDm, 0 },
    { X86::VSQRTPSr, X86::VSQRTPSm, 0 },
    { X86::VTESTPDrr, X86::VTESTPDrm, 0 },
    { X86::VTESTPSrr, X86::VTESTPSrm, 0 },
    { X86::VUCOMISDrr, X86::VUCOMISDrm, 0 },
    { X86::VUCOMISSrr, X86::VUCOMISSrm, 0 },

    // AVX 256-bit foldable instructions
    { X86::VCVTDQ2PDYrr, X86::VCVTDQ2PDYrm, 0 },
    { X86::VCVTDQ2PSYrr, X86::VCVTDQ2PSYrm, 0 },
    { X86::VCVTPD2DQYrr, X86::VCVTPD2DQYrm, 0 },
    { X86::VCVTPD2PSYrr, X86::VCVTPD2PSYrm, 0 },
    { X86::VCVTPS2DQYrr, X86::VCVTPS2DQYrm, 0 },
    { X86::VCVTPS2PDYrr, X86::VCVTPS2PDYrm, 0 },
    { X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0 },
    { X86::VCVTTPS2DQYrr, X86::VCVTTPS2DQYrm, 0 },
    { X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
    { X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
    { X86::VMOVDDUPYrr, X86::VMOVDDUPYrm, 0 },
    { X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_32 },
    { X86::VMOVSLDUPYrr, X86::VMOVSLDUPYrm, 0 },
    { X86::VMOVSHDUPYrr, X86::VMOVSHDUPYrm, 0 },
    { X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 },
    { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
    { X86::VPERMILPDYri, X86::VPERMILPDYmi, 0 },
    { X86::VPERMILPSYri, X86::VPERMILPSYmi, 0 },
    { X86::VPTESTYrr, X86::VPTESTYrm, 0 },
    { X86::VRCPPSYr, X86::VRCPPSYm, 0 },
    { X86::VROUNDYPDr, X86::VROUNDYPDm, 0 },
    { X86::VROUNDYPSr, X86::VROUNDYPSm, 0 },
    { X86::VRSQRTPSYr, X86::VRSQRTPSYm, 0 },
    { X86::VSQRTPDYr, X86::VSQRTPDYm, 0 },
    { X86::VSQRTPSYr, X86::VSQRTPSYm, 0 },
    { X86::VTESTPDYrr, X86::VTESTPDYrm, 0 },
    { X86::VTESTPSYrr, X86::VTESTPSYrm, 0 },

    // AVX2 foldable instructions

    // VBROADCASTS{SD}rr register instructions were an AVX2 addition while the
    // VBROADCASTS{SD}rm memory instructions were available from AVX1.
    // TB_NO_REVERSE prevents unfolding from introducing an illegal instruction
    // on AVX1 targets. The VPBROADCAST instructions are all AVX2 instructions
    // so they don't need an equivalent limitation.
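    // For instance, folding rewrites VBROADCASTSSrr into a VBROADCASTSSrm
    // load from the spill slot, which is legal even on AVX1; unfolding would
    // go the other way and synthesize the AVX2-only register form.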
    { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE },
    { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
    { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
    { X86::VPABSBrr256, X86::VPABSBrm256, 0 },
    { X86::VPABSDrr256, X86::VPABSDrm256, 0 },
    { X86::VPABSWrr256, X86::VPABSWrm256, 0 },
    { X86::VPBROADCASTBrr, X86::VPBROADCASTBrm, 0 },
    { X86::VPBROADCASTBYrr, X86::VPBROADCASTBYrm, 0 },
    { X86::VPBROADCASTDrr, X86::VPBROADCASTDrm, 0 },
    { X86::VPBROADCASTDYrr, X86::VPBROADCASTDYrm, 0 },
    { X86::VPBROADCASTQrr, X86::VPBROADCASTQrm, 0 },
    { X86::VPBROADCASTQYrr, X86::VPBROADCASTQYrm, 0 },
    { X86::VPBROADCASTWrr, X86::VPBROADCASTWrm, 0 },
    { X86::VPBROADCASTWYrr, X86::VPBROADCASTWYrm, 0 },
    { X86::VPERMPDYri, X86::VPERMPDYmi, 0 },
    { X86::VPERMQYri, X86::VPERMQYmi, 0 },
    { X86::VPMOVSXBDYrr, X86::VPMOVSXBDYrm, 0 },
    { X86::VPMOVSXBQYrr, X86::VPMOVSXBQYrm, 0 },
    { X86::VPMOVSXBWYrr, X86::VPMOVSXBWYrm, 0 },
    { X86::VPMOVSXDQYrr, X86::VPMOVSXDQYrm, 0 },
    { X86::VPMOVSXWDYrr, X86::VPMOVSXWDYrm, 0 },
    { X86::VPMOVSXWQYrr, X86::VPMOVSXWQYrm, 0 },
    { X86::VPMOVZXBDYrr, X86::VPMOVZXBDYrm, 0 },
    { X86::VPMOVZXBQYrr, X86::VPMOVZXBQYrm, 0 },
    { X86::VPMOVZXBWYrr, X86::VPMOVZXBWYrm, 0 },
    { X86::VPMOVZXDQYrr, X86::VPMOVZXDQYrm, 0 },
    { X86::VPMOVZXWDYrr, X86::VPMOVZXWDYrm, 0 },
    { X86::VPMOVZXWQYrr, X86::VPMOVZXWQYrm, 0 },
    { X86::VPSHUFDYri, X86::VPSHUFDYmi, 0 },
    { X86::VPSHUFHWYri, X86::VPSHUFHWYmi, 0 },
    { X86::VPSHUFLWYri, X86::VPSHUFLWYmi, 0 },

    // XOP foldable instructions
    { X86::VFRCZPDrr, X86::VFRCZPDrm, 0 },
    { X86::VFRCZPDrrY, X86::VFRCZPDrmY, 0 },
    { X86::VFRCZPSrr, X86::VFRCZPSrm, 0 },
    { X86::VFRCZPSrrY, X86::VFRCZPSrmY, 0 },
    { X86::VFRCZSDrr, X86::VFRCZSDrm, 0 },
    { X86::VFRCZSSrr, X86::VFRCZSSrm, 0 },
    { X86::VPHADDBDrr, X86::VPHADDBDrm, 0 },
    { X86::VPHADDBQrr, X86::VPHADDBQrm, 0 },
    { X86::VPHADDBWrr, X86::VPHADDBWrm, 0 },
    { X86::VPHADDDQrr, X86::VPHADDDQrm, 0 },
    { X86::VPHADDWDrr, X86::VPHADDWDrm, 0 },
    { X86::VPHADDWQrr, X86::VPHADDWQrm, 0 },
    { X86::VPHADDUBDrr, X86::VPHADDUBDrm, 0 },
    { X86::VPHADDUBQrr, X86::VPHADDUBQrm, 0 },
    { X86::VPHADDUBWrr, X86::VPHADDUBWrm, 0 },
    { X86::VPHADDUDQrr, X86::VPHADDUDQrm, 0 },
    { X86::VPHADDUWDrr, X86::VPHADDUWDrm, 0 },
    { X86::VPHADDUWQrr, X86::VPHADDUWQrm, 0 },
    { X86::VPHSUBBWrr, X86::VPHSUBBWrm, 0 },
    { X86::VPHSUBDQrr, X86::VPHSUBDQrm, 0 },
    { X86::VPHSUBWDrr, X86::VPHSUBWDrm, 0 },
    { X86::VPROTBri, X86::VPROTBmi, 0 },
    { X86::VPROTBrr, X86::VPROTBmr, 0 },
    { X86::VPROTDri, X86::VPROTDmi, 0 },
    { X86::VPROTDrr, X86::VPROTDmr, 0 },
    { X86::VPROTQri, X86::VPROTQmi, 0 },
    { X86::VPROTQrr, X86::VPROTQmr, 0 },
    { X86::VPROTWri, X86::VPROTWmi, 0 },
    { X86::VPROTWrr, X86::VPROTWmr, 0 },
    { X86::VPSHABrr, X86::VPSHABmr, 0 },
    { X86::VPSHADrr, X86::VPSHADmr, 0 },
    { X86::VPSHAQrr, X86::VPSHAQmr, 0 },
    { X86::VPSHAWrr, X86::VPSHAWmr, 0 },
    { X86::VPSHLBrr, X86::VPSHLBmr, 0 },
    { X86::VPSHLDrr, X86::VPSHLDmr, 0 },
    { X86::VPSHLQrr, X86::VPSHLQmr, 0 },
    { X86::VPSHLWrr, X86::VPSHLWmr, 0 },

    // BMI/BMI2/LZCNT/POPCNT/TBM foldable instructions
    { X86::BEXTR32rr, X86::BEXTR32rm, 0 },
    { X86::BEXTR64rr, X86::BEXTR64rm, 0 },
    { X86::BEXTRI32ri, X86::BEXTRI32mi, 0 },
    { X86::BEXTRI64ri, X86::BEXTRI64mi, 0 },
    { X86::BLCFILL32rr, X86::BLCFILL32rm, 0 },
    { X86::BLCFILL64rr, X86::BLCFILL64rm, 0 },
    { X86::BLCI32rr, X86::BLCI32rm, 0 },
    { X86::BLCI64rr, X86::BLCI64rm, 0 },
    { X86::BLCIC32rr, X86::BLCIC32rm, 0 },
    { X86::BLCIC64rr, X86::BLCIC64rm, 0 },
    { X86::BLCMSK32rr, X86::BLCMSK32rm, 0 },
    { X86::BLCMSK64rr, X86::BLCMSK64rm, 0 },
    { X86::BLCS32rr, X86::BLCS32rm, 0 },
    { X86::BLCS64rr, X86::BLCS64rm, 0 },
    { X86::BLSFILL32rr, X86::BLSFILL32rm, 0 },
    { X86::BLSFILL64rr, X86::BLSFILL64rm, 0 },
    { X86::BLSI32rr, X86::BLSI32rm, 0 },
    { X86::BLSI64rr, X86::BLSI64rm, 0 },
    { X86::BLSIC32rr, X86::BLSIC32rm, 0 },
    { X86::BLSIC64rr, X86::BLSIC64rm, 0 },
    { X86::BLSMSK32rr, X86::BLSMSK32rm, 0 },
    { X86::BLSMSK64rr, X86::BLSMSK64rm, 0 },
    { X86::BLSR32rr, X86::BLSR32rm, 0 },
    { X86::BLSR64rr, X86::BLSR64rm, 0 },
    { X86::BZHI32rr, X86::BZHI32rm, 0 },
    { X86::BZHI64rr, X86::BZHI64rm, 0 },
    { X86::LZCNT16rr, X86::LZCNT16rm, 0 },
    { X86::LZCNT32rr, X86::LZCNT32rm, 0 },
    { X86::LZCNT64rr, X86::LZCNT64rm, 0 },
    { X86::POPCNT16rr, X86::POPCNT16rm, 0 },
    { X86::POPCNT32rr, X86::POPCNT32rm, 0 },
    { X86::POPCNT64rr, X86::POPCNT64rm, 0 },
    { X86::RORX32ri, X86::RORX32mi, 0 },
    { X86::RORX64ri, X86::RORX64mi, 0 },
    { X86::SARX32rr, X86::SARX32rm, 0 },
    { X86::SARX64rr, X86::SARX64rm, 0 },
    { X86::SHRX32rr, X86::SHRX32rm, 0 },
    { X86::SHRX64rr, X86::SHRX64rm, 0 },
    { X86::SHLX32rr, X86::SHLX32rm, 0 },
    { X86::SHLX64rr, X86::SHLX64rm, 0 },
    { X86::T1MSKC32rr, X86::T1MSKC32rm, 0 },
    { X86::T1MSKC64rr, X86::T1MSKC64rm, 0 },
    { X86::TZCNT16rr, X86::TZCNT16rm, 0 },
    { X86::TZCNT32rr, X86::TZCNT32rm, 0 },
    { X86::TZCNT64rr, X86::TZCNT64rm, 0 },
    { X86::TZMSK32rr, X86::TZMSK32rm, 0 },
    { X86::TZMSK64rr, X86::TZMSK64rm, 0 },

    // AVX-512 foldable instructions
    { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
    { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
    { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
    { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
    { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
    { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
    { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
    { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
    { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
    { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
    { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
    { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
    { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },

    // AVX-512 foldable instructions (256-bit versions)
    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
    { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
    { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },

    // AVX-512 foldable instructions (128-bit versions)
    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
    { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },

    // F16C foldable instructions
    { X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
    { X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },

    // AES foldable instructions
    { X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
    { X86::AESKEYGENASSIST128rr, X86::AESKEYGENASSIST128rm, TB_ALIGN_16 },
    { X86::VAESIMCrr, X86::VAESIMCrm, 0 },
    { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, 0 }
  };

  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable1) {
    AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
                  Entry.RegOp, Entry.MemOp,
                  // Index 1, folded load
                  Entry.Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
  }

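  // Note on the index: TB_INDEX_1 presumably records that the folded memory
  // reference stands in for operand 1 of the register form, which is what
  // the unfolding code needs in order to re-load that operand.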
  static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
    { X86::ADC32rr, X86::ADC32rm, 0 },
    { X86::ADC64rr, X86::ADC64rm, 0 },
    { X86::ADD16rr, X86::ADD16rm, 0 },
    { X86::ADD16rr_DB, X86::ADD16rm, TB_NO_REVERSE },
    { X86::ADD32rr, X86::ADD32rm, 0 },
    { X86::ADD32rr_DB, X86::ADD32rm, TB_NO_REVERSE },
    { X86::ADD64rr, X86::ADD64rm, 0 },
    { X86::ADD64rr_DB, X86::ADD64rm, TB_NO_REVERSE },
    { X86::ADD8rr, X86::ADD8rm, 0 },
    { X86::ADDPDrr, X86::ADDPDrm, TB_ALIGN_16 },
    { X86::ADDPSrr, X86::ADDPSrm, TB_ALIGN_16 },
    { X86::ADDSDrr, X86::ADDSDrm, 0 },
    { X86::ADDSDrr_Int, X86::ADDSDrm_Int, 0 },
    { X86::ADDSSrr, X86::ADDSSrm, 0 },
    { X86::ADDSSrr_Int, X86::ADDSSrm_Int, 0 },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm, TB_ALIGN_16 },
    { X86::ADDSUBPSrr, X86::ADDSUBPSrm, TB_ALIGN_16 },
    { X86::AND16rr, X86::AND16rm, 0 },
    { X86::AND32rr, X86::AND32rm, 0 },
    { X86::AND64rr, X86::AND64rm, 0 },
    { X86::AND8rr, X86::AND8rm, 0 },
    { X86::ANDNPDrr, X86::ANDNPDrm, TB_ALIGN_16 },
    { X86::ANDNPSrr, X86::ANDNPSrm, TB_ALIGN_16 },
    { X86::ANDPDrr, X86::ANDPDrm, TB_ALIGN_16 },
    { X86::ANDPSrr, X86::ANDPSrm, TB_ALIGN_16 },
    { X86::BLENDPDrri, X86::BLENDPDrmi, TB_ALIGN_16 },
    { X86::BLENDPSrri, X86::BLENDPSrmi, TB_ALIGN_16 },
    { X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16 },
    { X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16 },
    { X86::CMOVA16rr, X86::CMOVA16rm, 0 },
    { X86::CMOVA32rr, X86::CMOVA32rm, 0 },
    { X86::CMOVA64rr, X86::CMOVA64rm, 0 },
    { X86::CMOVAE16rr, X86::CMOVAE16rm, 0 },
    { X86::CMOVAE32rr, X86::CMOVAE32rm, 0 },
    { X86::CMOVAE64rr, X86::CMOVAE64rm, 0 },
    { X86::CMOVB16rr, X86::CMOVB16rm, 0 },
    { X86::CMOVB32rr, X86::CMOVB32rm, 0 },
    { X86::CMOVB64rr, X86::CMOVB64rm, 0 },
    { X86::CMOVBE16rr, X86::CMOVBE16rm, 0 },
    { X86::CMOVBE32rr, X86::CMOVBE32rm, 0 },
    { X86::CMOVBE64rr, X86::CMOVBE64rm, 0 },
    { X86::CMOVE16rr, X86::CMOVE16rm, 0 },
    { X86::CMOVE32rr, X86::CMOVE32rm, 0 },
    { X86::CMOVE64rr, X86::CMOVE64rm, 0 },
    { X86::CMOVG16rr, X86::CMOVG16rm, 0 },
    { X86::CMOVG32rr, X86::CMOVG32rm, 0 },
    { X86::CMOVG64rr, X86::CMOVG64rm, 0 },
    { X86::CMOVGE16rr, X86::CMOVGE16rm, 0 },
    { X86::CMOVGE32rr, X86::CMOVGE32rm, 0 },
    { X86::CMOVGE64rr, X86::CMOVGE64rm, 0 },
    { X86::CMOVL16rr, X86::CMOVL16rm, 0 },
    { X86::CMOVL32rr, X86::CMOVL32rm, 0 },
    { X86::CMOVL64rr, X86::CMOVL64rm, 0 },
    { X86::CMOVLE16rr, X86::CMOVLE16rm, 0 },
    { X86::CMOVLE32rr, X86::CMOVLE32rm, 0 },
    { X86::CMOVLE64rr, X86::CMOVLE64rm, 0 },
    { X86::CMOVNE16rr, X86::CMOVNE16rm, 0 },
    { X86::CMOVNE32rr, X86::CMOVNE32rm, 0 },
    { X86::CMOVNE64rr, X86::CMOVNE64rm, 0 },
    { X86::CMOVNO16rr, X86::CMOVNO16rm, 0 },
    { X86::CMOVNO32rr, X86::CMOVNO32rm, 0 },
    { X86::CMOVNO64rr, X86::CMOVNO64rm, 0 },
    { X86::CMOVNP16rr, X86::CMOVNP16rm, 0 },
    { X86::CMOVNP32rr, X86::CMOVNP32rm, 0 },
    { X86::CMOVNP64rr, X86::CMOVNP64rm, 0 },
    { X86::CMOVNS16rr, X86::CMOVNS16rm, 0 },
    { X86::CMOVNS32rr, X86::CMOVNS32rm, 0 },
    { X86::CMOVNS64rr, X86::CMOVNS64rm, 0 },
    { X86::CMOVO16rr, X86::CMOVO16rm, 0 },
    { X86::CMOVO32rr, X86::CMOVO32rm, 0 },
    { X86::CMOVO64rr, X86::CMOVO64rm, 0 },
    { X86::CMOVP16rr, X86::CMOVP16rm, 0 },
    { X86::CMOVP32rr, X86::CMOVP32rm, 0 },
    { X86::CMOVP64rr, X86::CMOVP64rm, 0 },
    { X86::CMOVS16rr, X86::CMOVS16rm, 0 },
    { X86::CMOVS32rr, X86::CMOVS32rm, 0 },
    { X86::CMOVS64rr, X86::CMOVS64rm, 0 },
    { X86::CMPPDrri, X86::CMPPDrmi, TB_ALIGN_16 },
    { X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16 },
    { X86::CMPSDrr, X86::CMPSDrm, 0 },
    { X86::CMPSSrr, X86::CMPSSrm, 0 },
    { X86::CRC32r32r32, X86::CRC32r32m32, 0 },
    { X86::CRC32r64r64, X86::CRC32r64m64, 0 },
    { X86::DIVPDrr, X86::DIVPDrm, TB_ALIGN_16 },
    { X86::DIVPSrr, X86::DIVPSrm, TB_ALIGN_16 },
    { X86::DIVSDrr, X86::DIVSDrm, 0 },
    { X86::DIVSDrr_Int, X86::DIVSDrm_Int, 0 },
    { X86::DIVSSrr, X86::DIVSSrm, 0 },
    { X86::DIVSSrr_Int, X86::DIVSSrm_Int, 0 },
    { X86::DPPDrri, X86::DPPDrmi, TB_ALIGN_16 },
    { X86::DPPSrri, X86::DPPSrmi, TB_ALIGN_16 },

    // Do not fold Fs* scalar logical op loads because there are no scalar
    // load variants for these instructions. When folded, the load is required
    // to be 128-bits, so the load size would not match.
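    // For instance, a hypothetical fold of FsANDPSrr would need a 16-byte
    // ANDPS-style load, while the scalar value it operates on spills as only
    // 4 bytes.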

    { X86::FvANDNPDrr, X86::FvANDNPDrm, TB_ALIGN_16 },
    { X86::FvANDNPSrr, X86::FvANDNPSrm, TB_ALIGN_16 },
    { X86::FvANDPDrr, X86::FvANDPDrm, TB_ALIGN_16 },
    { X86::FvANDPSrr, X86::FvANDPSrm, TB_ALIGN_16 },
    { X86::FvORPDrr, X86::FvORPDrm, TB_ALIGN_16 },
    { X86::FvORPSrr, X86::FvORPSrm, TB_ALIGN_16 },
    { X86::FvXORPDrr, X86::FvXORPDrm, TB_ALIGN_16 },
    { X86::FvXORPSrr, X86::FvXORPSrm, TB_ALIGN_16 },
    { X86::HADDPDrr, X86::HADDPDrm, TB_ALIGN_16 },
    { X86::HADDPSrr, X86::HADDPSrm, TB_ALIGN_16 },
    { X86::HSUBPDrr, X86::HSUBPDrm, TB_ALIGN_16 },
    { X86::HSUBPSrr, X86::HSUBPSrm, TB_ALIGN_16 },
    { X86::IMUL16rr, X86::IMUL16rm, 0 },
    { X86::IMUL32rr, X86::IMUL32rm, 0 },
    { X86::IMUL64rr, X86::IMUL64rm, 0 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm, 0 },
    { X86::Int_CMPSSrr, X86::Int_CMPSSrm, 0 },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm, 0 },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm, 0 },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
    { X86::MAXPDrr, X86::MAXPDrm, TB_ALIGN_16 },
    { X86::MAXPSrr, X86::MAXPSrm, TB_ALIGN_16 },
    { X86::MAXSDrr, X86::MAXSDrm, 0 },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int, 0 },
    { X86::MAXSSrr, X86::MAXSSrm, 0 },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int, 0 },
    { X86::MINPDrr, X86::MINPDrm, TB_ALIGN_16 },
    { X86::MINPSrr, X86::MINPSrm, TB_ALIGN_16 },
    { X86::MINSDrr, X86::MINSDrm, 0 },
    { X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
    { X86::MINSSrr, X86::MINSSrm, 0 },
    { X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
    { X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
    { X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
    { X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
    { X86::MULSDrr, X86::MULSDrm, 0 },
    { X86::MULSDrr_Int, X86::MULSDrm_Int, 0 },
    { X86::MULSSrr, X86::MULSSrm, 0 },
    { X86::MULSSrr_Int, X86::MULSSrm_Int, 0 },
    { X86::OR16rr, X86::OR16rm, 0 },
    { X86::OR32rr, X86::OR32rm, 0 },
    { X86::OR64rr, X86::OR64rm, 0 },
    { X86::OR8rr, X86::OR8rm, 0 },
    { X86::ORPDrr, X86::ORPDrm, TB_ALIGN_16 },
    { X86::ORPSrr, X86::ORPSrm, TB_ALIGN_16 },
    { X86::PACKSSDWrr, X86::PACKSSDWrm, TB_ALIGN_16 },
    { X86::PACKSSWBrr, X86::PACKSSWBrm, TB_ALIGN_16 },
    { X86::PACKUSDWrr, X86::PACKUSDWrm, TB_ALIGN_16 },
    { X86::PACKUSWBrr, X86::PACKUSWBrm, TB_ALIGN_16 },
    { X86::PADDBrr, X86::PADDBrm, TB_ALIGN_16 },
    { X86::PADDDrr, X86::PADDDrm, TB_ALIGN_16 },
    { X86::PADDQrr, X86::PADDQrm, TB_ALIGN_16 },
    { X86::PADDSBrr, X86::PADDSBrm, TB_ALIGN_16 },
    { X86::PADDSWrr, X86::PADDSWrm, TB_ALIGN_16 },
    { X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
    { X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
    { X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
    { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
    { X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
    { X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
    { X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
    { X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 },
    { X86::PBLENDVBrr0, X86::PBLENDVBrm0, TB_ALIGN_16 },
    { X86::PBLENDWrri, X86::PBLENDWrmi, TB_ALIGN_16 },
    { X86::PCLMULQDQrr, X86::PCLMULQDQrm, TB_ALIGN_16 },
    { X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 },
    { X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 },
    { X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 },
    { X86::PCMPEQWrr, X86::PCMPEQWrm, TB_ALIGN_16 },
    { X86::PCMPGTBrr, X86::PCMPGTBrm, TB_ALIGN_16 },
    { X86::PCMPGTDrr, X86::PCMPGTDrm, TB_ALIGN_16 },
    { X86::PCMPGTQrr, X86::PCMPGTQrm, TB_ALIGN_16 },
    { X86::PCMPGTWrr, X86::PCMPGTWrm, TB_ALIGN_16 },
    { X86::PHADDDrr, X86::PHADDDrm, TB_ALIGN_16 },
    { X86::PHADDWrr, X86::PHADDWrm, TB_ALIGN_16 },
    { X86::PHADDSWrr128, X86::PHADDSWrm128, TB_ALIGN_16 },
    { X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16 },
    { X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 },
    { X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16 },
    { X86::PINSRBrr, X86::PINSRBrm, 0 },
    { X86::PINSRDrr, X86::PINSRDrm, 0 },
    { X86::PINSRQrr, X86::PINSRQrm, 0 },
    { X86::PINSRWrri, X86::PINSRWrmi, 0 },
    { X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 },
    { X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 },
    { X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 },
    { X86::PMAXUBrr, X86::PMAXUBrm, TB_ALIGN_16 },
    { X86::PMINSWrr, X86::PMINSWrm, TB_ALIGN_16 },
    { X86::PMINUBrr, X86::PMINUBrm, TB_ALIGN_16 },
    { X86::PMINSBrr, X86::PMINSBrm, TB_ALIGN_16 },
    { X86::PMINSDrr, X86::PMINSDrm, TB_ALIGN_16 },
    { X86::PMINUDrr, X86::PMINUDrm, TB_ALIGN_16 },
    { X86::PMINUWrr, X86::PMINUWrm, TB_ALIGN_16 },
    { X86::PMAXSBrr, X86::PMAXSBrm, TB_ALIGN_16 },
    { X86::PMAXSDrr, X86::PMAXSDrm, TB_ALIGN_16 },
    { X86::PMAXUDrr, X86::PMAXUDrm, TB_ALIGN_16 },
    { X86::PMAXUWrr, X86::PMAXUWrm, TB_ALIGN_16 },
    { X86::PMULDQrr, X86::PMULDQrm, TB_ALIGN_16 },
    { X86::PMULHRSWrr128, X86::PMULHRSWrm128, TB_ALIGN_16 },
    { X86::PMULHUWrr, X86::PMULHUWrm, TB_ALIGN_16 },
    { X86::PMULHWrr, X86::PMULHWrm, TB_ALIGN_16 },
    { X86::PMULLDrr, X86::PMULLDrm, TB_ALIGN_16 },
    { X86::PMULLWrr, X86::PMULLWrm, TB_ALIGN_16 },
    { X86::PMULUDQrr, X86::PMULUDQrm, TB_ALIGN_16 },
    { X86::PORrr, X86::PORrm, TB_ALIGN_16 },
    { X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
    { X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 },
    { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 },
    { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 },
    { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 },
    { X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
    { X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
    { X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
    { X86::PSRADrr, X86::PSRADrm, TB_ALIGN_16 },
    { X86::PSRAWrr, X86::PSRAWrm, TB_ALIGN_16 },
    { X86::PSRLDrr, X86::PSRLDrm, TB_ALIGN_16 },
    { X86::PSRLQrr, X86::PSRLQrm, TB_ALIGN_16 },
    { X86::PSRLWrr, X86::PSRLWrm, TB_ALIGN_16 },
    { X86::PSUBBrr, X86::PSUBBrm, TB_ALIGN_16 },
    { X86::PSUBDrr, X86::PSUBDrm, TB_ALIGN_16 },
    { X86::PSUBQrr, X86::PSUBQrm, TB_ALIGN_16 },
    { X86::PSUBSBrr, X86::PSUBSBrm, TB_ALIGN_16 },
    { X86::PSUBSWrr, X86::PSUBSWrm, TB_ALIGN_16 },
    { X86::PSUBUSBrr, X86::PSUBUSBrm, TB_ALIGN_16 },
    { X86::PSUBUSWrr, X86::PSUBUSWrm, TB_ALIGN_16 },
    { X86::PSUBWrr, X86::PSUBWrm, TB_ALIGN_16 },
    { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm, TB_ALIGN_16 },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm, TB_ALIGN_16 },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm, TB_ALIGN_16 },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm, TB_ALIGN_16 },
    { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm, TB_ALIGN_16 },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm, TB_ALIGN_16 },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm, TB_ALIGN_16 },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm, TB_ALIGN_16 },
    { X86::PXORrr, X86::PXORrm, TB_ALIGN_16 },
    { X86::ROUNDSDr, X86::ROUNDSDm, 0 },
    { X86::ROUNDSSr, X86::ROUNDSSm, 0 },
    { X86::SBB32rr, X86::SBB32rm, 0 },
    { X86::SBB64rr, X86::SBB64rm, 0 },
    { X86::SHUFPDrri, X86::SHUFPDrmi, TB_ALIGN_16 },
    { X86::SHUFPSrri, X86::SHUFPSrmi, TB_ALIGN_16 },
    { X86::SUB16rr, X86::SUB16rm, 0 },
    { X86::SUB32rr, X86::SUB32rm, 0 },
    { X86::SUB64rr, X86::SUB64rm, 0 },
    { X86::SUB8rr, X86::SUB8rm, 0 },
    { X86::SUBPDrr, X86::SUBPDrm, TB_ALIGN_16 },
    { X86::SUBPSrr, X86::SUBPSrm, TB_ALIGN_16 },
    { X86::SUBSDrr, X86::SUBSDrm, 0 },
    { X86::SUBSDrr_Int, X86::SUBSDrm_Int, 0 },
    { X86::SUBSSrr, X86::SUBSSrm, 0 },
    { X86::SUBSSrr_Int, X86::SUBSSrm_Int, 0 },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm, TB_ALIGN_16 },
    { X86::UNPCKHPSrr, X86::UNPCKHPSrm, TB_ALIGN_16 },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm, TB_ALIGN_16 },
    { X86::UNPCKLPSrr, X86::UNPCKLPSrm, TB_ALIGN_16 },
    { X86::XOR16rr, X86::XOR16rm, 0 },
    { X86::XOR32rr, X86::XOR32rm, 0 },
    { X86::XOR64rr, X86::XOR64rm, 0 },
    { X86::XOR8rr, X86::XOR8rm, 0 },
    { X86::XORPDrr, X86::XORPDrm, TB_ALIGN_16 },
    { X86::XORPSrr, X86::XORPSrm, TB_ALIGN_16 },

    // MMX version of foldable instructions
    { X86::MMX_CVTPI2PSirr, X86::MMX_CVTPI2PSirm, 0 },
    { X86::MMX_PACKSSDWirr, X86::MMX_PACKSSDWirm, 0 },
    { X86::MMX_PACKSSWBirr, X86::MMX_PACKSSWBirm, 0 },
    { X86::MMX_PACKUSWBirr, X86::MMX_PACKUSWBirm, 0 },
    { X86::MMX_PADDBirr, X86::MMX_PADDBirm, 0 },
    { X86::MMX_PADDDirr, X86::MMX_PADDDirm, 0 },
    { X86::MMX_PADDQirr, X86::MMX_PADDQirm, 0 },
    { X86::MMX_PADDSBirr, X86::MMX_PADDSBirm, 0 },
    { X86::MMX_PADDSWirr, X86::MMX_PADDSWirm, 0 },
    { X86::MMX_PADDUSBirr, X86::MMX_PADDUSBirm, 0 },
    { X86::MMX_PADDUSWirr, X86::MMX_PADDUSWirm, 0 },
    { X86::MMX_PADDWirr, X86::MMX_PADDWirm, 0 },
    { X86::MMX_PALIGNR64irr, X86::MMX_PALIGNR64irm, 0 },
    { X86::MMX_PANDNirr, X86::MMX_PANDNirm, 0 },
    { X86::MMX_PANDirr, X86::MMX_PANDirm, 0 },
    { X86::MMX_PAVGBirr, X86::MMX_PAVGBirm, 0 },
    { X86::MMX_PAVGWirr, X86::MMX_PAVGWirm, 0 },
    { X86::MMX_PCMPEQBirr, X86::MMX_PCMPEQBirm, 0 },
    { X86::MMX_PCMPEQDirr, X86::MMX_PCMPEQDirm, 0 },
    { X86::MMX_PCMPEQWirr, X86::MMX_PCMPEQWirm, 0 },
    { X86::MMX_PCMPGTBirr, X86::MMX_PCMPGTBirm, 0 },
    { X86::MMX_PCMPGTDirr, X86::MMX_PCMPGTDirm, 0 },
    { X86::MMX_PCMPGTWirr, X86::MMX_PCMPGTWirm, 0 },
    { X86::MMX_PHADDSWrr64, X86::MMX_PHADDSWrm64, 0 },
    { X86::MMX_PHADDWrr64, X86::MMX_PHADDWrm64, 0 },
    { X86::MMX_PHADDrr64, X86::MMX_PHADDrm64, 0 },
    { X86::MMX_PHSUBDrr64, X86::MMX_PHSUBDrm64, 0 },
    { X86::MMX_PHSUBSWrr64, X86::MMX_PHSUBSWrm64, 0 },
    { X86::MMX_PHSUBWrr64, X86::MMX_PHSUBWrm64, 0 },
    { X86::MMX_PINSRWirri, X86::MMX_PINSRWirmi, 0 },
    { X86::MMX_PMADDUBSWrr64, X86::MMX_PMADDUBSWrm64, 0 },
    { X86::MMX_PMADDWDirr, X86::MMX_PMADDWDirm, 0 },
    { X86::MMX_PMAXSWirr, X86::MMX_PMAXSWirm, 0 },
    { X86::MMX_PMAXUBirr, X86::MMX_PMAXUBirm, 0 },
    { X86::MMX_PMINSWirr, X86::MMX_PMINSWirm, 0 },
    { X86::MMX_PMINUBirr, X86::MMX_PMINUBirm, 0 },
    { X86::MMX_PMULHRSWrr64, X86::MMX_PMULHRSWrm64, 0 },
    { X86::MMX_PMULHUWirr, X86::MMX_PMULHUWirm, 0 },
    { X86::MMX_PMULHWirr, X86::MMX_PMULHWirm, 0 },
    { X86::MMX_PMULLWirr, X86::MMX_PMULLWirm, 0 },
    { X86::MMX_PMULUDQirr, X86::MMX_PMULUDQirm, 0 },
    { X86::MMX_PORirr, X86::MMX_PORirm, 0 },
    { X86::MMX_PSADBWirr, X86::MMX_PSADBWirm, 0 },
    { X86::MMX_PSHUFBrr64, X86::MMX_PSHUFBrm64, 0 },
    { X86::MMX_PSIGNBrr64, X86::MMX_PSIGNBrm64, 0 },
    { X86::MMX_PSIGNDrr64, X86::MMX_PSIGNDrm64, 0 },
    { X86::MMX_PSIGNWrr64, X86::MMX_PSIGNWrm64, 0 },
    { X86::MMX_PSLLDrr, X86::MMX_PSLLDrm, 0 },
    { X86::MMX_PSLLQrr, X86::MMX_PSLLQrm, 0 },
    { X86::MMX_PSLLWrr, X86::MMX_PSLLWrm, 0 },
    { X86::MMX_PSRADrr, X86::MMX_PSRADrm, 0 },
    { X86::MMX_PSRAWrr, X86::MMX_PSRAWrm, 0 },
    { X86::MMX_PSRLDrr, X86::MMX_PSRLDrm, 0 },
    { X86::MMX_PSRLQrr, X86::MMX_PSRLQrm, 0 },
    { X86::MMX_PSRLWrr, X86::MMX_PSRLWrm, 0 },
    { X86::MMX_PSUBBirr, X86::MMX_PSUBBirm, 0 },
    { X86::MMX_PSUBDirr, X86::MMX_PSUBDirm, 0 },
    { X86::MMX_PSUBQirr, X86::MMX_PSUBQirm, 0 },
    { X86::MMX_PSUBSBirr, X86::MMX_PSUBSBirm, 0 },
    { X86::MMX_PSUBSWirr, X86::MMX_PSUBSWirm, 0 },
    { X86::MMX_PSUBUSBirr, X86::MMX_PSUBUSBirm, 0 },
    { X86::MMX_PSUBUSWirr, X86::MMX_PSUBUSWirm, 0 },
    { X86::MMX_PSUBWirr, X86::MMX_PSUBWirm, 0 },
    { X86::MMX_PUNPCKHBWirr, X86::MMX_PUNPCKHBWirm, 0 },
    { X86::MMX_PUNPCKHDQirr, X86::MMX_PUNPCKHDQirm, 0 },
    { X86::MMX_PUNPCKHWDirr, X86::MMX_PUNPCKHWDirm, 0 },
    { X86::MMX_PUNPCKLBWirr, X86::MMX_PUNPCKLBWirm, 0 },
    { X86::MMX_PUNPCKLDQirr, X86::MMX_PUNPCKLDQirm, 0 },
    { X86::MMX_PUNPCKLWDirr, X86::MMX_PUNPCKLWDirm, 0 },
    { X86::MMX_PXORirr, X86::MMX_PXORirm, 0 },

    // 3DNow! version of foldable instructions
    { X86::PAVGUSBrr, X86::PAVGUSBrm, 0 },
    { X86::PFACCrr, X86::PFACCrm, 0 },
    { X86::PFADDrr, X86::PFADDrm, 0 },
    { X86::PFCMPEQrr, X86::PFCMPEQrm, 0 },
    { X86::PFCMPGErr, X86::PFCMPGErm, 0 },
    { X86::PFCMPGTrr, X86::PFCMPGTrm, 0 },
    { X86::PFMAXrr, X86::PFMAXrm, 0 },
    { X86::PFMINrr, X86::PFMINrm, 0 },
    { X86::PFMULrr, X86::PFMULrm, 0 },
    { X86::PFNACCrr, X86::PFNACCrm, 0 },
    { X86::PFPNACCrr, X86::PFPNACCrm, 0 },
    { X86::PFRCPIT1rr, X86::PFRCPIT1rm, 0 },
    { X86::PFRCPIT2rr, X86::PFRCPIT2rm, 0 },
    { X86::PFRSQIT1rr, X86::PFRSQIT1rm, 0 },
    { X86::PFSUBrr, X86::PFSUBrm, 0 },
    { X86::PFSUBRrr, X86::PFSUBRrm, 0 },
    { X86::PMULHRWrr, X86::PMULHRWrm, 0 },

    // AVX 128-bit versions of foldable instructions
    { X86::VCVTSD2SSrr, X86::VCVTSD2SSrm, 0 },
    { X86::Int_VCVTSD2SSrr, X86::Int_VCVTSD2SSrm, 0 },
    { X86::VCVTSI2SD64rr, X86::VCVTSI2SD64rm, 0 },
    { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm, 0 },
    { X86::VCVTSI2SDrr, X86::VCVTSI2SDrm, 0 },
    { X86::Int_VCVTSI2SDrr, X86::Int_VCVTSI2SDrm, 0 },
    { X86::VCVTSI2SS64rr, X86::VCVTSI2SS64rm, 0 },
    { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm, 0 },
    { X86::VCVTSI2SSrr, X86::VCVTSI2SSrm, 0 },
    { X86::Int_VCVTSI2SSrr, X86::Int_VCVTSI2SSrm, 0 },
    { X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 },
    { X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
    { X86::VRCPSSr, X86::VRCPSSm, 0 },
    { X86::VRCPSSr_Int, X86::VRCPSSm_Int, 0 },
    { X86::VRSQRTSSr, X86::VRSQRTSSm, 0 },
    { X86::VRSQRTSSr_Int, X86::VRSQRTSSm_Int, 0 },
    { X86::VSQRTSDr, X86::VSQRTSDm, 0 },
    { X86::VSQRTSDr_Int, X86::VSQRTSDm_Int, 0 },
    { X86::VSQRTSSr, X86::VSQRTSSm, 0 },
    { X86::VSQRTSSr_Int, X86::VSQRTSSm_Int, 0 },
    { X86::VADDPDrr, X86::VADDPDrm, 0 },
    { X86::VADDPSrr, X86::VADDPSrm, 0 },
    { X86::VADDSDrr, X86::VADDSDrm, 0 },
    { X86::VADDSDrr_Int, X86::VADDSDrm_Int, 0 },
    { X86::VADDSSrr, X86::VADDSSrm, 0 },
    { X86::VADDSSrr_Int, X86::VADDSSrm_Int, 0 },
X86::VADDSUBPDrm, 0 }, 1251 { X86::VADDSUBPSrr, X86::VADDSUBPSrm, 0 }, 1252 { X86::VANDNPDrr, X86::VANDNPDrm, 0 }, 1253 { X86::VANDNPSrr, X86::VANDNPSrm, 0 }, 1254 { X86::VANDPDrr, X86::VANDPDrm, 0 }, 1255 { X86::VANDPSrr, X86::VANDPSrm, 0 }, 1256 { X86::VBLENDPDrri, X86::VBLENDPDrmi, 0 }, 1257 { X86::VBLENDPSrri, X86::VBLENDPSrmi, 0 }, 1258 { X86::VBLENDVPDrr, X86::VBLENDVPDrm, 0 }, 1259 { X86::VBLENDVPSrr, X86::VBLENDVPSrm, 0 }, 1260 { X86::VCMPPDrri, X86::VCMPPDrmi, 0 }, 1261 { X86::VCMPPSrri, X86::VCMPPSrmi, 0 }, 1262 { X86::VCMPSDrr, X86::VCMPSDrm, 0 }, 1263 { X86::VCMPSSrr, X86::VCMPSSrm, 0 }, 1264 { X86::VDIVPDrr, X86::VDIVPDrm, 0 }, 1265 { X86::VDIVPSrr, X86::VDIVPSrm, 0 }, 1266 { X86::VDIVSDrr, X86::VDIVSDrm, 0 }, 1267 { X86::VDIVSDrr_Int, X86::VDIVSDrm_Int, 0 }, 1268 { X86::VDIVSSrr, X86::VDIVSSrm, 0 }, 1269 { X86::VDIVSSrr_Int, X86::VDIVSSrm_Int, 0 }, 1270 { X86::VDPPDrri, X86::VDPPDrmi, 0 }, 1271 { X86::VDPPSrri, X86::VDPPSrmi, 0 }, 1272 // Do not fold VFs* loads because there are no scalar load variants for 1273 // these instructions. When folded, the load is required to be 128-bits, so 1274 // the load size would not match. 1275 { X86::VFvANDNPDrr, X86::VFvANDNPDrm, 0 }, 1276 { X86::VFvANDNPSrr, X86::VFvANDNPSrm, 0 }, 1277 { X86::VFvANDPDrr, X86::VFvANDPDrm, 0 }, 1278 { X86::VFvANDPSrr, X86::VFvANDPSrm, 0 }, 1279 { X86::VFvORPDrr, X86::VFvORPDrm, 0 }, 1280 { X86::VFvORPSrr, X86::VFvORPSrm, 0 }, 1281 { X86::VFvXORPDrr, X86::VFvXORPDrm, 0 }, 1282 { X86::VFvXORPSrr, X86::VFvXORPSrm, 0 }, 1283 { X86::VHADDPDrr, X86::VHADDPDrm, 0 }, 1284 { X86::VHADDPSrr, X86::VHADDPSrm, 0 }, 1285 { X86::VHSUBPDrr, X86::VHSUBPDrm, 0 }, 1286 { X86::VHSUBPSrr, X86::VHSUBPSrm, 0 }, 1287 { X86::Int_VCMPSDrr, X86::Int_VCMPSDrm, 0 }, 1288 { X86::Int_VCMPSSrr, X86::Int_VCMPSSrm, 0 }, 1289 { X86::VMAXPDrr, X86::VMAXPDrm, 0 }, 1290 { X86::VMAXPSrr, X86::VMAXPSrm, 0 }, 1291 { X86::VMAXSDrr, X86::VMAXSDrm, 0 }, 1292 { X86::VMAXSDrr_Int, X86::VMAXSDrm_Int, 0 }, 1293 { X86::VMAXSSrr, X86::VMAXSSrm, 0 }, 1294 { X86::VMAXSSrr_Int, X86::VMAXSSrm_Int, 0 }, 1295 { X86::VMINPDrr, X86::VMINPDrm, 0 }, 1296 { X86::VMINPSrr, X86::VMINPSrm, 0 }, 1297 { X86::VMINSDrr, X86::VMINSDrm, 0 }, 1298 { X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 }, 1299 { X86::VMINSSrr, X86::VMINSSrm, 0 }, 1300 { X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 }, 1301 { X86::VMPSADBWrri, X86::VMPSADBWrmi, 0 }, 1302 { X86::VMULPDrr, X86::VMULPDrm, 0 }, 1303 { X86::VMULPSrr, X86::VMULPSrm, 0 }, 1304 { X86::VMULSDrr, X86::VMULSDrm, 0 }, 1305 { X86::VMULSDrr_Int, X86::VMULSDrm_Int, 0 }, 1306 { X86::VMULSSrr, X86::VMULSSrm, 0 }, 1307 { X86::VMULSSrr_Int, X86::VMULSSrm_Int, 0 }, 1308 { X86::VORPDrr, X86::VORPDrm, 0 }, 1309 { X86::VORPSrr, X86::VORPSrm, 0 }, 1310 { X86::VPACKSSDWrr, X86::VPACKSSDWrm, 0 }, 1311 { X86::VPACKSSWBrr, X86::VPACKSSWBrm, 0 }, 1312 { X86::VPACKUSDWrr, X86::VPACKUSDWrm, 0 }, 1313 { X86::VPACKUSWBrr, X86::VPACKUSWBrm, 0 }, 1314 { X86::VPADDBrr, X86::VPADDBrm, 0 }, 1315 { X86::VPADDDrr, X86::VPADDDrm, 0 }, 1316 { X86::VPADDQrr, X86::VPADDQrm, 0 }, 1317 { X86::VPADDSBrr, X86::VPADDSBrm, 0 }, 1318 { X86::VPADDSWrr, X86::VPADDSWrm, 0 }, 1319 { X86::VPADDUSBrr, X86::VPADDUSBrm, 0 }, 1320 { X86::VPADDUSWrr, X86::VPADDUSWrm, 0 }, 1321 { X86::VPADDWrr, X86::VPADDWrm, 0 }, 1322 { X86::VPALIGNR128rr, X86::VPALIGNR128rm, 0 }, 1323 { X86::VPANDNrr, X86::VPANDNrm, 0 }, 1324 { X86::VPANDrr, X86::VPANDrm, 0 }, 1325 { X86::VPAVGBrr, X86::VPAVGBrm, 0 }, 1326 { X86::VPAVGWrr, X86::VPAVGWrm, 0 }, 1327 { X86::VPBLENDVBrr, X86::VPBLENDVBrm, 0 }, 1328 { 
X86::VPBLENDWrri, X86::VPBLENDWrmi, 0 }, 1329 { X86::VPCLMULQDQrr, X86::VPCLMULQDQrm, 0 }, 1330 { X86::VPCMPEQBrr, X86::VPCMPEQBrm, 0 }, 1331 { X86::VPCMPEQDrr, X86::VPCMPEQDrm, 0 }, 1332 { X86::VPCMPEQQrr, X86::VPCMPEQQrm, 0 }, 1333 { X86::VPCMPEQWrr, X86::VPCMPEQWrm, 0 }, 1334 { X86::VPCMPGTBrr, X86::VPCMPGTBrm, 0 }, 1335 { X86::VPCMPGTDrr, X86::VPCMPGTDrm, 0 }, 1336 { X86::VPCMPGTQrr, X86::VPCMPGTQrm, 0 }, 1337 { X86::VPCMPGTWrr, X86::VPCMPGTWrm, 0 }, 1338 { X86::VPHADDDrr, X86::VPHADDDrm, 0 }, 1339 { X86::VPHADDSWrr128, X86::VPHADDSWrm128, 0 }, 1340 { X86::VPHADDWrr, X86::VPHADDWrm, 0 }, 1341 { X86::VPHSUBDrr, X86::VPHSUBDrm, 0 }, 1342 { X86::VPHSUBSWrr128, X86::VPHSUBSWrm128, 0 }, 1343 { X86::VPHSUBWrr, X86::VPHSUBWrm, 0 }, 1344 { X86::VPERMILPDrr, X86::VPERMILPDrm, 0 }, 1345 { X86::VPERMILPSrr, X86::VPERMILPSrm, 0 }, 1346 { X86::VPINSRBrr, X86::VPINSRBrm, 0 }, 1347 { X86::VPINSRDrr, X86::VPINSRDrm, 0 }, 1348 { X86::VPINSRQrr, X86::VPINSRQrm, 0 }, 1349 { X86::VPINSRWrri, X86::VPINSRWrmi, 0 }, 1350 { X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, 0 }, 1351 { X86::VPMADDWDrr, X86::VPMADDWDrm, 0 }, 1352 { X86::VPMAXSWrr, X86::VPMAXSWrm, 0 }, 1353 { X86::VPMAXUBrr, X86::VPMAXUBrm, 0 }, 1354 { X86::VPMINSWrr, X86::VPMINSWrm, 0 }, 1355 { X86::VPMINUBrr, X86::VPMINUBrm, 0 }, 1356 { X86::VPMINSBrr, X86::VPMINSBrm, 0 }, 1357 { X86::VPMINSDrr, X86::VPMINSDrm, 0 }, 1358 { X86::VPMINUDrr, X86::VPMINUDrm, 0 }, 1359 { X86::VPMINUWrr, X86::VPMINUWrm, 0 }, 1360 { X86::VPMAXSBrr, X86::VPMAXSBrm, 0 }, 1361 { X86::VPMAXSDrr, X86::VPMAXSDrm, 0 }, 1362 { X86::VPMAXUDrr, X86::VPMAXUDrm, 0 }, 1363 { X86::VPMAXUWrr, X86::VPMAXUWrm, 0 }, 1364 { X86::VPMULDQrr, X86::VPMULDQrm, 0 }, 1365 { X86::VPMULHRSWrr128, X86::VPMULHRSWrm128, 0 }, 1366 { X86::VPMULHUWrr, X86::VPMULHUWrm, 0 }, 1367 { X86::VPMULHWrr, X86::VPMULHWrm, 0 }, 1368 { X86::VPMULLDrr, X86::VPMULLDrm, 0 }, 1369 { X86::VPMULLWrr, X86::VPMULLWrm, 0 }, 1370 { X86::VPMULUDQrr, X86::VPMULUDQrm, 0 }, 1371 { X86::VPORrr, X86::VPORrm, 0 }, 1372 { X86::VPSADBWrr, X86::VPSADBWrm, 0 }, 1373 { X86::VPSHUFBrr, X86::VPSHUFBrm, 0 }, 1374 { X86::VPSIGNBrr, X86::VPSIGNBrm, 0 }, 1375 { X86::VPSIGNWrr, X86::VPSIGNWrm, 0 }, 1376 { X86::VPSIGNDrr, X86::VPSIGNDrm, 0 }, 1377 { X86::VPSLLDrr, X86::VPSLLDrm, 0 }, 1378 { X86::VPSLLQrr, X86::VPSLLQrm, 0 }, 1379 { X86::VPSLLWrr, X86::VPSLLWrm, 0 }, 1380 { X86::VPSRADrr, X86::VPSRADrm, 0 }, 1381 { X86::VPSRAWrr, X86::VPSRAWrm, 0 }, 1382 { X86::VPSRLDrr, X86::VPSRLDrm, 0 }, 1383 { X86::VPSRLQrr, X86::VPSRLQrm, 0 }, 1384 { X86::VPSRLWrr, X86::VPSRLWrm, 0 }, 1385 { X86::VPSUBBrr, X86::VPSUBBrm, 0 }, 1386 { X86::VPSUBDrr, X86::VPSUBDrm, 0 }, 1387 { X86::VPSUBQrr, X86::VPSUBQrm, 0 }, 1388 { X86::VPSUBSBrr, X86::VPSUBSBrm, 0 }, 1389 { X86::VPSUBSWrr, X86::VPSUBSWrm, 0 }, 1390 { X86::VPSUBUSBrr, X86::VPSUBUSBrm, 0 }, 1391 { X86::VPSUBUSWrr, X86::VPSUBUSWrm, 0 }, 1392 { X86::VPSUBWrr, X86::VPSUBWrm, 0 }, 1393 { X86::VPUNPCKHBWrr, X86::VPUNPCKHBWrm, 0 }, 1394 { X86::VPUNPCKHDQrr, X86::VPUNPCKHDQrm, 0 }, 1395 { X86::VPUNPCKHQDQrr, X86::VPUNPCKHQDQrm, 0 }, 1396 { X86::VPUNPCKHWDrr, X86::VPUNPCKHWDrm, 0 }, 1397 { X86::VPUNPCKLBWrr, X86::VPUNPCKLBWrm, 0 }, 1398 { X86::VPUNPCKLDQrr, X86::VPUNPCKLDQrm, 0 }, 1399 { X86::VPUNPCKLQDQrr, X86::VPUNPCKLQDQrm, 0 }, 1400 { X86::VPUNPCKLWDrr, X86::VPUNPCKLWDrm, 0 }, 1401 { X86::VPXORrr, X86::VPXORrm, 0 }, 1402 { X86::VROUNDSDr, X86::VROUNDSDm, 0 }, 1403 { X86::VROUNDSSr, X86::VROUNDSSm, 0 }, 1404 { X86::VSHUFPDrri, X86::VSHUFPDrmi, 0 }, 1405 { X86::VSHUFPSrri, X86::VSHUFPSrmi, 0 }, 1406 { X86::VSUBPDrr, 
X86::VSUBPDrm, 0 }, 1407 { X86::VSUBPSrr, X86::VSUBPSrm, 0 }, 1408 { X86::VSUBSDrr, X86::VSUBSDrm, 0 }, 1409 { X86::VSUBSDrr_Int, X86::VSUBSDrm_Int, 0 }, 1410 { X86::VSUBSSrr, X86::VSUBSSrm, 0 }, 1411 { X86::VSUBSSrr_Int, X86::VSUBSSrm_Int, 0 }, 1412 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrm, 0 }, 1413 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrm, 0 }, 1414 { X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, 0 }, 1415 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, 0 }, 1416 { X86::VXORPDrr, X86::VXORPDrm, 0 }, 1417 { X86::VXORPSrr, X86::VXORPSrm, 0 }, 1418 1419 // AVX 256-bit foldable instructions 1420 { X86::VADDPDYrr, X86::VADDPDYrm, 0 }, 1421 { X86::VADDPSYrr, X86::VADDPSYrm, 0 }, 1422 { X86::VADDSUBPDYrr, X86::VADDSUBPDYrm, 0 }, 1423 { X86::VADDSUBPSYrr, X86::VADDSUBPSYrm, 0 }, 1424 { X86::VANDNPDYrr, X86::VANDNPDYrm, 0 }, 1425 { X86::VANDNPSYrr, X86::VANDNPSYrm, 0 }, 1426 { X86::VANDPDYrr, X86::VANDPDYrm, 0 }, 1427 { X86::VANDPSYrr, X86::VANDPSYrm, 0 }, 1428 { X86::VBLENDPDYrri, X86::VBLENDPDYrmi, 0 }, 1429 { X86::VBLENDPSYrri, X86::VBLENDPSYrmi, 0 }, 1430 { X86::VBLENDVPDYrr, X86::VBLENDVPDYrm, 0 }, 1431 { X86::VBLENDVPSYrr, X86::VBLENDVPSYrm, 0 }, 1432 { X86::VCMPPDYrri, X86::VCMPPDYrmi, 0 }, 1433 { X86::VCMPPSYrri, X86::VCMPPSYrmi, 0 }, 1434 { X86::VDIVPDYrr, X86::VDIVPDYrm, 0 }, 1435 { X86::VDIVPSYrr, X86::VDIVPSYrm, 0 }, 1436 { X86::VDPPSYrri, X86::VDPPSYrmi, 0 }, 1437 { X86::VHADDPDYrr, X86::VHADDPDYrm, 0 }, 1438 { X86::VHADDPSYrr, X86::VHADDPSYrm, 0 }, 1439 { X86::VHSUBPDYrr, X86::VHSUBPDYrm, 0 }, 1440 { X86::VHSUBPSYrr, X86::VHSUBPSYrm, 0 }, 1441 { X86::VINSERTF128rr, X86::VINSERTF128rm, 0 }, 1442 { X86::VMAXPDYrr, X86::VMAXPDYrm, 0 }, 1443 { X86::VMAXPSYrr, X86::VMAXPSYrm, 0 }, 1444 { X86::VMINPDYrr, X86::VMINPDYrm, 0 }, 1445 { X86::VMINPSYrr, X86::VMINPSYrm, 0 }, 1446 { X86::VMULPDYrr, X86::VMULPDYrm, 0 }, 1447 { X86::VMULPSYrr, X86::VMULPSYrm, 0 }, 1448 { X86::VORPDYrr, X86::VORPDYrm, 0 }, 1449 { X86::VORPSYrr, X86::VORPSYrm, 0 }, 1450 { X86::VPERM2F128rr, X86::VPERM2F128rm, 0 }, 1451 { X86::VPERMILPDYrr, X86::VPERMILPDYrm, 0 }, 1452 { X86::VPERMILPSYrr, X86::VPERMILPSYrm, 0 }, 1453 { X86::VSHUFPDYrri, X86::VSHUFPDYrmi, 0 }, 1454 { X86::VSHUFPSYrri, X86::VSHUFPSYrmi, 0 }, 1455 { X86::VSUBPDYrr, X86::VSUBPDYrm, 0 }, 1456 { X86::VSUBPSYrr, X86::VSUBPSYrm, 0 }, 1457 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrm, 0 }, 1458 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrm, 0 }, 1459 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrm, 0 }, 1460 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrm, 0 }, 1461 { X86::VXORPDYrr, X86::VXORPDYrm, 0 }, 1462 { X86::VXORPSYrr, X86::VXORPSYrm, 0 }, 1463 1464 // AVX2 foldable instructions 1465 { X86::VINSERTI128rr, X86::VINSERTI128rm, 0 }, 1466 { X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, 0 }, 1467 { X86::VPACKSSWBYrr, X86::VPACKSSWBYrm, 0 }, 1468 { X86::VPACKUSDWYrr, X86::VPACKUSDWYrm, 0 }, 1469 { X86::VPACKUSWBYrr, X86::VPACKUSWBYrm, 0 }, 1470 { X86::VPADDBYrr, X86::VPADDBYrm, 0 }, 1471 { X86::VPADDDYrr, X86::VPADDDYrm, 0 }, 1472 { X86::VPADDQYrr, X86::VPADDQYrm, 0 }, 1473 { X86::VPADDSBYrr, X86::VPADDSBYrm, 0 }, 1474 { X86::VPADDSWYrr, X86::VPADDSWYrm, 0 }, 1475 { X86::VPADDUSBYrr, X86::VPADDUSBYrm, 0 }, 1476 { X86::VPADDUSWYrr, X86::VPADDUSWYrm, 0 }, 1477 { X86::VPADDWYrr, X86::VPADDWYrm, 0 }, 1478 { X86::VPALIGNR256rr, X86::VPALIGNR256rm, 0 }, 1479 { X86::VPANDNYrr, X86::VPANDNYrm, 0 }, 1480 { X86::VPANDYrr, X86::VPANDYrm, 0 }, 1481 { X86::VPAVGBYrr, X86::VPAVGBYrm, 0 }, 1482 { X86::VPAVGWYrr, X86::VPAVGWYrm, 0 }, 1483 { X86::VPBLENDDrri, X86::VPBLENDDrmi, 0 }, 1484 { X86::VPBLENDDYrri, X86::VPBLENDDYrmi, 0 }, 1485 { 
X86::VPBLENDVBYrr, X86::VPBLENDVBYrm, 0 }, 1486 { X86::VPBLENDWYrri, X86::VPBLENDWYrmi, 0 }, 1487 { X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, 0 }, 1488 { X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, 0 }, 1489 { X86::VPCMPEQQYrr, X86::VPCMPEQQYrm, 0 }, 1490 { X86::VPCMPEQWYrr, X86::VPCMPEQWYrm, 0 }, 1491 { X86::VPCMPGTBYrr, X86::VPCMPGTBYrm, 0 }, 1492 { X86::VPCMPGTDYrr, X86::VPCMPGTDYrm, 0 }, 1493 { X86::VPCMPGTQYrr, X86::VPCMPGTQYrm, 0 }, 1494 { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, 0 }, 1495 { X86::VPERM2I128rr, X86::VPERM2I128rm, 0 }, 1496 { X86::VPERMDYrr, X86::VPERMDYrm, 0 }, 1497 { X86::VPERMPSYrr, X86::VPERMPSYrm, 0 }, 1498 { X86::VPHADDDYrr, X86::VPHADDDYrm, 0 }, 1499 { X86::VPHADDSWrr256, X86::VPHADDSWrm256, 0 }, 1500 { X86::VPHADDWYrr, X86::VPHADDWYrm, 0 }, 1501 { X86::VPHSUBDYrr, X86::VPHSUBDYrm, 0 }, 1502 { X86::VPHSUBSWrr256, X86::VPHSUBSWrm256, 0 }, 1503 { X86::VPHSUBWYrr, X86::VPHSUBWYrm, 0 }, 1504 { X86::VPMADDUBSWrr256, X86::VPMADDUBSWrm256, 0 }, 1505 { X86::VPMADDWDYrr, X86::VPMADDWDYrm, 0 }, 1506 { X86::VPMAXSWYrr, X86::VPMAXSWYrm, 0 }, 1507 { X86::VPMAXUBYrr, X86::VPMAXUBYrm, 0 }, 1508 { X86::VPMINSWYrr, X86::VPMINSWYrm, 0 }, 1509 { X86::VPMINUBYrr, X86::VPMINUBYrm, 0 }, 1510 { X86::VPMINSBYrr, X86::VPMINSBYrm, 0 }, 1511 { X86::VPMINSDYrr, X86::VPMINSDYrm, 0 }, 1512 { X86::VPMINUDYrr, X86::VPMINUDYrm, 0 }, 1513 { X86::VPMINUWYrr, X86::VPMINUWYrm, 0 }, 1514 { X86::VPMAXSBYrr, X86::VPMAXSBYrm, 0 }, 1515 { X86::VPMAXSDYrr, X86::VPMAXSDYrm, 0 }, 1516 { X86::VPMAXUDYrr, X86::VPMAXUDYrm, 0 }, 1517 { X86::VPMAXUWYrr, X86::VPMAXUWYrm, 0 }, 1518 { X86::VMPSADBWYrri, X86::VMPSADBWYrmi, 0 }, 1519 { X86::VPMULDQYrr, X86::VPMULDQYrm, 0 }, 1520 { X86::VPMULHRSWrr256, X86::VPMULHRSWrm256, 0 }, 1521 { X86::VPMULHUWYrr, X86::VPMULHUWYrm, 0 }, 1522 { X86::VPMULHWYrr, X86::VPMULHWYrm, 0 }, 1523 { X86::VPMULLDYrr, X86::VPMULLDYrm, 0 }, 1524 { X86::VPMULLWYrr, X86::VPMULLWYrm, 0 }, 1525 { X86::VPMULUDQYrr, X86::VPMULUDQYrm, 0 }, 1526 { X86::VPORYrr, X86::VPORYrm, 0 }, 1527 { X86::VPSADBWYrr, X86::VPSADBWYrm, 0 }, 1528 { X86::VPSHUFBYrr, X86::VPSHUFBYrm, 0 }, 1529 { X86::VPSIGNBYrr, X86::VPSIGNBYrm, 0 }, 1530 { X86::VPSIGNWYrr, X86::VPSIGNWYrm, 0 }, 1531 { X86::VPSIGNDYrr, X86::VPSIGNDYrm, 0 }, 1532 { X86::VPSLLDYrr, X86::VPSLLDYrm, 0 }, 1533 { X86::VPSLLQYrr, X86::VPSLLQYrm, 0 }, 1534 { X86::VPSLLWYrr, X86::VPSLLWYrm, 0 }, 1535 { X86::VPSLLVDrr, X86::VPSLLVDrm, 0 }, 1536 { X86::VPSLLVDYrr, X86::VPSLLVDYrm, 0 }, 1537 { X86::VPSLLVQrr, X86::VPSLLVQrm, 0 }, 1538 { X86::VPSLLVQYrr, X86::VPSLLVQYrm, 0 }, 1539 { X86::VPSRADYrr, X86::VPSRADYrm, 0 }, 1540 { X86::VPSRAWYrr, X86::VPSRAWYrm, 0 }, 1541 { X86::VPSRAVDrr, X86::VPSRAVDrm, 0 }, 1542 { X86::VPSRAVDYrr, X86::VPSRAVDYrm, 0 }, 1543 { X86::VPSRLDYrr, X86::VPSRLDYrm, 0 }, 1544 { X86::VPSRLQYrr, X86::VPSRLQYrm, 0 }, 1545 { X86::VPSRLWYrr, X86::VPSRLWYrm, 0 }, 1546 { X86::VPSRLVDrr, X86::VPSRLVDrm, 0 }, 1547 { X86::VPSRLVDYrr, X86::VPSRLVDYrm, 0 }, 1548 { X86::VPSRLVQrr, X86::VPSRLVQrm, 0 }, 1549 { X86::VPSRLVQYrr, X86::VPSRLVQYrm, 0 }, 1550 { X86::VPSUBBYrr, X86::VPSUBBYrm, 0 }, 1551 { X86::VPSUBDYrr, X86::VPSUBDYrm, 0 }, 1552 { X86::VPSUBQYrr, X86::VPSUBQYrm, 0 }, 1553 { X86::VPSUBSBYrr, X86::VPSUBSBYrm, 0 }, 1554 { X86::VPSUBSWYrr, X86::VPSUBSWYrm, 0 }, 1555 { X86::VPSUBUSBYrr, X86::VPSUBUSBYrm, 0 }, 1556 { X86::VPSUBUSWYrr, X86::VPSUBUSWYrm, 0 }, 1557 { X86::VPSUBWYrr, X86::VPSUBWYrm, 0 }, 1558 { X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, 0 }, 1559 { X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, 0 }, 1560 { X86::VPUNPCKHQDQYrr, X86::VPUNPCKHQDQYrm, 0 }, 1561 { 
X86::VPUNPCKHWDYrr, X86::VPUNPCKHWDYrm, 0 }, 1562 { X86::VPUNPCKLBWYrr, X86::VPUNPCKLBWYrm, 0 }, 1563 { X86::VPUNPCKLDQYrr, X86::VPUNPCKLDQYrm, 0 }, 1564 { X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, 0 }, 1565 { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, 0 }, 1566 { X86::VPXORYrr, X86::VPXORYrm, 0 }, 1567 1568 // FMA4 foldable patterns 1569 { X86::VFMADDSS4rr, X86::VFMADDSS4mr, TB_ALIGN_NONE }, 1570 { X86::VFMADDSD4rr, X86::VFMADDSD4mr, TB_ALIGN_NONE }, 1571 { X86::VFMADDPS4rr, X86::VFMADDPS4mr, TB_ALIGN_NONE }, 1572 { X86::VFMADDPD4rr, X86::VFMADDPD4mr, TB_ALIGN_NONE }, 1573 { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, TB_ALIGN_NONE }, 1574 { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, TB_ALIGN_NONE }, 1575 { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, TB_ALIGN_NONE }, 1576 { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, TB_ALIGN_NONE }, 1577 { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, TB_ALIGN_NONE }, 1578 { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, TB_ALIGN_NONE }, 1579 { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, TB_ALIGN_NONE }, 1580 { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, TB_ALIGN_NONE }, 1581 { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, TB_ALIGN_NONE }, 1582 { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, TB_ALIGN_NONE }, 1583 { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, TB_ALIGN_NONE }, 1584 { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, TB_ALIGN_NONE }, 1585 { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, TB_ALIGN_NONE }, 1586 { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, TB_ALIGN_NONE }, 1587 { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, TB_ALIGN_NONE }, 1588 { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, TB_ALIGN_NONE }, 1589 { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, TB_ALIGN_NONE }, 1590 { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, TB_ALIGN_NONE }, 1591 { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, TB_ALIGN_NONE }, 1592 { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, TB_ALIGN_NONE }, 1593 { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, TB_ALIGN_NONE }, 1594 { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, TB_ALIGN_NONE }, 1595 { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, TB_ALIGN_NONE }, 1596 { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, TB_ALIGN_NONE }, 1597 { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, TB_ALIGN_NONE }, 1598 { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, TB_ALIGN_NONE }, 1599 { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, TB_ALIGN_NONE }, 1600 { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_NONE }, 1601 1602 // XOP foldable instructions 1603 { X86::VPCMOVrr, X86::VPCMOVmr, 0 }, 1604 { X86::VPCMOVrrY, X86::VPCMOVmrY, 0 }, 1605 { X86::VPCOMBri, X86::VPCOMBmi, 0 }, 1606 { X86::VPCOMDri, X86::VPCOMDmi, 0 }, 1607 { X86::VPCOMQri, X86::VPCOMQmi, 0 }, 1608 { X86::VPCOMWri, X86::VPCOMWmi, 0 }, 1609 { X86::VPCOMUBri, X86::VPCOMUBmi, 0 }, 1610 { X86::VPCOMUDri, X86::VPCOMUDmi, 0 }, 1611 { X86::VPCOMUQri, X86::VPCOMUQmi, 0 }, 1612 { X86::VPCOMUWri, X86::VPCOMUWmi, 0 }, 1613 { X86::VPERMIL2PDrr, X86::VPERMIL2PDmr, 0 }, 1614 { X86::VPERMIL2PDrrY, X86::VPERMIL2PDmrY, 0 }, 1615 { X86::VPERMIL2PSrr, X86::VPERMIL2PSmr, 0 }, 1616 { X86::VPERMIL2PSrrY, X86::VPERMIL2PSmrY, 0 }, 1617 { X86::VPMACSDDrr, X86::VPMACSDDrm, 0 }, 1618 { X86::VPMACSDQHrr, X86::VPMACSDQHrm, 0 }, 1619 { X86::VPMACSDQLrr, X86::VPMACSDQLrm, 0 }, 1620 { X86::VPMACSSDDrr, X86::VPMACSSDDrm, 0 }, 1621 { X86::VPMACSSDQHrr, X86::VPMACSSDQHrm, 0 }, 1622 { X86::VPMACSSDQLrr, X86::VPMACSSDQLrm, 0 }, 1623 { X86::VPMACSSWDrr, X86::VPMACSSWDrm, 0 }, 1624 { X86::VPMACSSWWrr, X86::VPMACSSWWrm, 0 }, 1625 { X86::VPMACSWDrr, X86::VPMACSWDrm, 0 }, 1626 { X86::VPMACSWWrr, X86::VPMACSWWrm, 0 }, 1627 { X86::VPMADCSSWDrr, X86::VPMADCSSWDrm, 0 }, 1628 
{ X86::VPMADCSWDrr, X86::VPMADCSWDrm, 0 }, 1629 { X86::VPPERMrr, X86::VPPERMmr, 0 }, 1630 { X86::VPROTBrr, X86::VPROTBrm, 0 }, 1631 { X86::VPROTDrr, X86::VPROTDrm, 0 }, 1632 { X86::VPROTQrr, X86::VPROTQrm, 0 }, 1633 { X86::VPROTWrr, X86::VPROTWrm, 0 }, 1634 { X86::VPSHABrr, X86::VPSHABrm, 0 }, 1635 { X86::VPSHADrr, X86::VPSHADrm, 0 }, 1636 { X86::VPSHAQrr, X86::VPSHAQrm, 0 }, 1637 { X86::VPSHAWrr, X86::VPSHAWrm, 0 }, 1638 { X86::VPSHLBrr, X86::VPSHLBrm, 0 }, 1639 { X86::VPSHLDrr, X86::VPSHLDrm, 0 }, 1640 { X86::VPSHLQrr, X86::VPSHLQrm, 0 }, 1641 { X86::VPSHLWrr, X86::VPSHLWrm, 0 }, 1642 1643 // BMI/BMI2 foldable instructions 1644 { X86::ANDN32rr, X86::ANDN32rm, 0 }, 1645 { X86::ANDN64rr, X86::ANDN64rm, 0 }, 1646 { X86::MULX32rr, X86::MULX32rm, 0 }, 1647 { X86::MULX64rr, X86::MULX64rm, 0 }, 1648 { X86::PDEP32rr, X86::PDEP32rm, 0 }, 1649 { X86::PDEP64rr, X86::PDEP64rm, 0 }, 1650 { X86::PEXT32rr, X86::PEXT32rm, 0 }, 1651 { X86::PEXT64rr, X86::PEXT64rm, 0 }, 1652 1653 // ADX foldable instructions 1654 { X86::ADCX32rr, X86::ADCX32rm, 0 }, 1655 { X86::ADCX64rr, X86::ADCX64rm, 0 }, 1656 { X86::ADOX32rr, X86::ADOX32rm, 0 }, 1657 { X86::ADOX64rr, X86::ADOX64rm, 0 }, 1658 1659 // AVX-512 foldable instructions 1660 { X86::VADDPSZrr, X86::VADDPSZrm, 0 }, 1661 { X86::VADDPDZrr, X86::VADDPDZrm, 0 }, 1662 { X86::VSUBPSZrr, X86::VSUBPSZrm, 0 }, 1663 { X86::VSUBPDZrr, X86::VSUBPDZrm, 0 }, 1664 { X86::VMULPSZrr, X86::VMULPSZrm, 0 }, 1665 { X86::VMULPDZrr, X86::VMULPDZrm, 0 }, 1666 { X86::VDIVPSZrr, X86::VDIVPSZrm, 0 }, 1667 { X86::VDIVPDZrr, X86::VDIVPDZrm, 0 }, 1668 { X86::VMINPSZrr, X86::VMINPSZrm, 0 }, 1669 { X86::VMINPDZrr, X86::VMINPDZrm, 0 }, 1670 { X86::VMAXPSZrr, X86::VMAXPSZrm, 0 }, 1671 { X86::VMAXPDZrr, X86::VMAXPDZrm, 0 }, 1672 { X86::VPADDDZrr, X86::VPADDDZrm, 0 }, 1673 { X86::VPADDQZrr, X86::VPADDQZrm, 0 }, 1674 { X86::VPERMPDZri, X86::VPERMPDZmi, 0 }, 1675 { X86::VPERMPSZrr, X86::VPERMPSZrm, 0 }, 1676 { X86::VPMAXSDZrr, X86::VPMAXSDZrm, 0 }, 1677 { X86::VPMAXSQZrr, X86::VPMAXSQZrm, 0 }, 1678 { X86::VPMAXUDZrr, X86::VPMAXUDZrm, 0 }, 1679 { X86::VPMAXUQZrr, X86::VPMAXUQZrm, 0 }, 1680 { X86::VPMINSDZrr, X86::VPMINSDZrm, 0 }, 1681 { X86::VPMINSQZrr, X86::VPMINSQZrm, 0 }, 1682 { X86::VPMINUDZrr, X86::VPMINUDZrm, 0 }, 1683 { X86::VPMINUQZrr, X86::VPMINUQZrm, 0 }, 1684 { X86::VPMULDQZrr, X86::VPMULDQZrm, 0 }, 1685 { X86::VPSLLVDZrr, X86::VPSLLVDZrm, 0 }, 1686 { X86::VPSLLVQZrr, X86::VPSLLVQZrm, 0 }, 1687 { X86::VPSRAVDZrr, X86::VPSRAVDZrm, 0 }, 1688 { X86::VPSRLVDZrr, X86::VPSRLVDZrm, 0 }, 1689 { X86::VPSRLVQZrr, X86::VPSRLVQZrm, 0 }, 1690 { X86::VPSUBDZrr, X86::VPSUBDZrm, 0 }, 1691 { X86::VPSUBQZrr, X86::VPSUBQZrm, 0 }, 1692 { X86::VSHUFPDZrri, X86::VSHUFPDZrmi, 0 }, 1693 { X86::VSHUFPSZrri, X86::VSHUFPSZrmi, 0 }, 1694 { X86::VALIGNQZrri, X86::VALIGNQZrmi, 0 }, 1695 { X86::VALIGNDZrri, X86::VALIGNDZrmi, 0 }, 1696 { X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 }, 1697 { X86::VBROADCASTSSZrkz, X86::VBROADCASTSSZmkz, TB_NO_REVERSE }, 1698 { X86::VBROADCASTSDZrkz, X86::VBROADCASTSDZmkz, TB_NO_REVERSE }, 1699 1700 // AVX-512{F,VL} foldable instructions 1701 { X86::VBROADCASTSSZ256rkz, X86::VBROADCASTSSZ256mkz, TB_NO_REVERSE }, 1702 { X86::VBROADCASTSDZ256rkz, X86::VBROADCASTSDZ256mkz, TB_NO_REVERSE }, 1703 { X86::VBROADCASTSSZ128rkz, X86::VBROADCASTSSZ128mkz, TB_NO_REVERSE }, 1704 1705 // AVX-512{F,VL} foldable instructions 1706 { X86::VADDPDZ128rr, X86::VADDPDZ128rm, 0 }, 1707 { X86::VADDPDZ256rr, X86::VADDPDZ256rm, 0 }, 1708 { X86::VADDPSZ128rr, X86::VADDPSZ128rm, 0 }, 1709 { X86::VADDPSZ256rr, 
X86::VADDPSZ256rm, 0 }, 1710 1711 // AES foldable instructions 1712 { X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 }, 1713 { X86::AESDECrr, X86::AESDECrm, TB_ALIGN_16 }, 1714 { X86::AESENCLASTrr, X86::AESENCLASTrm, TB_ALIGN_16 }, 1715 { X86::AESENCrr, X86::AESENCrm, TB_ALIGN_16 }, 1716 { X86::VAESDECLASTrr, X86::VAESDECLASTrm, 0 }, 1717 { X86::VAESDECrr, X86::VAESDECrm, 0 }, 1718 { X86::VAESENCLASTrr, X86::VAESENCLASTrm, 0 }, 1719 { X86::VAESENCrr, X86::VAESENCrm, 0 }, 1720 1721 // SHA foldable instructions 1722 { X86::SHA1MSG1rr, X86::SHA1MSG1rm, TB_ALIGN_16 }, 1723 { X86::SHA1MSG2rr, X86::SHA1MSG2rm, TB_ALIGN_16 }, 1724 { X86::SHA1NEXTErr, X86::SHA1NEXTErm, TB_ALIGN_16 }, 1725 { X86::SHA1RNDS4rri, X86::SHA1RNDS4rmi, TB_ALIGN_16 }, 1726 { X86::SHA256MSG1rr, X86::SHA256MSG1rm, TB_ALIGN_16 }, 1727 { X86::SHA256MSG2rr, X86::SHA256MSG2rm, TB_ALIGN_16 }, 1728 { X86::SHA256RNDS2rr, X86::SHA256RNDS2rm, TB_ALIGN_16 } 1729 }; 1730 1731 for (X86MemoryFoldTableEntry Entry : MemoryFoldTable2) { 1732 AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable, 1733 Entry.RegOp, Entry.MemOp, 1734 // Index 2, folded load 1735 Entry.Flags | TB_INDEX_2 | TB_FOLDED_LOAD); 1736 } 1737 1738 static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { 1739 // FMA foldable instructions 1740 { X86::VFMADDSSr231r, X86::VFMADDSSr231m, TB_ALIGN_NONE }, 1741 { X86::VFMADDSSr231r_Int, X86::VFMADDSSr231m_Int, TB_ALIGN_NONE }, 1742 { X86::VFMADDSDr231r, X86::VFMADDSDr231m, TB_ALIGN_NONE }, 1743 { X86::VFMADDSDr231r_Int, X86::VFMADDSDr231m_Int, TB_ALIGN_NONE }, 1744 { X86::VFMADDSSr132r, X86::VFMADDSSr132m, TB_ALIGN_NONE }, 1745 { X86::VFMADDSSr132r_Int, X86::VFMADDSSr132m_Int, TB_ALIGN_NONE }, 1746 { X86::VFMADDSDr132r, X86::VFMADDSDr132m, TB_ALIGN_NONE }, 1747 { X86::VFMADDSDr132r_Int, X86::VFMADDSDr132m_Int, TB_ALIGN_NONE }, 1748 { X86::VFMADDSSr213r, X86::VFMADDSSr213m, TB_ALIGN_NONE }, 1749 { X86::VFMADDSSr213r_Int, X86::VFMADDSSr213m_Int, TB_ALIGN_NONE }, 1750 { X86::VFMADDSDr213r, X86::VFMADDSDr213m, TB_ALIGN_NONE }, 1751 { X86::VFMADDSDr213r_Int, X86::VFMADDSDr213m_Int, TB_ALIGN_NONE }, 1752 1753 { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_NONE }, 1754 { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_NONE }, 1755 { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_NONE }, 1756 { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_NONE }, 1757 { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_NONE }, 1758 { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_NONE }, 1759 { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_NONE }, 1760 { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_NONE }, 1761 { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_NONE }, 1762 { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_NONE }, 1763 { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_NONE }, 1764 { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_NONE }, 1765 1766 { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, TB_ALIGN_NONE }, 1767 { X86::VFNMADDSSr231r_Int, X86::VFNMADDSSr231m_Int, TB_ALIGN_NONE }, 1768 { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, TB_ALIGN_NONE }, 1769 { X86::VFNMADDSDr231r_Int, X86::VFNMADDSDr231m_Int, TB_ALIGN_NONE }, 1770 { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, TB_ALIGN_NONE }, 1771 { X86::VFNMADDSSr132r_Int, X86::VFNMADDSSr132m_Int, TB_ALIGN_NONE }, 1772 { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, TB_ALIGN_NONE }, 1773 { X86::VFNMADDSDr132r_Int, X86::VFNMADDSDr132m_Int, TB_ALIGN_NONE }, 1774 { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, TB_ALIGN_NONE }, 1775 { X86::VFNMADDSSr213r_Int, 
X86::VFNMADDSSr213m_Int, TB_ALIGN_NONE }, 1776 { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, TB_ALIGN_NONE }, 1777 { X86::VFNMADDSDr213r_Int, X86::VFNMADDSDr213m_Int, TB_ALIGN_NONE }, 1778 1779 { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_NONE }, 1780 { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_NONE }, 1781 { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_NONE }, 1782 { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_NONE }, 1783 { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_NONE }, 1784 { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_NONE }, 1785 { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_NONE }, 1786 { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_NONE }, 1787 { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_NONE }, 1788 { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_NONE }, 1789 { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_NONE }, 1790 { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_NONE }, 1791 1792 { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, TB_ALIGN_NONE }, 1793 { X86::VFMSUBSSr231r_Int, X86::VFMSUBSSr231m_Int, TB_ALIGN_NONE }, 1794 { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, TB_ALIGN_NONE }, 1795 { X86::VFMSUBSDr231r_Int, X86::VFMSUBSDr231m_Int, TB_ALIGN_NONE }, 1796 { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, TB_ALIGN_NONE }, 1797 { X86::VFMSUBSSr132r_Int, X86::VFMSUBSSr132m_Int, TB_ALIGN_NONE }, 1798 { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, TB_ALIGN_NONE }, 1799 { X86::VFMSUBSDr132r_Int, X86::VFMSUBSDr132m_Int, TB_ALIGN_NONE }, 1800 { X86::VFMSUBSSr213r, X86::VFMSUBSSr213m, TB_ALIGN_NONE }, 1801 { X86::VFMSUBSSr213r_Int, X86::VFMSUBSSr213m_Int, TB_ALIGN_NONE }, 1802 { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, TB_ALIGN_NONE }, 1803 { X86::VFMSUBSDr213r_Int, X86::VFMSUBSDr213m_Int, TB_ALIGN_NONE }, 1804 1805 { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_NONE }, 1806 { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_NONE }, 1807 { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_NONE }, 1808 { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_NONE }, 1809 { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_NONE }, 1810 { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_NONE }, 1811 { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_NONE }, 1812 { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_NONE }, 1813 { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_NONE }, 1814 { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_NONE }, 1815 { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_NONE }, 1816 { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_NONE }, 1817 1818 { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, TB_ALIGN_NONE }, 1819 { X86::VFNMSUBSSr231r_Int, X86::VFNMSUBSSr231m_Int, TB_ALIGN_NONE }, 1820 { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, TB_ALIGN_NONE }, 1821 { X86::VFNMSUBSDr231r_Int, X86::VFNMSUBSDr231m_Int, TB_ALIGN_NONE }, 1822 { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, TB_ALIGN_NONE }, 1823 { X86::VFNMSUBSSr132r_Int, X86::VFNMSUBSSr132m_Int, TB_ALIGN_NONE }, 1824 { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, TB_ALIGN_NONE }, 1825 { X86::VFNMSUBSDr132r_Int, X86::VFNMSUBSDr132m_Int, TB_ALIGN_NONE }, 1826 { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, TB_ALIGN_NONE }, 1827 { X86::VFNMSUBSSr213r_Int, X86::VFNMSUBSSr213m_Int, TB_ALIGN_NONE }, 1828 { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, TB_ALIGN_NONE }, 1829 { X86::VFNMSUBSDr213r_Int, X86::VFNMSUBSDr213m_Int, TB_ALIGN_NONE }, 1830 1831 { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_NONE }, 1832 { X86::VFNMSUBPDr231r, 
X86::VFNMSUBPDr231m, TB_ALIGN_NONE }, 1833 { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_NONE }, 1834 { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_NONE }, 1835 { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_NONE }, 1836 { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_NONE }, 1837 { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_NONE }, 1838 { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_NONE }, 1839 { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_NONE }, 1840 { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_NONE }, 1841 { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_NONE }, 1842 { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_NONE }, 1843 1844 { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_NONE }, 1845 { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_NONE }, 1846 { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_NONE }, 1847 { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_NONE }, 1848 { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_NONE }, 1849 { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_NONE }, 1850 { X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_NONE }, 1851 { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_NONE }, 1852 { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_NONE }, 1853 { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_NONE }, 1854 { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_NONE }, 1855 { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_NONE }, 1856 1857 { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_NONE }, 1858 { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_NONE }, 1859 { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_NONE }, 1860 { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_NONE }, 1861 { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_NONE }, 1862 { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_NONE }, 1863 { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_NONE }, 1864 { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_NONE }, 1865 { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr132mY, TB_ALIGN_NONE }, 1866 { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_NONE }, 1867 { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_NONE }, 1868 { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_NONE }, 1869 1870 // FMA4 foldable patterns 1871 { X86::VFMADDSS4rr, X86::VFMADDSS4rm, TB_ALIGN_NONE }, 1872 { X86::VFMADDSD4rr, X86::VFMADDSD4rm, TB_ALIGN_NONE }, 1873 { X86::VFMADDPS4rr, X86::VFMADDPS4rm, TB_ALIGN_NONE }, 1874 { X86::VFMADDPD4rr, X86::VFMADDPD4rm, TB_ALIGN_NONE }, 1875 { X86::VFMADDPS4rrY, X86::VFMADDPS4rmY, TB_ALIGN_NONE }, 1876 { X86::VFMADDPD4rrY, X86::VFMADDPD4rmY, TB_ALIGN_NONE }, 1877 { X86::VFNMADDSS4rr, X86::VFNMADDSS4rm, TB_ALIGN_NONE }, 1878 { X86::VFNMADDSD4rr, X86::VFNMADDSD4rm, TB_ALIGN_NONE }, 1879 { X86::VFNMADDPS4rr, X86::VFNMADDPS4rm, TB_ALIGN_NONE }, 1880 { X86::VFNMADDPD4rr, X86::VFNMADDPD4rm, TB_ALIGN_NONE }, 1881 { X86::VFNMADDPS4rrY, X86::VFNMADDPS4rmY, TB_ALIGN_NONE }, 1882 { X86::VFNMADDPD4rrY, X86::VFNMADDPD4rmY, TB_ALIGN_NONE }, 1883 { X86::VFMSUBSS4rr, X86::VFMSUBSS4rm, TB_ALIGN_NONE }, 1884 { X86::VFMSUBSD4rr, X86::VFMSUBSD4rm, TB_ALIGN_NONE }, 1885 { X86::VFMSUBPS4rr, X86::VFMSUBPS4rm, TB_ALIGN_NONE }, 1886 { X86::VFMSUBPD4rr, X86::VFMSUBPD4rm, TB_ALIGN_NONE }, 1887 { X86::VFMSUBPS4rrY, X86::VFMSUBPS4rmY, TB_ALIGN_NONE }, 1888 { X86::VFMSUBPD4rrY, X86::VFMSUBPD4rmY, 
TB_ALIGN_NONE }, 1889 { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4rm, TB_ALIGN_NONE }, 1890 { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4rm, TB_ALIGN_NONE }, 1891 { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4rm, TB_ALIGN_NONE }, 1892 { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4rm, TB_ALIGN_NONE }, 1893 { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4rmY, TB_ALIGN_NONE }, 1894 { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4rmY, TB_ALIGN_NONE }, 1895 { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4rm, TB_ALIGN_NONE }, 1896 { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4rm, TB_ALIGN_NONE }, 1897 { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4rmY, TB_ALIGN_NONE }, 1898 { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4rmY, TB_ALIGN_NONE }, 1899 { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4rm, TB_ALIGN_NONE }, 1900 { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_NONE }, 1901 { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_NONE }, 1902 { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_NONE }, 1903 1904 // XOP foldable instructions 1905 { X86::VPCMOVrr, X86::VPCMOVrm, 0 }, 1906 { X86::VPCMOVrrY, X86::VPCMOVrmY, 0 }, 1907 { X86::VPERMIL2PDrr, X86::VPERMIL2PDrm, 0 }, 1908 { X86::VPERMIL2PDrrY, X86::VPERMIL2PDrmY, 0 }, 1909 { X86::VPERMIL2PSrr, X86::VPERMIL2PSrm, 0 }, 1910 { X86::VPERMIL2PSrrY, X86::VPERMIL2PSrmY, 0 }, 1911 { X86::VPPERMrr, X86::VPPERMrm, 0 }, 1912 1913 // AVX-512 VPERMI instructions with 3 source operands. 1914 { X86::VPERMI2Drr, X86::VPERMI2Drm, 0 }, 1915 { X86::VPERMI2Qrr, X86::VPERMI2Qrm, 0 }, 1916 { X86::VPERMI2PSrr, X86::VPERMI2PSrm, 0 }, 1917 { X86::VPERMI2PDrr, X86::VPERMI2PDrm, 0 }, 1918 { X86::VBLENDMPDZrr, X86::VBLENDMPDZrm, 0 }, 1919 { X86::VBLENDMPSZrr, X86::VBLENDMPSZrm, 0 }, 1920 { X86::VPBLENDMDZrr, X86::VPBLENDMDZrm, 0 }, 1921 { X86::VPBLENDMQZrr, X86::VPBLENDMQZrm, 0 }, 1922 { X86::VBROADCASTSSZrk, X86::VBROADCASTSSZmk, TB_NO_REVERSE }, 1923 { X86::VBROADCASTSDZrk, X86::VBROADCASTSDZmk, TB_NO_REVERSE }, 1924 { X86::VBROADCASTSSZ256rk, X86::VBROADCASTSSZ256mk, TB_NO_REVERSE }, 1925 { X86::VBROADCASTSDZ256rk, X86::VBROADCASTSDZ256mk, TB_NO_REVERSE }, 1926 { X86::VBROADCASTSSZ128rk, X86::VBROADCASTSSZ128mk, TB_NO_REVERSE }, 1927 // AVX-512 arithmetic instructions 1928 { X86::VADDPSZrrkz, X86::VADDPSZrmkz, 0 }, 1929 { X86::VADDPDZrrkz, X86::VADDPDZrmkz, 0 }, 1930 { X86::VSUBPSZrrkz, X86::VSUBPSZrmkz, 0 }, 1931 { X86::VSUBPDZrrkz, X86::VSUBPDZrmkz, 0 }, 1932 { X86::VMULPSZrrkz, X86::VMULPSZrmkz, 0 }, 1933 { X86::VMULPDZrrkz, X86::VMULPDZrmkz, 0 }, 1934 { X86::VDIVPSZrrkz, X86::VDIVPSZrmkz, 0 }, 1935 { X86::VDIVPDZrrkz, X86::VDIVPDZrmkz, 0 }, 1936 { X86::VMINPSZrrkz, X86::VMINPSZrmkz, 0 }, 1937 { X86::VMINPDZrrkz, X86::VMINPDZrmkz, 0 }, 1938 { X86::VMAXPSZrrkz, X86::VMAXPSZrmkz, 0 }, 1939 { X86::VMAXPDZrrkz, X86::VMAXPDZrmkz, 0 }, 1940 // AVX-512{F,VL} arithmetic instructions 256-bit 1941 { X86::VADDPSZ256rrkz, X86::VADDPSZ256rmkz, 0 }, 1942 { X86::VADDPDZ256rrkz, X86::VADDPDZ256rmkz, 0 }, 1943 { X86::VSUBPSZ256rrkz, X86::VSUBPSZ256rmkz, 0 }, 1944 { X86::VSUBPDZ256rrkz, X86::VSUBPDZ256rmkz, 0 }, 1945 { X86::VMULPSZ256rrkz, X86::VMULPSZ256rmkz, 0 }, 1946 { X86::VMULPDZ256rrkz, X86::VMULPDZ256rmkz, 0 }, 1947 { X86::VDIVPSZ256rrkz, X86::VDIVPSZ256rmkz, 0 }, 1948 { X86::VDIVPDZ256rrkz, X86::VDIVPDZ256rmkz, 0 }, 1949 { X86::VMINPSZ256rrkz, X86::VMINPSZ256rmkz, 0 }, 1950 { X86::VMINPDZ256rrkz, X86::VMINPDZ256rmkz, 0 }, 1951 { X86::VMAXPSZ256rrkz, X86::VMAXPSZ256rmkz, 0 }, 1952 { X86::VMAXPDZ256rrkz, X86::VMAXPDZ256rmkz, 0 }, 1953 // AVX-512{F,VL} arithmetic instructions 128-bit 1954 { X86::VADDPSZ128rrkz, X86::VADDPSZ128rmkz, 0 }, 1955 { 
X86::VADDPDZ128rrkz,      X86::VADDPDZ128rmkz,      0 },
    { X86::VSUBPSZ128rrkz,      X86::VSUBPSZ128rmkz,      0 },
    { X86::VSUBPDZ128rrkz,      X86::VSUBPDZ128rmkz,      0 },
    { X86::VMULPSZ128rrkz,      X86::VMULPSZ128rmkz,      0 },
    { X86::VMULPDZ128rrkz,      X86::VMULPDZ128rmkz,      0 },
    { X86::VDIVPSZ128rrkz,      X86::VDIVPSZ128rmkz,      0 },
    { X86::VDIVPDZ128rrkz,      X86::VDIVPDZ128rmkz,      0 },
    { X86::VMINPSZ128rrkz,      X86::VMINPSZ128rmkz,      0 },
    { X86::VMINPDZ128rrkz,      X86::VMINPDZ128rmkz,      0 },
    { X86::VMAXPSZ128rrkz,      X86::VMAXPSZ128rmkz,      0 },
    { X86::VMAXPDZ128rrkz,      X86::VMAXPDZ128rmkz,      0 }
  };

  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable3) {
    AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
                  Entry.RegOp, Entry.MemOp,
                  // Index 3, folded load
                  Entry.Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
  }

  static const X86MemoryFoldTableEntry MemoryFoldTable4[] = {
    // AVX-512 foldable instructions
    { X86::VADDPSZrrk,          X86::VADDPSZrmk,          0 },
    { X86::VADDPDZrrk,          X86::VADDPDZrmk,          0 },
    { X86::VSUBPSZrrk,          X86::VSUBPSZrmk,          0 },
    { X86::VSUBPDZrrk,          X86::VSUBPDZrmk,          0 },
    { X86::VMULPSZrrk,          X86::VMULPSZrmk,          0 },
    { X86::VMULPDZrrk,          X86::VMULPDZrmk,          0 },
    { X86::VDIVPSZrrk,          X86::VDIVPSZrmk,          0 },
    { X86::VDIVPDZrrk,          X86::VDIVPDZrmk,          0 },
    { X86::VMINPSZrrk,          X86::VMINPSZrmk,          0 },
    { X86::VMINPDZrrk,          X86::VMINPDZrmk,          0 },
    { X86::VMAXPSZrrk,          X86::VMAXPSZrmk,          0 },
    { X86::VMAXPDZrrk,          X86::VMAXPDZrmk,          0 },
    // AVX-512{F,VL} foldable instructions 256-bit
    { X86::VADDPSZ256rrk,       X86::VADDPSZ256rmk,       0 },
    { X86::VADDPDZ256rrk,       X86::VADDPDZ256rmk,       0 },
    { X86::VSUBPSZ256rrk,       X86::VSUBPSZ256rmk,       0 },
    { X86::VSUBPDZ256rrk,       X86::VSUBPDZ256rmk,       0 },
    { X86::VMULPSZ256rrk,       X86::VMULPSZ256rmk,       0 },
    { X86::VMULPDZ256rrk,       X86::VMULPDZ256rmk,       0 },
    { X86::VDIVPSZ256rrk,       X86::VDIVPSZ256rmk,       0 },
    { X86::VDIVPDZ256rrk,       X86::VDIVPDZ256rmk,       0 },
    { X86::VMINPSZ256rrk,       X86::VMINPSZ256rmk,       0 },
    { X86::VMINPDZ256rrk,       X86::VMINPDZ256rmk,       0 },
    { X86::VMAXPSZ256rrk,       X86::VMAXPSZ256rmk,       0 },
    { X86::VMAXPDZ256rrk,       X86::VMAXPDZ256rmk,       0 },
    // AVX-512{F,VL} foldable instructions 128-bit
    { X86::VADDPSZ128rrk,       X86::VADDPSZ128rmk,       0 },
    { X86::VADDPDZ128rrk,       X86::VADDPDZ128rmk,       0 },
    { X86::VSUBPSZ128rrk,       X86::VSUBPSZ128rmk,       0 },
    { X86::VSUBPDZ128rrk,       X86::VSUBPDZ128rmk,       0 },
    { X86::VMULPSZ128rrk,       X86::VMULPSZ128rmk,       0 },
    { X86::VMULPDZ128rrk,       X86::VMULPDZ128rmk,       0 },
    { X86::VDIVPSZ128rrk,       X86::VDIVPSZ128rmk,       0 },
    { X86::VDIVPDZ128rrk,       X86::VDIVPDZ128rmk,       0 },
    { X86::VMINPSZ128rrk,       X86::VMINPSZ128rmk,       0 },
    { X86::VMINPDZ128rrk,       X86::VMINPDZ128rmk,       0 },
    { X86::VMAXPSZ128rrk,       X86::VMAXPSZ128rmk,       0 },
    { X86::VMAXPDZ128rrk,       X86::VMAXPDZ128rmk,       0 }
  };

  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable4) {
    AddTableEntry(RegOp2MemOpTable4, MemOp2RegOpTable,
                  Entry.RegOp, Entry.MemOp,
                  // Index 4, folded load
                  Entry.Flags | TB_INDEX_4 | TB_FOLDED_LOAD);
  }
}

void
X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            unsigned RegOp, unsigned MemOp, unsigned Flags) {
  if ((Flags & TB_NO_FORWARD) == 0) {
    assert(!R2MTable.count(RegOp) && "Duplicate entry!");
    R2MTable[RegOp] = std::make_pair(MemOp, Flags);
  }
  if ((Flags & TB_NO_REVERSE) == 0) {
    assert(!M2RTable.count(MemOp) &&
           "Duplicated entries in unfolding maps?");
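    // For reference, a sketch (not code from this file) of how a consumer of
    // these maps can decode the packed Flags word, using the TB_* constants
    // defined near the top of the file:
    //   unsigned OpIndex  = Flags & TB_INDEX_MASK;                   // 0..4
    //   unsigned MinAlign = (Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
    //   bool FoldsLoad    = (Flags & TB_FOLDED_LOAD) != 0;
    // e.g. a table-2 entry registered with TB_INDEX_2 | TB_FOLDED_LOAD folds
    // a load into operand 2 of the register form.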
    M2RTable[MemOp] = std::make_pair(RegOp, Flags);
  }
}

bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8 bits of the larger
      // register in 32-bit mode.
      return false;
    // Intentional fall-through into the common handling below.
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}

int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (MI->getOpcode() == getCallFrameSetupOpcode() ||
      MI->getOpcode() == getCallFrameDestroyOpcode()) {
    unsigned StackAlign = TFI->getStackAlignment();
    // Round the adjustment up to a multiple of the stack alignment.
    int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
                 StackAlign;

    SPAdj -= MI->getOperand(1).getImm();

    if (MI->getOpcode() == getCallFrameSetupOpcode())
      return SPAdj;
    else
      return -SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
  if (MI->isCall()) {
    const MachineBasicBlock* MBB = MI->getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() ||
          I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently handle only PUSHes we can reasonably expect to see
  // in call sequences.
  switch (MI->getOpcode()) {
  default:
    return 0;
  case X86::PUSH32i8:
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSHi32:
    return 4;
  }
}
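// Worked example (illustrative, not from this file): with a 16-byte stack
// alignment, "ADJCALLSTACKDOWN64 20, 0" rounds its 20 bytes of outgoing
// arguments up to 32 and yields an adjustment of +32; the matching
// "ADJCALLSTACKUP64 20, 0" yields -32, so the two adjustments cancel across
// the call sequence.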
/// Return true and the FrameIndex if the specified operand and the operands
/// that follow it form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI->getOperand(Op+X86::AddrBaseReg).isFI() &&
      MI->getOperand(Op+X86::AddrScaleAmt).isImm() &&
      MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
      MI->getOperand(Op+X86::AddrDisp).isImm() &&
      MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 &&
      MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 &&
      MI->getOperand(Op+X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode) {
  switch (Opcode) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV32mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MOVDQAmr:
  case X86::VMOVSSmr:
  case X86::VMOVSDmr:
  case X86::VMOVAPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode()))
    if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI->getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                 int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame-index elimination operations.
    const MachineMemOperand *Dummy;
    return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode()))
    if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI->getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame-index elimination operations.
    const MachineMemOperand *Dummy;
    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

/// Return true if the register is a PIC base, i.e. defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
    return false;
  bool isPICBase = false;
  for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
         E = MRI.def_instr_end(); I != E; ++I) {
    MachineInstr *DefMI = &*I;
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsVMOVAPSrm:
  case X86::FsVMOVAPDrm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  // AVX-512
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
        MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
        MI->getOperand(1+X86::AddrIndexReg).isReg() &&
        MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
        MI->isInvariantLoad(AA)) {
      unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
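      // (Illustration, not from this file: in 32-bit PIC mode such a load
      // typically looks like, e.g. on Darwin,
      //     movl L_foo$non_lazy_ptr-"L0$pb"(%reg), %dst
      // where %reg was materialized by MOVPC32r. Unless -remat-pic-stub-load
      // is passed, loads whose displacement is a global are rejected below,
      // since the stub slot's contents are only assumed stable, not proven.)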
      if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
        MI->getOperand(1+X86::AddrIndexReg).isReg() &&
        MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
        !MI->getOperand(1+X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I) const {
  MachineBasicBlock::iterator E = MBB.end();

  // To bound compile time, if we are not able to determine the safety after
  // visiting 4 instructions in each direction, we assume it's not safe.
  MachineBasicBlock::iterator Iter = I;
  for (unsigned i = 0; Iter != E && i < 4; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
        SeenDef = true;
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS; no need to look any further.
      return true;
    ++Iter;
    // Skip over DBG_VALUE.
    while (Iter != E && Iter->isDebugValue())
      ++Iter;
  }

  // It is safe to clobber EFLAGS at the end of a block if no successor has it
  // live-in.
  if (Iter == E) {
    for (MachineBasicBlock *S : MBB.successors())
      if (S->isLiveIn(X86::EFLAGS))
        return false;
    return true;
  }

  MachineBasicBlock::iterator B = MBB.begin();
  Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    // If we make it to the beginning of the block, it's safe to clobber
    // EFLAGS iff EFLAGS is not live-in.
    if (Iter == B)
      return !MBB.isLiveIn(X86::EFLAGS);

    --Iter;
    // Skip over DBG_VALUE.
    while (Iter != B && Iter->isDebugValue())
      --Iter;

    bool SawKill = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      // A register mask may clobber EFLAGS, but we should still look for a
      // live EFLAGS def.
      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
        SawKill = true;
      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
        if (MO.isDef()) return MO.isDead();
        if (MO.isKill()) SawKill = true;
      }
    }

    if (SawKill)
      // This instruction kills EFLAGS and doesn't redefine it, so
      // there's no need to look further.
      return true;
  }

  // Conservative answer.
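  // For instance (illustrative): with flags defined five instructions above
  // this point and read five instructions below it, e.g.
  //   CMP32rr ... (implicit-def EFLAGS)
  //   <four EFLAGS-neutral instructions>
  //   <- query point ->
  //   <four EFLAGS-neutral instructions>
  //   JE ...
  // both bounded scans come back inconclusive, and "not safe" is indeed the
  // right answer here.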
  return false;
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr *Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = false;
  for (const MachineOperand &MO : Orig->operands()) {
    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
      ClobbersEFLAGS = true;
      break;
    }
  }

  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig->getOpcode()) {
    case X86::MOV32r0:  Value = 0; break;
    case X86::MOV32r1:  Value = 1; break;
    case X86::MOV32r_1: Value = -1; break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    DebugLoc DL = Orig->getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
      .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MBB.insert(I, MI);
  }

  MachineInstr *NewMI = std::prev(I);
  NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Return the shift count of a shift instruction, truncated the same way the
/// hardware truncates it.
inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}

/// Check whether the given shift count can be represented by a LEA
/// instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide, which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}

bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP,
                                  unsigned &NewSrc, bool &isKill, bool &isUndef,
                                  MachineOperand &ImplicitOp) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ?
      &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  unsigned SrcReg = Src.getReg();

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
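  // Sketch of the LEA64_32r handling below (illustrative register names):
  // a 32-bit source must be referenced through its 64-bit super-register,
  // so either
  //   - a physreg such as %esi is rewritten to %rsi, with %esi kept as an
  //     implicit operand so liveness stays correct, or
  //   - a virtual register gets a 64-bit temporary:
  //       %temp64:sub_32bit = COPY %src32
  //     and the LEA consumes %temp64.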
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    isKill = Src.isKill();
    isUndef = Src.isUndef();

    if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r, and the incoming registers are 32-bit. One way
  // or another we need to add 64-bit registers to the final MI.
  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
    MachineBasicBlock::LivenessQueryResult LQR =
      MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);

    switch (LQR) {
    case MachineBasicBlock::LQR_Unknown:
      // We can't give sane liveness flags to the instruction; abandon LEA
      // formation.
      return false;
    case MachineBasicBlock::LQR_Live:
      isKill = MI->killsRegister(SrcReg);
      isUndef = false;
      break;
    default:
      // The physreg itself is dead, so we have to use it as an <undef>.
      isKill = false;
      isUndef = true;
      break;
    }
  } else {
    // The source is a virtual register of the wrong class; we have to create
    // a temporary 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
            get(TargetOpcode::COPY))
      .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
        .addOperand(Src);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;
    isUndef = false;
  }

  // We've set all the parameters without issue.
  return true;
}

/// Helper for convertToThreeAddress when 16-bit LEA is disabled: use 32-bit
/// LEA to form 3-address code by promoting to a 32-bit superregister and then
/// truncating back down to a 16-bit subregister.
MachineInstr *
X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                           MachineFunction::iterator &MFI,
                                           MachineBasicBlock::iterator &MBBI,
                                           LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
  unsigned Opc, leaInReg;
  if (Subtarget.is64Bit()) {
    Opc = X86::LEA64_32r;
    leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  } else {
    Opc = X86::LEA32r;
    leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  }

  // Build and insert into an implicit UNDEF value. This is OK because
  // we'll be shifting and then extracting the lower 16 bits.
  // This has the potential to cause a partial register stall, e.g.
  //   movw    (%rbp,%rcx,2), %dx
  //   leal    -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
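  // For example (sketch of the sequence built below, for "SHL16ri %dx, 2"):
  //   %in  = IMPLICIT_DEF
  //   %in:sub_16bit = COPY %dx
  //   %out = LEA64_32r %noreg, 4, %in, 0, %noreg   ; scale 4 == shift by 2
  //   %dx  = COPY %out:sub_16bit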
2641   BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
2642   MachineInstr *InsMI =
2643     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
2644       .addReg(leaInReg, RegState::Define, X86::sub_16bit)
2645       .addReg(Src, getKillRegState(isKill));
2646 
2647   MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
2648                                     get(Opc), leaOutReg);
2649   switch (MIOpc) {
2650   default: llvm_unreachable("Unreachable!");
2651   case X86::SHL16ri: {
2652     unsigned ShAmt = MI->getOperand(2).getImm();
2653     MIB.addReg(0).addImm(1 << ShAmt)
2654        .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
2655     break;
2656   }
2657   case X86::INC16r:
2658     addRegOffset(MIB, leaInReg, true, 1);
2659     break;
2660   case X86::DEC16r:
2661     addRegOffset(MIB, leaInReg, true, -1);
2662     break;
2663   case X86::ADD16ri:
2664   case X86::ADD16ri8:
2665   case X86::ADD16ri_DB:
2666   case X86::ADD16ri8_DB:
2667     addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
2668     break;
2669   case X86::ADD16rr:
2670   case X86::ADD16rr_DB: {
2671     unsigned Src2 = MI->getOperand(2).getReg();
2672     bool isKill2 = MI->getOperand(2).isKill();
2673     unsigned leaInReg2 = 0;
2674     MachineInstr *InsMI2 = nullptr;
2675     if (Src == Src2) {
2676       // Both source operands are the same register, e.g.
2677       // ADD16rr %reg1028<kill>, %reg1028, so a single insert_subreg suffices.
2678       addRegReg(MIB, leaInReg, true, leaInReg, false);
2679     } else {
2680       if (Subtarget.is64Bit())
2681         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
2682       else
2683         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
2684       // Build and insert into an implicit UNDEF value. This is OK because
2685       // only the lower 16 bits are used and then extracted.
2686       BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
2687       InsMI2 =
2688         BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
2689           .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
2690           .addReg(Src2, getKillRegState(isKill2));
2691       addRegReg(MIB, leaInReg, true, leaInReg2, true);
2692     }
2693     if (LV && isKill2 && InsMI2)
2694       LV->replaceKillInstruction(Src2, MI, InsMI2);
2695     break;
2696   }
2697   }
2698 
2699   MachineInstr *NewMI = MIB;
2700   MachineInstr *ExtMI =
2701     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
2702       .addReg(Dest, RegState::Define | getDeadRegState(isDead))
2703       .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
2704 
2705   if (LV) {
2706     // Update live variables.
2707     LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
2708     LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
2709     if (isKill)
2710       LV->replaceKillInstruction(Src, MI, InsMI);
2711     if (isDead)
2712       LV->replaceKillInstruction(Dest, MI, ExtMI);
2713   }
2714 
2715   return ExtMI;
2716 }
2717 
2718 /// This method must be implemented by targets that
2719 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
2720 /// may be able to convert a two-address instruction into a true
2721 /// three-address instruction on demand. This allows the X86 target (for
2722 /// example) to convert ADD and SHL instructions into LEA instructions if they
2723 /// would require register copies due to two-addressness.
2724 ///
2725 /// This method returns a null pointer if the transformation cannot be
2726 /// performed, otherwise it returns the new instruction.
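/// For example (an illustrative sketch, not actual compiler output): a
/// two-address "%a<def> = ADD32rr %x<kill>, %b" forces a copy of %x into %a
/// when the two cannot be coalesced, whereas the three-address form
/// "%a<def> = LEA32r %x, 1, %b, 0, %noreg" (base, scale, index, disp,
/// segment) computes the same sum without tying %a to either source.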
2727/// 2728MachineInstr * 2729X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, 2730 MachineBasicBlock::iterator &MBBI, 2731 LiveVariables *LV) const { 2732 MachineInstr *MI = MBBI; 2733 2734 // The following opcodes also sets the condition code register(s). Only 2735 // convert them to equivalent lea if the condition code register def's 2736 // are dead! 2737 if (hasLiveCondCodeDef(MI)) 2738 return nullptr; 2739 2740 MachineFunction &MF = *MI->getParent()->getParent(); 2741 // All instructions input are two-addr instructions. Get the known operands. 2742 const MachineOperand &Dest = MI->getOperand(0); 2743 const MachineOperand &Src = MI->getOperand(1); 2744 2745 MachineInstr *NewMI = nullptr; 2746 // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When 2747 // we have better subtarget support, enable the 16-bit LEA generation here. 2748 // 16-bit LEA is also slow on Core2. 2749 bool DisableLEA16 = true; 2750 bool is64Bit = Subtarget.is64Bit(); 2751 2752 unsigned MIOpc = MI->getOpcode(); 2753 switch (MIOpc) { 2754 default: return nullptr; 2755 case X86::SHL64ri: { 2756 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2757 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2758 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2759 2760 // LEA can't handle RSP. 2761 if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && 2762 !MF.getRegInfo().constrainRegClass(Src.getReg(), 2763 &X86::GR64_NOSPRegClass)) 2764 return nullptr; 2765 2766 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 2767 .addOperand(Dest) 2768 .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); 2769 break; 2770 } 2771 case X86::SHL32ri: { 2772 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2773 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2774 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2775 2776 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2777 2778 // LEA can't handle ESP. 2779 bool isKill, isUndef; 2780 unsigned SrcReg; 2781 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2782 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2783 SrcReg, isKill, isUndef, ImplicitOp)) 2784 return nullptr; 2785 2786 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2787 .addOperand(Dest) 2788 .addReg(0).addImm(1 << ShAmt) 2789 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) 2790 .addImm(0).addReg(0); 2791 if (ImplicitOp.getReg() != 0) 2792 MIB.addOperand(ImplicitOp); 2793 NewMI = MIB; 2794 2795 break; 2796 } 2797 case X86::SHL16ri: { 2798 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2799 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2800 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2801 2802 if (DisableLEA16) 2803 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr; 2804 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2805 .addOperand(Dest) 2806 .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); 2807 break; 2808 } 2809 case X86::INC64r: 2810 case X86::INC32r: { 2811 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 2812 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r 2813 : (is64Bit ? 
X86::LEA64_32r : X86::LEA32r); 2814 bool isKill, isUndef; 2815 unsigned SrcReg; 2816 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2817 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2818 SrcReg, isKill, isUndef, ImplicitOp)) 2819 return nullptr; 2820 2821 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2822 .addOperand(Dest) 2823 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)); 2824 if (ImplicitOp.getReg() != 0) 2825 MIB.addOperand(ImplicitOp); 2826 2827 NewMI = addOffset(MIB, 1); 2828 break; 2829 } 2830 case X86::INC16r: 2831 if (DisableLEA16) 2832 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2833 : nullptr; 2834 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 2835 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2836 .addOperand(Dest).addOperand(Src), 1); 2837 break; 2838 case X86::DEC64r: 2839 case X86::DEC32r: { 2840 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 2841 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r 2842 : (is64Bit ? X86::LEA64_32r : X86::LEA32r); 2843 2844 bool isKill, isUndef; 2845 unsigned SrcReg; 2846 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2847 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2848 SrcReg, isKill, isUndef, ImplicitOp)) 2849 return nullptr; 2850 2851 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2852 .addOperand(Dest) 2853 .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); 2854 if (ImplicitOp.getReg() != 0) 2855 MIB.addOperand(ImplicitOp); 2856 2857 NewMI = addOffset(MIB, -1); 2858 2859 break; 2860 } 2861 case X86::DEC16r: 2862 if (DisableLEA16) 2863 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2864 : nullptr; 2865 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 2866 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2867 .addOperand(Dest).addOperand(Src), -1); 2868 break; 2869 case X86::ADD64rr: 2870 case X86::ADD64rr_DB: 2871 case X86::ADD32rr: 2872 case X86::ADD32rr_DB: { 2873 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2874 unsigned Opc; 2875 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) 2876 Opc = X86::LEA64r; 2877 else 2878 Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2879 2880 bool isKill, isUndef; 2881 unsigned SrcReg; 2882 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2883 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, 2884 SrcReg, isKill, isUndef, ImplicitOp)) 2885 return nullptr; 2886 2887 const MachineOperand &Src2 = MI->getOperand(2); 2888 bool isKill2, isUndef2; 2889 unsigned SrcReg2; 2890 MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); 2891 if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false, 2892 SrcReg2, isKill2, isUndef2, ImplicitOp2)) 2893 return nullptr; 2894 2895 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2896 .addOperand(Dest); 2897 if (ImplicitOp.getReg() != 0) 2898 MIB.addOperand(ImplicitOp); 2899 if (ImplicitOp2.getReg() != 0) 2900 MIB.addOperand(ImplicitOp2); 2901 2902 NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); 2903 2904 // Preserve undefness of the operands. 
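    // (In the LEA built by addRegReg above, operand 1 is the base register,
    // which received Src, and operand 3 is the index register, which
    // received Src2.)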
2905 NewMI->getOperand(1).setIsUndef(isUndef); 2906 NewMI->getOperand(3).setIsUndef(isUndef2); 2907 2908 if (LV && Src2.isKill()) 2909 LV->replaceKillInstruction(SrcReg2, MI, NewMI); 2910 break; 2911 } 2912 case X86::ADD16rr: 2913 case X86::ADD16rr_DB: { 2914 if (DisableLEA16) 2915 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2916 : nullptr; 2917 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2918 unsigned Src2 = MI->getOperand(2).getReg(); 2919 bool isKill2 = MI->getOperand(2).isKill(); 2920 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2921 .addOperand(Dest), 2922 Src.getReg(), Src.isKill(), Src2, isKill2); 2923 2924 // Preserve undefness of the operands. 2925 bool isUndef = MI->getOperand(1).isUndef(); 2926 bool isUndef2 = MI->getOperand(2).isUndef(); 2927 NewMI->getOperand(1).setIsUndef(isUndef); 2928 NewMI->getOperand(3).setIsUndef(isUndef2); 2929 2930 if (LV && isKill2) 2931 LV->replaceKillInstruction(Src2, MI, NewMI); 2932 break; 2933 } 2934 case X86::ADD64ri32: 2935 case X86::ADD64ri8: 2936 case X86::ADD64ri32_DB: 2937 case X86::ADD64ri8_DB: 2938 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2939 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 2940 .addOperand(Dest).addOperand(Src), 2941 MI->getOperand(2).getImm()); 2942 break; 2943 case X86::ADD32ri: 2944 case X86::ADD32ri8: 2945 case X86::ADD32ri_DB: 2946 case X86::ADD32ri8_DB: { 2947 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2948 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2949 2950 bool isKill, isUndef; 2951 unsigned SrcReg; 2952 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2953 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, 2954 SrcReg, isKill, isUndef, ImplicitOp)) 2955 return nullptr; 2956 2957 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2958 .addOperand(Dest) 2959 .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); 2960 if (ImplicitOp.getReg() != 0) 2961 MIB.addOperand(ImplicitOp); 2962 2963 NewMI = addOffset(MIB, MI->getOperand(2).getImm()); 2964 break; 2965 } 2966 case X86::ADD16ri: 2967 case X86::ADD16ri8: 2968 case X86::ADD16ri_DB: 2969 case X86::ADD16ri8_DB: 2970 if (DisableLEA16) 2971 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2972 : nullptr; 2973 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2974 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2975 .addOperand(Dest).addOperand(Src), 2976 MI->getOperand(2).getImm()); 2977 break; 2978 } 2979 2980 if (!NewMI) return nullptr; 2981 2982 if (LV) { // Update live variables 2983 if (Src.isKill()) 2984 LV->replaceKillInstruction(Src.getReg(), MI, NewMI); 2985 if (Dest.isDead()) 2986 LV->replaceKillInstruction(Dest.getReg(), MI, NewMI); 2987 } 2988 2989 MFI->insert(MBBI, NewMI); // Insert the new inst 2990 return NewMI; 2991} 2992 2993/// Returns true if the given instruction opcode is FMA3. 2994/// Otherwise, returns false. 2995/// The second parameter is optional and is used as the second return from 2996/// the function. It is set to true if the given instruction has FMA3 opcode 2997/// that is used for lowering of scalar FMA intrinsics, and it is set to false 2998/// otherwise. 
2999static bool isFMA3(unsigned Opcode, bool *IsIntrinsic = nullptr) { 3000 if (IsIntrinsic) 3001 *IsIntrinsic = false; 3002 3003 switch (Opcode) { 3004 case X86::VFMADDSDr132r: case X86::VFMADDSDr132m: 3005 case X86::VFMADDSSr132r: case X86::VFMADDSSr132m: 3006 case X86::VFMSUBSDr132r: case X86::VFMSUBSDr132m: 3007 case X86::VFMSUBSSr132r: case X86::VFMSUBSSr132m: 3008 case X86::VFNMADDSDr132r: case X86::VFNMADDSDr132m: 3009 case X86::VFNMADDSSr132r: case X86::VFNMADDSSr132m: 3010 case X86::VFNMSUBSDr132r: case X86::VFNMSUBSDr132m: 3011 case X86::VFNMSUBSSr132r: case X86::VFNMSUBSSr132m: 3012 3013 case X86::VFMADDSDr213r: case X86::VFMADDSDr213m: 3014 case X86::VFMADDSSr213r: case X86::VFMADDSSr213m: 3015 case X86::VFMSUBSDr213r: case X86::VFMSUBSDr213m: 3016 case X86::VFMSUBSSr213r: case X86::VFMSUBSSr213m: 3017 case X86::VFNMADDSDr213r: case X86::VFNMADDSDr213m: 3018 case X86::VFNMADDSSr213r: case X86::VFNMADDSSr213m: 3019 case X86::VFNMSUBSDr213r: case X86::VFNMSUBSDr213m: 3020 case X86::VFNMSUBSSr213r: case X86::VFNMSUBSSr213m: 3021 3022 case X86::VFMADDSDr231r: case X86::VFMADDSDr231m: 3023 case X86::VFMADDSSr231r: case X86::VFMADDSSr231m: 3024 case X86::VFMSUBSDr231r: case X86::VFMSUBSDr231m: 3025 case X86::VFMSUBSSr231r: case X86::VFMSUBSSr231m: 3026 case X86::VFNMADDSDr231r: case X86::VFNMADDSDr231m: 3027 case X86::VFNMADDSSr231r: case X86::VFNMADDSSr231m: 3028 case X86::VFNMSUBSDr231r: case X86::VFNMSUBSDr231m: 3029 case X86::VFNMSUBSSr231r: case X86::VFNMSUBSSr231m: 3030 3031 case X86::VFMADDSUBPDr132r: case X86::VFMADDSUBPDr132m: 3032 case X86::VFMADDSUBPSr132r: case X86::VFMADDSUBPSr132m: 3033 case X86::VFMSUBADDPDr132r: case X86::VFMSUBADDPDr132m: 3034 case X86::VFMSUBADDPSr132r: case X86::VFMSUBADDPSr132m: 3035 case X86::VFMADDSUBPDr132rY: case X86::VFMADDSUBPDr132mY: 3036 case X86::VFMADDSUBPSr132rY: case X86::VFMADDSUBPSr132mY: 3037 case X86::VFMSUBADDPDr132rY: case X86::VFMSUBADDPDr132mY: 3038 case X86::VFMSUBADDPSr132rY: case X86::VFMSUBADDPSr132mY: 3039 3040 case X86::VFMADDPDr132r: case X86::VFMADDPDr132m: 3041 case X86::VFMADDPSr132r: case X86::VFMADDPSr132m: 3042 case X86::VFMSUBPDr132r: case X86::VFMSUBPDr132m: 3043 case X86::VFMSUBPSr132r: case X86::VFMSUBPSr132m: 3044 case X86::VFNMADDPDr132r: case X86::VFNMADDPDr132m: 3045 case X86::VFNMADDPSr132r: case X86::VFNMADDPSr132m: 3046 case X86::VFNMSUBPDr132r: case X86::VFNMSUBPDr132m: 3047 case X86::VFNMSUBPSr132r: case X86::VFNMSUBPSr132m: 3048 case X86::VFMADDPDr132rY: case X86::VFMADDPDr132mY: 3049 case X86::VFMADDPSr132rY: case X86::VFMADDPSr132mY: 3050 case X86::VFMSUBPDr132rY: case X86::VFMSUBPDr132mY: 3051 case X86::VFMSUBPSr132rY: case X86::VFMSUBPSr132mY: 3052 case X86::VFNMADDPDr132rY: case X86::VFNMADDPDr132mY: 3053 case X86::VFNMADDPSr132rY: case X86::VFNMADDPSr132mY: 3054 case X86::VFNMSUBPDr132rY: case X86::VFNMSUBPDr132mY: 3055 case X86::VFNMSUBPSr132rY: case X86::VFNMSUBPSr132mY: 3056 3057 case X86::VFMADDSUBPDr213r: case X86::VFMADDSUBPDr213m: 3058 case X86::VFMADDSUBPSr213r: case X86::VFMADDSUBPSr213m: 3059 case X86::VFMSUBADDPDr213r: case X86::VFMSUBADDPDr213m: 3060 case X86::VFMSUBADDPSr213r: case X86::VFMSUBADDPSr213m: 3061 case X86::VFMADDSUBPDr213rY: case X86::VFMADDSUBPDr213mY: 3062 case X86::VFMADDSUBPSr213rY: case X86::VFMADDSUBPSr213mY: 3063 case X86::VFMSUBADDPDr213rY: case X86::VFMSUBADDPDr213mY: 3064 case X86::VFMSUBADDPSr213rY: case X86::VFMSUBADDPSr213mY: 3065 3066 case X86::VFMADDPDr213r: case X86::VFMADDPDr213m: 3067 case X86::VFMADDPSr213r: case X86::VFMADDPSr213m: 3068 case 
X86::VFMSUBPDr213r: case X86::VFMSUBPDr213m: 3069 case X86::VFMSUBPSr213r: case X86::VFMSUBPSr213m: 3070 case X86::VFNMADDPDr213r: case X86::VFNMADDPDr213m: 3071 case X86::VFNMADDPSr213r: case X86::VFNMADDPSr213m: 3072 case X86::VFNMSUBPDr213r: case X86::VFNMSUBPDr213m: 3073 case X86::VFNMSUBPSr213r: case X86::VFNMSUBPSr213m: 3074 case X86::VFMADDPDr213rY: case X86::VFMADDPDr213mY: 3075 case X86::VFMADDPSr213rY: case X86::VFMADDPSr213mY: 3076 case X86::VFMSUBPDr213rY: case X86::VFMSUBPDr213mY: 3077 case X86::VFMSUBPSr213rY: case X86::VFMSUBPSr213mY: 3078 case X86::VFNMADDPDr213rY: case X86::VFNMADDPDr213mY: 3079 case X86::VFNMADDPSr213rY: case X86::VFNMADDPSr213mY: 3080 case X86::VFNMSUBPDr213rY: case X86::VFNMSUBPDr213mY: 3081 case X86::VFNMSUBPSr213rY: case X86::VFNMSUBPSr213mY: 3082 3083 case X86::VFMADDSUBPDr231r: case X86::VFMADDSUBPDr231m: 3084 case X86::VFMADDSUBPSr231r: case X86::VFMADDSUBPSr231m: 3085 case X86::VFMSUBADDPDr231r: case X86::VFMSUBADDPDr231m: 3086 case X86::VFMSUBADDPSr231r: case X86::VFMSUBADDPSr231m: 3087 case X86::VFMADDSUBPDr231rY: case X86::VFMADDSUBPDr231mY: 3088 case X86::VFMADDSUBPSr231rY: case X86::VFMADDSUBPSr231mY: 3089 case X86::VFMSUBADDPDr231rY: case X86::VFMSUBADDPDr231mY: 3090 case X86::VFMSUBADDPSr231rY: case X86::VFMSUBADDPSr231mY: 3091 3092 case X86::VFMADDPDr231r: case X86::VFMADDPDr231m: 3093 case X86::VFMADDPSr231r: case X86::VFMADDPSr231m: 3094 case X86::VFMSUBPDr231r: case X86::VFMSUBPDr231m: 3095 case X86::VFMSUBPSr231r: case X86::VFMSUBPSr231m: 3096 case X86::VFNMADDPDr231r: case X86::VFNMADDPDr231m: 3097 case X86::VFNMADDPSr231r: case X86::VFNMADDPSr231m: 3098 case X86::VFNMSUBPDr231r: case X86::VFNMSUBPDr231m: 3099 case X86::VFNMSUBPSr231r: case X86::VFNMSUBPSr231m: 3100 case X86::VFMADDPDr231rY: case X86::VFMADDPDr231mY: 3101 case X86::VFMADDPSr231rY: case X86::VFMADDPSr231mY: 3102 case X86::VFMSUBPDr231rY: case X86::VFMSUBPDr231mY: 3103 case X86::VFMSUBPSr231rY: case X86::VFMSUBPSr231mY: 3104 case X86::VFNMADDPDr231rY: case X86::VFNMADDPDr231mY: 3105 case X86::VFNMADDPSr231rY: case X86::VFNMADDPSr231mY: 3106 case X86::VFNMSUBPDr231rY: case X86::VFNMSUBPDr231mY: 3107 case X86::VFNMSUBPSr231rY: case X86::VFNMSUBPSr231mY: 3108 return true; 3109 3110 case X86::VFMADDSDr132r_Int: case X86::VFMADDSDr132m_Int: 3111 case X86::VFMADDSSr132r_Int: case X86::VFMADDSSr132m_Int: 3112 case X86::VFMSUBSDr132r_Int: case X86::VFMSUBSDr132m_Int: 3113 case X86::VFMSUBSSr132r_Int: case X86::VFMSUBSSr132m_Int: 3114 case X86::VFNMADDSDr132r_Int: case X86::VFNMADDSDr132m_Int: 3115 case X86::VFNMADDSSr132r_Int: case X86::VFNMADDSSr132m_Int: 3116 case X86::VFNMSUBSDr132r_Int: case X86::VFNMSUBSDr132m_Int: 3117 case X86::VFNMSUBSSr132r_Int: case X86::VFNMSUBSSr132m_Int: 3118 3119 case X86::VFMADDSDr213r_Int: case X86::VFMADDSDr213m_Int: 3120 case X86::VFMADDSSr213r_Int: case X86::VFMADDSSr213m_Int: 3121 case X86::VFMSUBSDr213r_Int: case X86::VFMSUBSDr213m_Int: 3122 case X86::VFMSUBSSr213r_Int: case X86::VFMSUBSSr213m_Int: 3123 case X86::VFNMADDSDr213r_Int: case X86::VFNMADDSDr213m_Int: 3124 case X86::VFNMADDSSr213r_Int: case X86::VFNMADDSSr213m_Int: 3125 case X86::VFNMSUBSDr213r_Int: case X86::VFNMSUBSDr213m_Int: 3126 case X86::VFNMSUBSSr213r_Int: case X86::VFNMSUBSSr213m_Int: 3127 3128 case X86::VFMADDSDr231r_Int: case X86::VFMADDSDr231m_Int: 3129 case X86::VFMADDSSr231r_Int: case X86::VFMADDSSr231m_Int: 3130 case X86::VFMSUBSDr231r_Int: case X86::VFMSUBSDr231m_Int: 3131 case X86::VFMSUBSSr231r_Int: case X86::VFMSUBSSr231m_Int: 3132 case X86::VFNMADDSDr231r_Int: 
case X86::VFNMADDSDr231m_Int: 3133 case X86::VFNMADDSSr231r_Int: case X86::VFNMADDSSr231m_Int: 3134 case X86::VFNMSUBSDr231r_Int: case X86::VFNMSUBSDr231m_Int: 3135 case X86::VFNMSUBSSr231r_Int: case X86::VFNMSUBSSr231m_Int: 3136 if (IsIntrinsic) 3137 *IsIntrinsic = true; 3138 return true; 3139 default: 3140 return false; 3141 } 3142 llvm_unreachable("Opcode not handled by the switch"); 3143} 3144 3145MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI, 3146 bool NewMI, 3147 unsigned OpIdx1, 3148 unsigned OpIdx2) const { 3149 switch (MI->getOpcode()) { 3150 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I) 3151 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I) 3152 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I) 3153 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I) 3154 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I) 3155 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I) 3156 unsigned Opc; 3157 unsigned Size; 3158 switch (MI->getOpcode()) { 3159 default: llvm_unreachable("Unreachable!"); 3160 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; 3161 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; 3162 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; 3163 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; 3164 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; 3165 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; 3166 } 3167 unsigned Amt = MI->getOperand(3).getImm(); 3168 if (NewMI) { 3169 MachineFunction &MF = *MI->getParent()->getParent(); 3170 MI = MF.CloneMachineInstr(MI); 3171 NewMI = false; 3172 } 3173 MI->setDesc(get(Opc)); 3174 MI->getOperand(3).setImm(Size-Amt); 3175 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3176 } 3177 case X86::BLENDPDrri: 3178 case X86::BLENDPSrri: 3179 case X86::PBLENDWrri: 3180 case X86::VBLENDPDrri: 3181 case X86::VBLENDPSrri: 3182 case X86::VBLENDPDYrri: 3183 case X86::VBLENDPSYrri: 3184 case X86::VPBLENDDrri: 3185 case X86::VPBLENDWrri: 3186 case X86::VPBLENDDYrri: 3187 case X86::VPBLENDWYrri:{ 3188 unsigned Mask; 3189 switch (MI->getOpcode()) { 3190 default: llvm_unreachable("Unreachable!"); 3191 case X86::BLENDPDrri: Mask = 0x03; break; 3192 case X86::BLENDPSrri: Mask = 0x0F; break; 3193 case X86::PBLENDWrri: Mask = 0xFF; break; 3194 case X86::VBLENDPDrri: Mask = 0x03; break; 3195 case X86::VBLENDPSrri: Mask = 0x0F; break; 3196 case X86::VBLENDPDYrri: Mask = 0x0F; break; 3197 case X86::VBLENDPSYrri: Mask = 0xFF; break; 3198 case X86::VPBLENDDrri: Mask = 0x0F; break; 3199 case X86::VPBLENDWrri: Mask = 0xFF; break; 3200 case X86::VPBLENDDYrri: Mask = 0xFF; break; 3201 case X86::VPBLENDWYrri: Mask = 0xFF; break; 3202 } 3203 // Only the least significant bits of Imm are used. 3204 unsigned Imm = MI->getOperand(3).getImm() & Mask; 3205 if (NewMI) { 3206 MachineFunction &MF = *MI->getParent()->getParent(); 3207 MI = MF.CloneMachineInstr(MI); 3208 NewMI = false; 3209 } 3210 MI->getOperand(3).setImm(Mask ^ Imm); 3211 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3212 } 3213 case X86::PCLMULQDQrr: 3214 case X86::VPCLMULQDQrr:{ 3215 // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0] 3216 // SRC2 64bits = Imm[4] ? 
SRC2[127:64] : SRC2[63:0] 3217 unsigned Imm = MI->getOperand(3).getImm(); 3218 unsigned Src1Hi = Imm & 0x01; 3219 unsigned Src2Hi = Imm & 0x10; 3220 if (NewMI) { 3221 MachineFunction &MF = *MI->getParent()->getParent(); 3222 MI = MF.CloneMachineInstr(MI); 3223 NewMI = false; 3224 } 3225 MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); 3226 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3227 } 3228 case X86::CMPPDrri: 3229 case X86::CMPPSrri: 3230 case X86::VCMPPDrri: 3231 case X86::VCMPPSrri: 3232 case X86::VCMPPDYrri: 3233 case X86::VCMPPSYrri: { 3234 // Float comparison can be safely commuted for 3235 // Ordered/Unordered/Equal/NotEqual tests 3236 unsigned Imm = MI->getOperand(3).getImm() & 0x7; 3237 switch (Imm) { 3238 case 0x00: // EQUAL 3239 case 0x03: // UNORDERED 3240 case 0x04: // NOT EQUAL 3241 case 0x07: // ORDERED 3242 if (NewMI) { 3243 MachineFunction &MF = *MI->getParent()->getParent(); 3244 MI = MF.CloneMachineInstr(MI); 3245 NewMI = false; 3246 } 3247 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3248 default: 3249 return nullptr; 3250 } 3251 } 3252 case X86::VPCOMBri: case X86::VPCOMUBri: 3253 case X86::VPCOMDri: case X86::VPCOMUDri: 3254 case X86::VPCOMQri: case X86::VPCOMUQri: 3255 case X86::VPCOMWri: case X86::VPCOMUWri: { 3256 // Flip comparison mode immediate (if necessary). 3257 unsigned Imm = MI->getOperand(3).getImm() & 0x7; 3258 switch (Imm) { 3259 case 0x00: Imm = 0x02; break; // LT -> GT 3260 case 0x01: Imm = 0x03; break; // LE -> GE 3261 case 0x02: Imm = 0x00; break; // GT -> LT 3262 case 0x03: Imm = 0x01; break; // GE -> LE 3263 case 0x04: // EQ 3264 case 0x05: // NE 3265 case 0x06: // FALSE 3266 case 0x07: // TRUE 3267 default: 3268 break; 3269 } 3270 if (NewMI) { 3271 MachineFunction &MF = *MI->getParent()->getParent(); 3272 MI = MF.CloneMachineInstr(MI); 3273 NewMI = false; 3274 } 3275 MI->getOperand(3).setImm(Imm); 3276 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3277 } 3278 case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr: 3279 case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr: 3280 case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: 3281 case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr: 3282 case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr: 3283 case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr: 3284 case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr: 3285 case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr: 3286 case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr: 3287 case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr: 3288 case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr: 3289 case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr: 3290 case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr: 3291 case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: 3292 case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr: 3293 case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: { 3294 unsigned Opc; 3295 switch (MI->getOpcode()) { 3296 default: llvm_unreachable("Unreachable!"); 3297 case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break; 3298 case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break; 3299 case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break; 3300 case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break; 3301 case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break; 3302 case X86::CMOVAE64rr: 
Opc = X86::CMOVB64rr; break; 3303 case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break; 3304 case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break; 3305 case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break; 3306 case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break; 3307 case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break; 3308 case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break; 3309 case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break; 3310 case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break; 3311 case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break; 3312 case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break; 3313 case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break; 3314 case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break; 3315 case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break; 3316 case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break; 3317 case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break; 3318 case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break; 3319 case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break; 3320 case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break; 3321 case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break; 3322 case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break; 3323 case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break; 3324 case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break; 3325 case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break; 3326 case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break; 3327 case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break; 3328 case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break; 3329 case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break; 3330 case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break; 3331 case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break; 3332 case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break; 3333 case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break; 3334 case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break; 3335 case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break; 3336 case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break; 3337 case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break; 3338 case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break; 3339 case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break; 3340 case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break; 3341 case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break; 3342 case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break; 3343 case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break; 3344 case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break; 3345 } 3346 if (NewMI) { 3347 MachineFunction &MF = *MI->getParent()->getParent(); 3348 MI = MF.CloneMachineInstr(MI); 3349 NewMI = false; 3350 } 3351 MI->setDesc(get(Opc)); 3352 // Fallthrough intended. 3353 } 3354 default: 3355 if (isFMA3(MI->getOpcode())) { 3356 unsigned Opc = getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2); 3357 if (Opc == 0) 3358 return nullptr; 3359 if (NewMI) { 3360 MachineFunction &MF = *MI->getParent()->getParent(); 3361 MI = MF.CloneMachineInstr(MI); 3362 NewMI = false; 3363 } 3364 MI->setDesc(get(Opc)); 3365 } 3366 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 3367 } 3368} 3369 3370bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI, 3371 unsigned &SrcOpIdx1, 3372 unsigned &SrcOpIdx2) const { 3373 3374 unsigned RegOpsNum = isMem(MI, 3) ? 2 : 3; 3375 3376 // Only the first RegOpsNum operands are commutable. 3377 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means 3378 // that the operand is not specified/fixed. 
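  // (That is: for the register form, operands 1..3 are candidates for
  // commuting; for the memory form only operands 1..2 are, since operand 3
  // begins the memory reference.)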
3379   if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3380       (SrcOpIdx1 < 1 || SrcOpIdx1 > RegOpsNum))
3381     return false;
3382   if (SrcOpIdx2 != CommuteAnyOperandIndex &&
3383       (SrcOpIdx2 < 1 || SrcOpIdx2 > RegOpsNum))
3384     return false;
3385 
3386   // Look for two different register operands assumed to be commutable
3387   // regardless of the FMA opcode. The FMA opcode is adjusted later.
3388   if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3389       SrcOpIdx2 == CommuteAnyOperandIndex) {
3390     unsigned CommutableOpIdx1 = SrcOpIdx1;
3391     unsigned CommutableOpIdx2 = SrcOpIdx2;
3392 
3393     // At least one of the operands to be commuted is not specified, so
3394     // this method is free to choose appropriate commutable operands.
3395     if (SrcOpIdx1 == SrcOpIdx2)
3396       // Neither operand is fixed. By default set one of the commutable
3397       // operands to the last register operand of the instruction.
3398       CommutableOpIdx2 = RegOpsNum;
3399     else if (SrcOpIdx2 == CommuteAnyOperandIndex)
3400       // Only one of the operands is not fixed.
3401       CommutableOpIdx2 = SrcOpIdx1;
3402 
3403     // CommutableOpIdx2 is well defined now. Let's choose another commutable
3404     // operand and assign its index to CommutableOpIdx1.
3405     unsigned Op2Reg = MI->getOperand(CommutableOpIdx2).getReg();
3406     for (CommutableOpIdx1 = RegOpsNum; CommutableOpIdx1 > 0; CommutableOpIdx1--) {
3407       // The commuted operands must have different registers.
3408       // Otherwise, the commute transformation does not change anything and
3409       // is useless.
3410       if (Op2Reg != MI->getOperand(CommutableOpIdx1).getReg())
3411         break;
3412     }
3413 
3414     // No appropriate commutable operands were found.
3415     if (CommutableOpIdx1 == 0)
3416       return false;
3417 
3418     // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
3419     // to return those values.
3420     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
3421                               CommutableOpIdx1, CommutableOpIdx2))
3422       return false;
3423   }
3424 
3425   // Check if we can adjust the opcode to preserve the semantics when
3426   // commuting the register operands.
3427   return getFMA3OpcodeToCommuteOperands(MI, SrcOpIdx1, SrcOpIdx2) != 0;
3428 }
3429 
3430 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
3431                                                       unsigned SrcOpIdx1,
3432                                                       unsigned SrcOpIdx2) const {
3433   unsigned Opc = MI->getOpcode();
3434 
3435   // Define the array that holds the FMA opcodes in groups
3436   // of three (the 132, 213 and 231 forms).
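  // Illustrative example of why commuting changes the form: VFMADDSSr213r
  // x, y, z computes x = y*x + z; swapping the last two operands gives
  // VFMADDSSr132r x, z, y, which computes x = x*y + z, the same value.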
3437 static const unsigned RegularOpcodeGroups[][3] = { 3438 { X86::VFMADDSSr132r, X86::VFMADDSSr213r, X86::VFMADDSSr231r }, 3439 { X86::VFMADDSDr132r, X86::VFMADDSDr213r, X86::VFMADDSDr231r }, 3440 { X86::VFMADDPSr132r, X86::VFMADDPSr213r, X86::VFMADDPSr231r }, 3441 { X86::VFMADDPDr132r, X86::VFMADDPDr213r, X86::VFMADDPDr231r }, 3442 { X86::VFMADDPSr132rY, X86::VFMADDPSr213rY, X86::VFMADDPSr231rY }, 3443 { X86::VFMADDPDr132rY, X86::VFMADDPDr213rY, X86::VFMADDPDr231rY }, 3444 { X86::VFMADDSSr132m, X86::VFMADDSSr213m, X86::VFMADDSSr231m }, 3445 { X86::VFMADDSDr132m, X86::VFMADDSDr213m, X86::VFMADDSDr231m }, 3446 { X86::VFMADDPSr132m, X86::VFMADDPSr213m, X86::VFMADDPSr231m }, 3447 { X86::VFMADDPDr132m, X86::VFMADDPDr213m, X86::VFMADDPDr231m }, 3448 { X86::VFMADDPSr132mY, X86::VFMADDPSr213mY, X86::VFMADDPSr231mY }, 3449 { X86::VFMADDPDr132mY, X86::VFMADDPDr213mY, X86::VFMADDPDr231mY }, 3450 3451 { X86::VFMSUBSSr132r, X86::VFMSUBSSr213r, X86::VFMSUBSSr231r }, 3452 { X86::VFMSUBSDr132r, X86::VFMSUBSDr213r, X86::VFMSUBSDr231r }, 3453 { X86::VFMSUBPSr132r, X86::VFMSUBPSr213r, X86::VFMSUBPSr231r }, 3454 { X86::VFMSUBPDr132r, X86::VFMSUBPDr213r, X86::VFMSUBPDr231r }, 3455 { X86::VFMSUBPSr132rY, X86::VFMSUBPSr213rY, X86::VFMSUBPSr231rY }, 3456 { X86::VFMSUBPDr132rY, X86::VFMSUBPDr213rY, X86::VFMSUBPDr231rY }, 3457 { X86::VFMSUBSSr132m, X86::VFMSUBSSr213m, X86::VFMSUBSSr231m }, 3458 { X86::VFMSUBSDr132m, X86::VFMSUBSDr213m, X86::VFMSUBSDr231m }, 3459 { X86::VFMSUBPSr132m, X86::VFMSUBPSr213m, X86::VFMSUBPSr231m }, 3460 { X86::VFMSUBPDr132m, X86::VFMSUBPDr213m, X86::VFMSUBPDr231m }, 3461 { X86::VFMSUBPSr132mY, X86::VFMSUBPSr213mY, X86::VFMSUBPSr231mY }, 3462 { X86::VFMSUBPDr132mY, X86::VFMSUBPDr213mY, X86::VFMSUBPDr231mY }, 3463 3464 { X86::VFNMADDSSr132r, X86::VFNMADDSSr213r, X86::VFNMADDSSr231r }, 3465 { X86::VFNMADDSDr132r, X86::VFNMADDSDr213r, X86::VFNMADDSDr231r }, 3466 { X86::VFNMADDPSr132r, X86::VFNMADDPSr213r, X86::VFNMADDPSr231r }, 3467 { X86::VFNMADDPDr132r, X86::VFNMADDPDr213r, X86::VFNMADDPDr231r }, 3468 { X86::VFNMADDPSr132rY, X86::VFNMADDPSr213rY, X86::VFNMADDPSr231rY }, 3469 { X86::VFNMADDPDr132rY, X86::VFNMADDPDr213rY, X86::VFNMADDPDr231rY }, 3470 { X86::VFNMADDSSr132m, X86::VFNMADDSSr213m, X86::VFNMADDSSr231m }, 3471 { X86::VFNMADDSDr132m, X86::VFNMADDSDr213m, X86::VFNMADDSDr231m }, 3472 { X86::VFNMADDPSr132m, X86::VFNMADDPSr213m, X86::VFNMADDPSr231m }, 3473 { X86::VFNMADDPDr132m, X86::VFNMADDPDr213m, X86::VFNMADDPDr231m }, 3474 { X86::VFNMADDPSr132mY, X86::VFNMADDPSr213mY, X86::VFNMADDPSr231mY }, 3475 { X86::VFNMADDPDr132mY, X86::VFNMADDPDr213mY, X86::VFNMADDPDr231mY }, 3476 3477 { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr213r, X86::VFNMSUBSSr231r }, 3478 { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr213r, X86::VFNMSUBSDr231r }, 3479 { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr213r, X86::VFNMSUBPSr231r }, 3480 { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr213r, X86::VFNMSUBPDr231r }, 3481 { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr231rY }, 3482 { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr231rY }, 3483 { X86::VFNMSUBSSr132m, X86::VFNMSUBSSr213m, X86::VFNMSUBSSr231m }, 3484 { X86::VFNMSUBSDr132m, X86::VFNMSUBSDr213m, X86::VFNMSUBSDr231m }, 3485 { X86::VFNMSUBPSr132m, X86::VFNMSUBPSr213m, X86::VFNMSUBPSr231m }, 3486 { X86::VFNMSUBPDr132m, X86::VFNMSUBPDr213m, X86::VFNMSUBPDr231m }, 3487 { X86::VFNMSUBPSr132mY, X86::VFNMSUBPSr213mY, X86::VFNMSUBPSr231mY }, 3488 { X86::VFNMSUBPDr132mY, X86::VFNMSUBPDr213mY, X86::VFNMSUBPDr231mY }, 3489 3490 { X86::VFMADDSUBPSr132r, 
X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr231r }, 3491 { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr231r }, 3492 { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr231rY }, 3493 { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr231rY }, 3494 { X86::VFMADDSUBPSr132m, X86::VFMADDSUBPSr213m, X86::VFMADDSUBPSr231m }, 3495 { X86::VFMADDSUBPDr132m, X86::VFMADDSUBPDr213m, X86::VFMADDSUBPDr231m }, 3496 { X86::VFMADDSUBPSr132mY, X86::VFMADDSUBPSr213mY, X86::VFMADDSUBPSr231mY }, 3497 { X86::VFMADDSUBPDr132mY, X86::VFMADDSUBPDr213mY, X86::VFMADDSUBPDr231mY }, 3498 3499 { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr231r }, 3500 { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr231r }, 3501 { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr231rY }, 3502 { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr231rY }, 3503 { X86::VFMSUBADDPSr132m, X86::VFMSUBADDPSr213m, X86::VFMSUBADDPSr231m }, 3504 { X86::VFMSUBADDPDr132m, X86::VFMSUBADDPDr213m, X86::VFMSUBADDPDr231m }, 3505 { X86::VFMSUBADDPSr132mY, X86::VFMSUBADDPSr213mY, X86::VFMSUBADDPSr231mY }, 3506 { X86::VFMSUBADDPDr132mY, X86::VFMSUBADDPDr213mY, X86::VFMSUBADDPDr231mY } 3507 }; 3508 3509 // Define the array that holds FMA*_Int opcodes in groups 3510 // of 3 opcodes(132, 213, 231) in each group. 3511 static const unsigned IntrinOpcodeGroups[][3] = { 3512 { X86::VFMADDSSr132r_Int, X86::VFMADDSSr213r_Int, X86::VFMADDSSr231r_Int }, 3513 { X86::VFMADDSDr132r_Int, X86::VFMADDSDr213r_Int, X86::VFMADDSDr231r_Int }, 3514 { X86::VFMADDSSr132m_Int, X86::VFMADDSSr213m_Int, X86::VFMADDSSr231m_Int }, 3515 { X86::VFMADDSDr132m_Int, X86::VFMADDSDr213m_Int, X86::VFMADDSDr231m_Int }, 3516 3517 { X86::VFMSUBSSr132r_Int, X86::VFMSUBSSr213r_Int, X86::VFMSUBSSr231r_Int }, 3518 { X86::VFMSUBSDr132r_Int, X86::VFMSUBSDr213r_Int, X86::VFMSUBSDr231r_Int }, 3519 { X86::VFMSUBSSr132m_Int, X86::VFMSUBSSr213m_Int, X86::VFMSUBSSr231m_Int }, 3520 { X86::VFMSUBSDr132m_Int, X86::VFMSUBSDr213m_Int, X86::VFMSUBSDr231m_Int }, 3521 3522 { X86::VFNMADDSSr132r_Int, X86::VFNMADDSSr213r_Int, X86::VFNMADDSSr231r_Int }, 3523 { X86::VFNMADDSDr132r_Int, X86::VFNMADDSDr213r_Int, X86::VFNMADDSDr231r_Int }, 3524 { X86::VFNMADDSSr132m_Int, X86::VFNMADDSSr213m_Int, X86::VFNMADDSSr231m_Int }, 3525 { X86::VFNMADDSDr132m_Int, X86::VFNMADDSDr213m_Int, X86::VFNMADDSDr231m_Int }, 3526 3527 { X86::VFNMSUBSSr132r_Int, X86::VFNMSUBSSr213r_Int, X86::VFNMSUBSSr231r_Int }, 3528 { X86::VFNMSUBSDr132r_Int, X86::VFNMSUBSDr213r_Int, X86::VFNMSUBSDr231r_Int }, 3529 { X86::VFNMSUBSSr132m_Int, X86::VFNMSUBSSr213m_Int, X86::VFNMSUBSSr231m_Int }, 3530 { X86::VFNMSUBSDr132m_Int, X86::VFNMSUBSDr213m_Int, X86::VFNMSUBSDr231m_Int }, 3531 }; 3532 3533 const unsigned Form132Index = 0; 3534 const unsigned Form213Index = 1; 3535 const unsigned Form231Index = 2; 3536 const unsigned FormsNum = 3; 3537 3538 bool IsIntrinOpcode; 3539 isFMA3(Opc, &IsIntrinOpcode); 3540 3541 size_t GroupsNum; 3542 const unsigned (*OpcodeGroups)[3]; 3543 if (IsIntrinOpcode) { 3544 GroupsNum = array_lengthof(IntrinOpcodeGroups); 3545 OpcodeGroups = IntrinOpcodeGroups; 3546 } else { 3547 GroupsNum = array_lengthof(RegularOpcodeGroups); 3548 OpcodeGroups = RegularOpcodeGroups; 3549 } 3550 3551 const unsigned *FoundOpcodesGroup = nullptr; 3552 size_t FormIndex; 3553 3554 // Look for the input opcode in the corresponding opcodes table. 
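  // (For example, an input Opc of X86::VFMADDSSr213r would be found in the
  // first row of RegularOpcodeGroups with FormIndex == Form213Index.)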
3555   for (size_t GroupIndex = 0; GroupIndex < GroupsNum && !FoundOpcodesGroup;
3556        ++GroupIndex) {
3557     for (FormIndex = 0; FormIndex < FormsNum; ++FormIndex) {
3558       if (OpcodeGroups[GroupIndex][FormIndex] == Opc) {
3559         FoundOpcodesGroup = OpcodeGroups[GroupIndex];
3560         break;
3561       }
3562     }
3563   }
3564 
3565   // The input opcode does not match any of the opcodes from the tables.
3566   // The unsupported FMA opcode must be added to one of the two opcode groups
3567   // defined above.
3568   assert(FoundOpcodesGroup != nullptr && "Unexpected FMA3 opcode");
3569 
3570   // Put the lowest index in SrcOpIdx1 to simplify the checks below.
3571   if (SrcOpIdx1 > SrcOpIdx2)
3572     std::swap(SrcOpIdx1, SrcOpIdx2);
3573 
3574   // TODO: Commuting the 1st operand of FMA*_Int requires some additional
3575   // analysis. The commute optimization is legal only if all users of FMA*_Int
3576   // use only the lowest element of the FMA*_Int instruction. Such analysis is
3577   // not implemented yet, so just return 0 in that case.
3578   // When such analysis becomes available, this will be the right place to
3579   // call it.
3580   if (IsIntrinOpcode && SrcOpIdx1 == 1)
3581     return 0;
3582 
3583   unsigned Case;
3584   if (SrcOpIdx1 == 1 && SrcOpIdx2 == 2)
3585     Case = 0;
3586   else if (SrcOpIdx1 == 1 && SrcOpIdx2 == 3)
3587     Case = 1;
3588   else if (SrcOpIdx1 == 2 && SrcOpIdx2 == 3)
3589     Case = 2;
3590   else
3591     return 0;
3592 
3593   // Define the FMA forms mapping array that helps to map the input FMA form
3594   // to the output FMA form that preserves the operation semantics after
3595   // commuting the operands.
3596   static const unsigned FormMapping[][3] = {
3597     // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
3598     //   FMA132 A, C, b; ==> FMA231 C, A, b;
3599     //   FMA213 B, A, c; ==> FMA213 A, B, c;
3600     //   FMA231 C, A, b; ==> FMA132 A, C, b;
3601     { Form231Index, Form213Index, Form132Index },
3602     // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
3603     //   FMA132 A, c, B; ==> FMA132 B, c, A;
3604     //   FMA213 B, a, C; ==> FMA231 C, a, B;
3605     //   FMA231 C, a, B; ==> FMA213 B, a, C;
3606     { Form132Index, Form231Index, Form213Index },
3607     // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
3608     //   FMA132 a, C, B; ==> FMA213 a, B, C;
3609     //   FMA213 b, A, C; ==> FMA132 b, C, A;
3610     //   FMA231 c, A, B; ==> FMA231 c, B, A;
3611     { Form213Index, Form132Index, Form231Index }
3612   };
3613 
3614   // Everything is ready; just adjust the FMA opcode and return it.
3615   FormIndex = FormMapping[Case][FormIndex];
3616   return FoundOpcodesGroup[FormIndex];
3617 }
3618 
3619 bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI,
3620                                          unsigned &SrcOpIdx1,
3621                                          unsigned &SrcOpIdx2) const {
3622   switch (MI->getOpcode()) {
3623   case X86::CMPPDrri:
3624   case X86::CMPPSrri:
3625   case X86::VCMPPDrri:
3626   case X86::VCMPPSrri:
3627   case X86::VCMPPDYrri:
3628   case X86::VCMPPSYrri: {
3629     // Float comparisons can be safely commuted for
3630     // Ordered/Unordered/Equal/NotEqual tests.
3631     unsigned Imm = MI->getOperand(3).getImm() & 0x7;
3632     switch (Imm) {
3633     case 0x00: // EQUAL
3634     case 0x03: // UNORDERED
3635     case 0x04: // NOT EQUAL
3636     case 0x07: // ORDERED
3637       // The indices of the commutable operands are 1 and 2.
3638       // Assign them to the returned operand indices here.
3639 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2); 3640 } 3641 return false; 3642 } 3643 default: 3644 if (isFMA3(MI->getOpcode())) 3645 return findFMA3CommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 3646 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 3647 } 3648 return false; 3649} 3650 3651static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) { 3652 switch (BrOpc) { 3653 default: return X86::COND_INVALID; 3654 case X86::JE_1: return X86::COND_E; 3655 case X86::JNE_1: return X86::COND_NE; 3656 case X86::JL_1: return X86::COND_L; 3657 case X86::JLE_1: return X86::COND_LE; 3658 case X86::JG_1: return X86::COND_G; 3659 case X86::JGE_1: return X86::COND_GE; 3660 case X86::JB_1: return X86::COND_B; 3661 case X86::JBE_1: return X86::COND_BE; 3662 case X86::JA_1: return X86::COND_A; 3663 case X86::JAE_1: return X86::COND_AE; 3664 case X86::JS_1: return X86::COND_S; 3665 case X86::JNS_1: return X86::COND_NS; 3666 case X86::JP_1: return X86::COND_P; 3667 case X86::JNP_1: return X86::COND_NP; 3668 case X86::JO_1: return X86::COND_O; 3669 case X86::JNO_1: return X86::COND_NO; 3670 } 3671} 3672 3673/// Return condition code of a SET opcode. 3674static X86::CondCode getCondFromSETOpc(unsigned Opc) { 3675 switch (Opc) { 3676 default: return X86::COND_INVALID; 3677 case X86::SETAr: case X86::SETAm: return X86::COND_A; 3678 case X86::SETAEr: case X86::SETAEm: return X86::COND_AE; 3679 case X86::SETBr: case X86::SETBm: return X86::COND_B; 3680 case X86::SETBEr: case X86::SETBEm: return X86::COND_BE; 3681 case X86::SETEr: case X86::SETEm: return X86::COND_E; 3682 case X86::SETGr: case X86::SETGm: return X86::COND_G; 3683 case X86::SETGEr: case X86::SETGEm: return X86::COND_GE; 3684 case X86::SETLr: case X86::SETLm: return X86::COND_L; 3685 case X86::SETLEr: case X86::SETLEm: return X86::COND_LE; 3686 case X86::SETNEr: case X86::SETNEm: return X86::COND_NE; 3687 case X86::SETNOr: case X86::SETNOm: return X86::COND_NO; 3688 case X86::SETNPr: case X86::SETNPm: return X86::COND_NP; 3689 case X86::SETNSr: case X86::SETNSm: return X86::COND_NS; 3690 case X86::SETOr: case X86::SETOm: return X86::COND_O; 3691 case X86::SETPr: case X86::SETPm: return X86::COND_P; 3692 case X86::SETSr: case X86::SETSm: return X86::COND_S; 3693 } 3694} 3695 3696/// Return condition code of a CMov opcode. 
3697X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) { 3698 switch (Opc) { 3699 default: return X86::COND_INVALID; 3700 case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm: 3701 case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr: 3702 return X86::COND_A; 3703 case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm: 3704 case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr: 3705 return X86::COND_AE; 3706 case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm: 3707 case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr: 3708 return X86::COND_B; 3709 case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm: 3710 case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr: 3711 return X86::COND_BE; 3712 case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm: 3713 case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr: 3714 return X86::COND_E; 3715 case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm: 3716 case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr: 3717 return X86::COND_G; 3718 case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm: 3719 case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr: 3720 return X86::COND_GE; 3721 case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm: 3722 case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr: 3723 return X86::COND_L; 3724 case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm: 3725 case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr: 3726 return X86::COND_LE; 3727 case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm: 3728 case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr: 3729 return X86::COND_NE; 3730 case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm: 3731 case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr: 3732 return X86::COND_NO; 3733 case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm: 3734 case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr: 3735 return X86::COND_NP; 3736 case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm: 3737 case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr: 3738 return X86::COND_NS; 3739 case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm: 3740 case X86::CMOVO32rr: case X86::CMOVO64rm: case X86::CMOVO64rr: 3741 return X86::COND_O; 3742 case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm: 3743 case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr: 3744 return X86::COND_P; 3745 case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm: 3746 case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr: 3747 return X86::COND_S; 3748 } 3749} 3750 3751unsigned X86::GetCondBranchFromCond(X86::CondCode CC) { 3752 switch (CC) { 3753 default: llvm_unreachable("Illegal condition code!"); 3754 case X86::COND_E: return X86::JE_1; 3755 case X86::COND_NE: return X86::JNE_1; 3756 case X86::COND_L: return X86::JL_1; 3757 case X86::COND_LE: return X86::JLE_1; 3758 case X86::COND_G: return X86::JG_1; 3759 case X86::COND_GE: return X86::JGE_1; 3760 case X86::COND_B: return X86::JB_1; 3761 case X86::COND_BE: return X86::JBE_1; 3762 case X86::COND_A: return X86::JA_1; 3763 case X86::COND_AE: return X86::JAE_1; 3764 case X86::COND_S: return X86::JS_1; 3765 case X86::COND_NS: return X86::JNS_1; 3766 case X86::COND_P: return X86::JP_1; 3767 case X86::COND_NP: return X86::JNP_1; 3768 case X86::COND_O: return X86::JO_1; 3769 case 
X86::COND_NO: return X86::JNO_1; 3770 } 3771} 3772 3773/// Return the inverse of the specified condition, 3774/// e.g. turning COND_E to COND_NE. 3775X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { 3776 switch (CC) { 3777 default: llvm_unreachable("Illegal condition code!"); 3778 case X86::COND_E: return X86::COND_NE; 3779 case X86::COND_NE: return X86::COND_E; 3780 case X86::COND_L: return X86::COND_GE; 3781 case X86::COND_LE: return X86::COND_G; 3782 case X86::COND_G: return X86::COND_LE; 3783 case X86::COND_GE: return X86::COND_L; 3784 case X86::COND_B: return X86::COND_AE; 3785 case X86::COND_BE: return X86::COND_A; 3786 case X86::COND_A: return X86::COND_BE; 3787 case X86::COND_AE: return X86::COND_B; 3788 case X86::COND_S: return X86::COND_NS; 3789 case X86::COND_NS: return X86::COND_S; 3790 case X86::COND_P: return X86::COND_NP; 3791 case X86::COND_NP: return X86::COND_P; 3792 case X86::COND_O: return X86::COND_NO; 3793 case X86::COND_NO: return X86::COND_O; 3794 } 3795} 3796 3797/// Assuming the flags are set by MI(a,b), return the condition code if we 3798/// modify the instructions such that flags are set by MI(b,a). 3799static X86::CondCode getSwappedCondition(X86::CondCode CC) { 3800 switch (CC) { 3801 default: return X86::COND_INVALID; 3802 case X86::COND_E: return X86::COND_E; 3803 case X86::COND_NE: return X86::COND_NE; 3804 case X86::COND_L: return X86::COND_G; 3805 case X86::COND_LE: return X86::COND_GE; 3806 case X86::COND_G: return X86::COND_L; 3807 case X86::COND_GE: return X86::COND_LE; 3808 case X86::COND_B: return X86::COND_A; 3809 case X86::COND_BE: return X86::COND_AE; 3810 case X86::COND_A: return X86::COND_B; 3811 case X86::COND_AE: return X86::COND_BE; 3812 } 3813} 3814 3815/// Return a set opcode for the given condition and 3816/// whether it has memory operand. 3817unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) { 3818 static const uint16_t Opc[16][2] = { 3819 { X86::SETAr, X86::SETAm }, 3820 { X86::SETAEr, X86::SETAEm }, 3821 { X86::SETBr, X86::SETBm }, 3822 { X86::SETBEr, X86::SETBEm }, 3823 { X86::SETEr, X86::SETEm }, 3824 { X86::SETGr, X86::SETGm }, 3825 { X86::SETGEr, X86::SETGEm }, 3826 { X86::SETLr, X86::SETLm }, 3827 { X86::SETLEr, X86::SETLEm }, 3828 { X86::SETNEr, X86::SETNEm }, 3829 { X86::SETNOr, X86::SETNOm }, 3830 { X86::SETNPr, X86::SETNPm }, 3831 { X86::SETNSr, X86::SETNSm }, 3832 { X86::SETOr, X86::SETOm }, 3833 { X86::SETPr, X86::SETPm }, 3834 { X86::SETSr, X86::SETSm } 3835 }; 3836 3837 assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes"); 3838 return Opc[CC][HasMemoryOperand ? 1 : 0]; 3839} 3840 3841/// Return a cmov opcode for the given condition, 3842/// register size in bytes, and operand type. 
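/// For example, (CC = COND_A, RegBytes = 4, HasMemoryOperand = false)
/// yields X86::CMOVA32rr.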
3843unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes, 3844 bool HasMemoryOperand) { 3845 static const uint16_t Opc[32][3] = { 3846 { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr }, 3847 { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr }, 3848 { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr }, 3849 { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr }, 3850 { X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr }, 3851 { X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr }, 3852 { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr }, 3853 { X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr }, 3854 { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr }, 3855 { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr }, 3856 { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr }, 3857 { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr }, 3858 { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr }, 3859 { X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr }, 3860 { X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr }, 3861 { X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr }, 3862 { X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm }, 3863 { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm }, 3864 { X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm }, 3865 { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm }, 3866 { X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm }, 3867 { X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm }, 3868 { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm }, 3869 { X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm }, 3870 { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm }, 3871 { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm }, 3872 { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm }, 3873 { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm }, 3874 { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm }, 3875 { X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm }, 3876 { X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm }, 3877 { X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm } 3878 }; 3879 3880 assert(CC < 16 && "Can only handle standard cond codes"); 3881 unsigned Idx = HasMemoryOperand ? 16+CC : CC; 3882 switch(RegBytes) { 3883 default: llvm_unreachable("Illegal register size!"); 3884 case 2: return Opc[Idx][0]; 3885 case 4: return Opc[Idx][1]; 3886 case 8: return Opc[Idx][2]; 3887 } 3888} 3889 3890bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { 3891 if (!MI->isTerminator()) return false; 3892 3893 // Conditional branch is a special case. 3894 if (MI->isBranch() && !MI->isBarrier()) 3895 return true; 3896 if (!MI->isPredicable()) 3897 return true; 3898 return !isPredicated(MI); 3899} 3900 3901bool X86InstrInfo::AnalyzeBranchImpl( 3902 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, 3903 SmallVectorImpl<MachineOperand> &Cond, 3904 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { 3905 3906 // Start from the bottom of the block and work up, examining the 3907 // terminator instructions. 3908 MachineBasicBlock::iterator I = MBB.end(); 3909 MachineBasicBlock::iterator UnCondBrIter = MBB.end(); 3910 while (I != MBB.begin()) { 3911 --I; 3912 if (I->isDebugValue()) 3913 continue; 3914 3915 // Working from the bottom, when we see a non-terminator instruction, we're 3916 // done. 3917 if (!isUnpredicatedTerminator(I)) 3918 break; 3919 3920 // A terminator that isn't a branch can't easily be handled by this 3921 // analysis. 3922 if (!I->isBranch()) 3923 return true; 3924 3925 // Handle unconditional branches. 
3926 if (I->getOpcode() == X86::JMP_1) { 3927 UnCondBrIter = I; 3928 3929 if (!AllowModify) { 3930 TBB = I->getOperand(0).getMBB(); 3931 continue; 3932 } 3933 3934 // If the block has any instructions after a JMP, delete them. 3935 while (std::next(I) != MBB.end()) 3936 std::next(I)->eraseFromParent(); 3937 3938 Cond.clear(); 3939 FBB = nullptr; 3940 3941 // Delete the JMP if it's equivalent to a fall-through. 3942 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { 3943 TBB = nullptr; 3944 I->eraseFromParent(); 3945 I = MBB.end(); 3946 UnCondBrIter = MBB.end(); 3947 continue; 3948 } 3949 3950 // TBB is used to indicate the unconditional destination. 3951 TBB = I->getOperand(0).getMBB(); 3952 continue; 3953 } 3954 3955 // Handle conditional branches. 3956 X86::CondCode BranchCode = getCondFromBranchOpc(I->getOpcode()); 3957 if (BranchCode == X86::COND_INVALID) 3958 return true; // Can't handle indirect branch. 3959 3960 // Working from the bottom, handle the first conditional branch. 3961 if (Cond.empty()) { 3962 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); 3963 if (AllowModify && UnCondBrIter != MBB.end() && 3964 MBB.isLayoutSuccessor(TargetBB)) { 3965 // If we can modify the code and it ends in something like: 3966 // 3967 // jCC L1 3968 // jmp L2 3969 // L1: 3970 // ... 3971 // L2: 3972 // 3973 // Then we can change this to: 3974 // 3975 // jnCC L2 3976 // L1: 3977 // ... 3978 // L2: 3979 // 3980 // Which is a bit more efficient. 3981 // We conditionally jump to the fall-through block. 3982 BranchCode = GetOppositeBranchCondition(BranchCode); 3983 unsigned JNCC = GetCondBranchFromCond(BranchCode); 3984 MachineBasicBlock::iterator OldInst = I; 3985 3986 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC)) 3987 .addMBB(UnCondBrIter->getOperand(0).getMBB()); 3988 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) 3989 .addMBB(TargetBB); 3990 3991 OldInst->eraseFromParent(); 3992 UnCondBrIter->eraseFromParent(); 3993 3994 // Restart the analysis. 3995 UnCondBrIter = MBB.end(); 3996 I = MBB.end(); 3997 continue; 3998 } 3999 4000 FBB = TBB; 4001 TBB = I->getOperand(0).getMBB(); 4002 Cond.push_back(MachineOperand::CreateImm(BranchCode)); 4003 CondBranches.push_back(I); 4004 continue; 4005 } 4006 4007 // Handle subsequent conditional branches. Only handle the case where all 4008 // conditional branches branch to the same destination and their condition 4009 // opcodes fit one of the special multi-branch idioms. 4010 assert(Cond.size() == 1); 4011 assert(TBB); 4012 4013 // Only handle the case where all conditional branches branch to the same 4014 // destination. 4015 if (TBB != I->getOperand(0).getMBB()) 4016 return true; 4017 4018 // If the conditions are the same, we can leave them alone. 4019 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); 4020 if (OldBranchCode == BranchCode) 4021 continue; 4022 4023 // If they differ, see if they fit one of the known patterns. Theoretically, 4024 // we could handle more patterns here, but we shouldn't expect to see them 4025 // if instruction selection has done a reasonable job. 
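    // For instance, a floating-point "not equal (unordered)" test is
    // typically lowered to the branch pair "jne L; jp L"; the analysis
    // summarizes that pair as the single pseudo-condition COND_NE_OR_P,
    // which InsertBranch later re-expands into two branches.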
4026 if ((OldBranchCode == X86::COND_NP && 4027 BranchCode == X86::COND_E) || 4028 (OldBranchCode == X86::COND_E && 4029 BranchCode == X86::COND_NP)) 4030 BranchCode = X86::COND_NP_OR_E; 4031 else if ((OldBranchCode == X86::COND_P && 4032 BranchCode == X86::COND_NE) || 4033 (OldBranchCode == X86::COND_NE && 4034 BranchCode == X86::COND_P)) 4035 BranchCode = X86::COND_NE_OR_P; 4036 else 4037 return true; 4038 4039 // Update the MachineOperand. 4040 Cond[0].setImm(BranchCode); 4041 CondBranches.push_back(I); 4042 } 4043 4044 return false; 4045} 4046 4047bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, 4048 MachineBasicBlock *&TBB, 4049 MachineBasicBlock *&FBB, 4050 SmallVectorImpl<MachineOperand> &Cond, 4051 bool AllowModify) const { 4052 SmallVector<MachineInstr *, 4> CondBranches; 4053 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); 4054} 4055 4056bool X86InstrInfo::AnalyzeBranchPredicate(MachineBasicBlock &MBB, 4057 MachineBranchPredicate &MBP, 4058 bool AllowModify) const { 4059 using namespace std::placeholders; 4060 4061 SmallVector<MachineOperand, 4> Cond; 4062 SmallVector<MachineInstr *, 4> CondBranches; 4063 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, 4064 AllowModify)) 4065 return true; 4066 4067 if (Cond.size() != 1) 4068 return true; 4069 4070 assert(MBP.TrueDest && "expected!"); 4071 4072 if (!MBP.FalseDest) 4073 MBP.FalseDest = MBB.getNextNode(); 4074 4075 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4076 4077 MachineInstr *ConditionDef = nullptr; 4078 bool SingleUseCondition = true; 4079 4080 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) { 4081 if (I->modifiesRegister(X86::EFLAGS, TRI)) { 4082 ConditionDef = &*I; 4083 break; 4084 } 4085 4086 if (I->readsRegister(X86::EFLAGS, TRI)) 4087 SingleUseCondition = false; 4088 } 4089 4090 if (!ConditionDef) 4091 return true; 4092 4093 if (SingleUseCondition) { 4094 for (auto *Succ : MBB.successors()) 4095 if (Succ->isLiveIn(X86::EFLAGS)) 4096 SingleUseCondition = false; 4097 } 4098 4099 MBP.ConditionDef = ConditionDef; 4100 MBP.SingleUseCondition = SingleUseCondition; 4101 4102 // Currently we only recognize the simple pattern: 4103 // 4104 // test %reg, %reg 4105 // je %label 4106 // 4107 const unsigned TestOpcode = 4108 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; 4109 4110 if (ConditionDef->getOpcode() == TestOpcode && 4111 ConditionDef->getNumOperands() == 3 && 4112 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && 4113 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { 4114 MBP.LHS = ConditionDef->getOperand(0); 4115 MBP.RHS = MachineOperand::CreateImm(0); 4116 MBP.Predicate = Cond[0].getImm() == X86::COND_NE 4117 ? MachineBranchPredicate::PRED_NE 4118 : MachineBranchPredicate::PRED_EQ; 4119 return false; 4120 } 4121 4122 return true; 4123} 4124 4125unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 4126 MachineBasicBlock::iterator I = MBB.end(); 4127 unsigned Count = 0; 4128 4129 while (I != MBB.begin()) { 4130 --I; 4131 if (I->isDebugValue()) 4132 continue; 4133 if (I->getOpcode() != X86::JMP_1 && 4134 getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID) 4135 break; 4136 // Remove the branch. 
4137 I->eraseFromParent(); 4138 I = MBB.end(); 4139 ++Count; 4140 } 4141 4142 return Count; 4143} 4144 4145unsigned 4146X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 4147 MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond, 4148 DebugLoc DL) const { 4149 // Shouldn't be a fall through. 4150 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 4151 assert((Cond.size() == 1 || Cond.size() == 0) && 4152 "X86 branch conditions have one component!"); 4153 4154 if (Cond.empty()) { 4155 // Unconditional branch? 4156 assert(!FBB && "Unconditional branch with multiple successors!"); 4157 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); 4158 return 1; 4159 } 4160 4161 // Conditional branch. 4162 unsigned Count = 0; 4163 X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); 4164 switch (CC) { 4165 case X86::COND_NP_OR_E: 4166 // Synthesize NP_OR_E with two branches. 4167 BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB); 4168 ++Count; 4169 BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB); 4170 ++Count; 4171 break; 4172 case X86::COND_NE_OR_P: 4173 // Synthesize NE_OR_P with two branches. 4174 BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB); 4175 ++Count; 4176 BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB); 4177 ++Count; 4178 break; 4179 default: { 4180 unsigned Opc = GetCondBranchFromCond(CC); 4181 BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); 4182 ++Count; 4183 } 4184 } 4185 if (FBB) { 4186 // Two-way Conditional branch. Insert the second branch. 4187 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); 4188 ++Count; 4189 } 4190 return Count; 4191} 4192 4193bool X86InstrInfo:: 4194canInsertSelect(const MachineBasicBlock &MBB, 4195 ArrayRef<MachineOperand> Cond, 4196 unsigned TrueReg, unsigned FalseReg, 4197 int &CondCycles, int &TrueCycles, int &FalseCycles) const { 4198 // Not all subtargets have cmov instructions. 4199 if (!Subtarget.hasCMov()) 4200 return false; 4201 if (Cond.size() != 1) 4202 return false; 4203 // We cannot do the composite conditions, at least not in SSA form. 4204 if ((X86::CondCode)Cond[0].getImm() > X86::COND_S) 4205 return false; 4206 4207 // Check register classes. 4208 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4209 const TargetRegisterClass *RC = 4210 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); 4211 if (!RC) 4212 return false; 4213 4214 // We have cmov instructions for 16, 32, and 64 bit general purpose registers. 4215 if (X86::GR16RegClass.hasSubClassEq(RC) || 4216 X86::GR32RegClass.hasSubClassEq(RC) || 4217 X86::GR64RegClass.hasSubClassEq(RC)) { 4218 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy 4219 // Bridge. Probably Ivy Bridge as well. 4220 CondCycles = 2; 4221 TrueCycles = 2; 4222 FalseCycles = 2; 4223 return true; 4224 } 4225 4226 // Can't do vectors. 4227 return false; 4228} 4229 4230void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, 4231 MachineBasicBlock::iterator I, DebugLoc DL, 4232 unsigned DstReg, ArrayRef<MachineOperand> Cond, 4233 unsigned TrueReg, unsigned FalseReg) const { 4234 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4235 assert(Cond.size() == 1 && "Invalid Cond array"); 4236 unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(), 4237 MRI.getRegClass(DstReg)->getSize(), 4238 false/*HasMemoryOperand*/); 4239 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg); 4240} 4241 4242/// Test if the given register is a physical h register. 
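/// (The H registers are AH, CH, DH and BH: bits 15:8 of the first four
/// legacy GPRs. They are the only GR8 registers with no REX-prefixed
/// encoding.)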
4243static bool isHReg(unsigned Reg) { 4244 return X86::GR8_ABCD_HRegClass.contains(Reg); 4245} 4246 4247// Try and copy between VR128/VR64 and GR64 registers. 4248static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, 4249 const X86Subtarget &Subtarget) { 4250 4251 // SrcReg(VR128) -> DestReg(GR64) 4252 // SrcReg(VR64) -> DestReg(GR64) 4253 // SrcReg(GR64) -> DestReg(VR128) 4254 // SrcReg(GR64) -> DestReg(VR64) 4255 4256 bool HasAVX = Subtarget.hasAVX(); 4257 bool HasAVX512 = Subtarget.hasAVX512(); 4258 if (X86::GR64RegClass.contains(DestReg)) { 4259 if (X86::VR128XRegClass.contains(SrcReg)) 4260 // Copy from a VR128 register to a GR64 register. 4261 return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr : 4262 X86::MOVPQIto64rr); 4263 if (X86::VR64RegClass.contains(SrcReg)) 4264 // Copy from a VR64 register to a GR64 register. 4265 return X86::MMX_MOVD64from64rr; 4266 } else if (X86::GR64RegClass.contains(SrcReg)) { 4267 // Copy from a GR64 register to a VR128 register. 4268 if (X86::VR128XRegClass.contains(DestReg)) 4269 return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr : 4270 X86::MOV64toPQIrr); 4271 // Copy from a GR64 register to a VR64 register. 4272 if (X86::VR64RegClass.contains(DestReg)) 4273 return X86::MMX_MOVD64to64rr; 4274 } 4275 4276 // SrcReg(FR32) -> DestReg(GR32) 4277 // SrcReg(GR32) -> DestReg(FR32) 4278 4279 if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg)) 4280 // Copy from a FR32 register to a GR32 register. 4281 return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr); 4282 4283 if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg)) 4284 // Copy from a GR32 register to a FR32 register. 4285 return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? 
X86::VMOVDI2SSrr : X86::MOVDI2SSrr); 4286 return 0; 4287} 4288 4289static bool MaskRegClassContains(unsigned Reg) { 4290 return X86::VK8RegClass.contains(Reg) || 4291 X86::VK16RegClass.contains(Reg) || 4292 X86::VK32RegClass.contains(Reg) || 4293 X86::VK64RegClass.contains(Reg) || 4294 X86::VK1RegClass.contains(Reg); 4295} 4296 4297static bool GRRegClassContains(unsigned Reg) { 4298 return X86::GR64RegClass.contains(Reg) || 4299 X86::GR32RegClass.contains(Reg) || 4300 X86::GR16RegClass.contains(Reg) || 4301 X86::GR8RegClass.contains(Reg); 4302} 4303static 4304unsigned copyPhysRegOpcode_AVX512_DQ(unsigned& DestReg, unsigned& SrcReg) { 4305 if (MaskRegClassContains(SrcReg) && X86::GR8RegClass.contains(DestReg)) { 4306 DestReg = getX86SubSuperRegister(DestReg, 32); 4307 return X86::KMOVBrk; 4308 } 4309 if (MaskRegClassContains(DestReg) && X86::GR8RegClass.contains(SrcReg)) { 4310 SrcReg = getX86SubSuperRegister(SrcReg, 32); 4311 return X86::KMOVBkr; 4312 } 4313 return 0; 4314} 4315 4316static 4317unsigned copyPhysRegOpcode_AVX512_BW(unsigned& DestReg, unsigned& SrcReg) { 4318 if (MaskRegClassContains(SrcReg) && MaskRegClassContains(DestReg)) 4319 return X86::KMOVQkk; 4320 if (MaskRegClassContains(SrcReg) && X86::GR32RegClass.contains(DestReg)) 4321 return X86::KMOVDrk; 4322 if (MaskRegClassContains(SrcReg) && X86::GR64RegClass.contains(DestReg)) 4323 return X86::KMOVQrk; 4324 if (MaskRegClassContains(DestReg) && X86::GR32RegClass.contains(SrcReg)) 4325 return X86::KMOVDkr; 4326 if (MaskRegClassContains(DestReg) && X86::GR64RegClass.contains(SrcReg)) 4327 return X86::KMOVQkr; 4328 return 0; 4329} 4330 4331static 4332unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg, 4333 const X86Subtarget &Subtarget) 4334{ 4335 if (Subtarget.hasDQI()) 4336 if (auto Opc = copyPhysRegOpcode_AVX512_DQ(DestReg, SrcReg)) 4337 return Opc; 4338 if (Subtarget.hasBWI()) 4339 if (auto Opc = copyPhysRegOpcode_AVX512_BW(DestReg, SrcReg)) 4340 return Opc; 4341 if (X86::VR128XRegClass.contains(DestReg, SrcReg) || 4342 X86::VR256XRegClass.contains(DestReg, SrcReg) || 4343 X86::VR512RegClass.contains(DestReg, SrcReg)) { 4344 DestReg = get512BitSuperRegister(DestReg); 4345 SrcReg = get512BitSuperRegister(SrcReg); 4346 return X86::VMOVAPSZrr; 4347 } 4348 if (MaskRegClassContains(DestReg) && MaskRegClassContains(SrcReg)) 4349 return X86::KMOVWkk; 4350 if (MaskRegClassContains(DestReg) && GRRegClassContains(SrcReg)) { 4351 SrcReg = getX86SubSuperRegister(SrcReg, 32); 4352 return X86::KMOVWkr; 4353 } 4354 if (GRRegClassContains(DestReg) && MaskRegClassContains(SrcReg)) { 4355 DestReg = getX86SubSuperRegister(DestReg, 32); 4356 return X86::KMOVWrk; 4357 } 4358 return 0; 4359} 4360 4361void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, 4362 MachineBasicBlock::iterator MI, DebugLoc DL, 4363 unsigned DestReg, unsigned SrcReg, 4364 bool KillSrc) const { 4365 // First deal with the normal symmetric copies. 4366 bool HasAVX = Subtarget.hasAVX(); 4367 bool HasAVX512 = Subtarget.hasAVX512(); 4368 unsigned Opc = 0; 4369 if (X86::GR64RegClass.contains(DestReg, SrcReg)) 4370 Opc = X86::MOV64rr; 4371 else if (X86::GR32RegClass.contains(DestReg, SrcReg)) 4372 Opc = X86::MOV32rr; 4373 else if (X86::GR16RegClass.contains(DestReg, SrcReg)) 4374 Opc = X86::MOV16rr; 4375 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { 4376 // Copying to or from a physical H register on x86-64 requires a NOREX 4377 // move. Otherwise use a normal move. 
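    // (Background, since it is easy to trip over: any REX prefix re-purposes
    // the AH/CH/DH/BH encodings as SPL/BPL/SIL/DIL, so an H register cannot
    // appear in a REX-prefixed instruction at all. MOV8rr_NOREX is a MOV8rr
    // whose operands are constrained to GR8_NOREX, guaranteeing the encoder
    // never needs to emit a REX prefix.)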
4378 if ((isHReg(DestReg) || isHReg(SrcReg)) && 4379 Subtarget.is64Bit()) { 4380 Opc = X86::MOV8rr_NOREX; 4381 // Both operands must be encodable without an REX prefix. 4382 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) && 4383 "8-bit H register can not be copied outside GR8_NOREX"); 4384 } else 4385 Opc = X86::MOV8rr; 4386 } 4387 else if (X86::VR64RegClass.contains(DestReg, SrcReg)) 4388 Opc = X86::MMX_MOVQ64rr; 4389 else if (HasAVX512) 4390 Opc = copyPhysRegOpcode_AVX512(DestReg, SrcReg, Subtarget); 4391 else if (X86::VR128RegClass.contains(DestReg, SrcReg)) 4392 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr; 4393 else if (X86::VR256RegClass.contains(DestReg, SrcReg)) 4394 Opc = X86::VMOVAPSYrr; 4395 if (!Opc) 4396 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget); 4397 4398 if (Opc) { 4399 BuildMI(MBB, MI, DL, get(Opc), DestReg) 4400 .addReg(SrcReg, getKillRegState(KillSrc)); 4401 return; 4402 } 4403 4404 bool FromEFLAGS = SrcReg == X86::EFLAGS; 4405 bool ToEFLAGS = DestReg == X86::EFLAGS; 4406 int Reg = FromEFLAGS ? DestReg : SrcReg; 4407 bool is32 = X86::GR32RegClass.contains(Reg); 4408 bool is64 = X86::GR64RegClass.contains(Reg); 4409 4410 if ((FromEFLAGS || ToEFLAGS) && (is32 || is64)) { 4411 int Mov = is64 ? X86::MOV64rr : X86::MOV32rr; 4412 int Push = is64 ? X86::PUSH64r : X86::PUSH32r; 4413 int PushF = is64 ? X86::PUSHF64 : X86::PUSHF32; 4414 int Pop = is64 ? X86::POP64r : X86::POP32r; 4415 int PopF = is64 ? X86::POPF64 : X86::POPF32; 4416 int AX = is64 ? X86::RAX : X86::EAX; 4417 4418 if (!Subtarget.hasLAHFSAHF()) { 4419 assert(Subtarget.is64Bit() && 4420 "Not having LAHF/SAHF only happens on 64-bit."); 4421 // Moving EFLAGS to / from another register requires a push and a pop. 4422 // Notice that we have to adjust the stack if we don't want to clobber the 4423 // first frame index. See X86FrameLowering.cpp - usesTheStack. 4424 if (FromEFLAGS) { 4425 BuildMI(MBB, MI, DL, get(PushF)); 4426 BuildMI(MBB, MI, DL, get(Pop), DestReg); 4427 } 4428 if (ToEFLAGS) { 4429 BuildMI(MBB, MI, DL, get(Push)) 4430 .addReg(SrcReg, getKillRegState(KillSrc)); 4431 BuildMI(MBB, MI, DL, get(PopF)); 4432 } 4433 return; 4434 } 4435 4436 // The flags need to be saved, but saving EFLAGS with PUSHF/POPF is 4437 // inefficient. Instead: 4438 // - Save the overflow flag OF into AL using SETO, and restore it using a 4439 // signed 8-bit addition of AL and INT8_MAX. 4440 // - Save/restore the bottom 8 EFLAGS bits (CF, PF, AF, ZF, SF) to/from AH 4441 // using LAHF/SAHF. 4442 // - When RAX/EAX is live and isn't the destination register, make sure it 4443 // isn't clobbered by PUSH/POP'ing it before and after saving/restoring 4444 // the flags. 4445 // This approach is ~2.25x faster than using PUSHF/POPF. 4446 // 4447 // This is still somewhat inefficient because we don't know which flags are 4448 // actually live inside EFLAGS. Were we able to do a single SETcc instead of 4449 // SETO+LAHF / ADDB+SAHF the code could be 1.02x faster. 4450 // 4451 // PUSHF/POPF is also potentially incorrect because it affects other flags 4452 // such as TF/IF/DF, which LLVM doesn't model. 4453 // 4454 // Notice that we have to adjust the stack if we don't want to clobber the 4455 // first frame index. 4456 // See X86ISelLowering.cpp - X86::hasCopyImplyingStackAdjustment. 
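  // As a sketch, the 64-bit sequence built below for copying EFLAGS out,
  // when RAX is live, is:
  //   pushq %rax
  //   seto  %al
  //   lahf
  //   movq  %rax, %dst
  //   popq  %rax
  // Copying into EFLAGS mirrors it with 'addb $127, %al' (which sets OF
  // exactly when AL holds the 1 that SETO stored) followed by 'sahf'.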
4457 4458 4459 bool AXDead = (Reg == AX) || 4460 (MachineBasicBlock::LQR_Dead == 4461 MBB.computeRegisterLiveness(&getRegisterInfo(), AX, MI)); 4462 if (!AXDead) { 4463 // FIXME: If computeRegisterLiveness() reported LQR_Unknown then AX may 4464 // actually be dead. This is not a problem for correctness as we are just 4465 // (unnecessarily) saving+restoring a dead register. However the 4466 // MachineVerifier expects operands that read from dead registers 4467 // to be marked with the "undef" flag. 4468 // An example of this can be found in 4469 // test/CodeGen/X86/peephole-na-phys-copy-folding.ll and 4470 // test/CodeGen/X86/cmpxchg-clobber-flags.ll when using 4471 // -verify-machineinstrs. 4472 BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true)); 4473 } 4474 if (FromEFLAGS) { 4475 BuildMI(MBB, MI, DL, get(X86::SETOr), X86::AL); 4476 BuildMI(MBB, MI, DL, get(X86::LAHF)); 4477 BuildMI(MBB, MI, DL, get(Mov), Reg).addReg(AX); 4478 } 4479 if (ToEFLAGS) { 4480 BuildMI(MBB, MI, DL, get(Mov), AX).addReg(Reg, getKillRegState(KillSrc)); 4481 BuildMI(MBB, MI, DL, get(X86::ADD8ri), X86::AL) 4482 .addReg(X86::AL) 4483 .addImm(INT8_MAX); 4484 BuildMI(MBB, MI, DL, get(X86::SAHF)); 4485 } 4486 if (!AXDead) 4487 BuildMI(MBB, MI, DL, get(Pop), AX); 4488 return; 4489 } 4490 4491 DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) 4492 << " to " << RI.getName(DestReg) << '\n'); 4493 llvm_unreachable("Cannot emit physreg copy instruction"); 4494} 4495 4496static unsigned getLoadStoreRegOpcode(unsigned Reg, 4497 const TargetRegisterClass *RC, 4498 bool isStackAligned, 4499 const X86Subtarget &STI, 4500 bool load) { 4501 if (STI.hasAVX512()) { 4502 if (X86::VK8RegClass.hasSubClassEq(RC) || 4503 X86::VK16RegClass.hasSubClassEq(RC)) 4504 return load ? X86::KMOVWkm : X86::KMOVWmk; 4505 if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC)) 4506 return load ? X86::VMOVSSZrm : X86::VMOVSSZmr; 4507 if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC)) 4508 return load ? X86::VMOVSDZrm : X86::VMOVSDZmr; 4509 if (X86::VR512RegClass.hasSubClassEq(RC)) 4510 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; 4511 } 4512 4513 bool HasAVX = STI.hasAVX(); 4514 switch (RC->getSize()) { 4515 default: 4516 llvm_unreachable("Unknown spill size"); 4517 case 1: 4518 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); 4519 if (STI.is64Bit()) 4520 // Copying to or from a physical H register on x86-64 requires a NOREX 4521 // move. Otherwise use a normal move. 4522 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) 4523 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; 4524 return load ? X86::MOV8rm : X86::MOV8mr; 4525 case 2: 4526 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); 4527 return load ? X86::MOV16rm : X86::MOV16mr; 4528 case 4: 4529 if (X86::GR32RegClass.hasSubClassEq(RC)) 4530 return load ? X86::MOV32rm : X86::MOV32mr; 4531 if (X86::FR32RegClass.hasSubClassEq(RC)) 4532 return load ? 4533 (HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) : 4534 (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr); 4535 if (X86::RFP32RegClass.hasSubClassEq(RC)) 4536 return load ? X86::LD_Fp32m : X86::ST_Fp32m; 4537 llvm_unreachable("Unknown 4-byte regclass"); 4538 case 8: 4539 if (X86::GR64RegClass.hasSubClassEq(RC)) 4540 return load ? X86::MOV64rm : X86::MOV64mr; 4541 if (X86::FR64RegClass.hasSubClassEq(RC)) 4542 return load ? 4543 (HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) : 4544 (HasAVX ? 
X86::VMOVSDmr : X86::MOVSDmr); 4545 if (X86::VR64RegClass.hasSubClassEq(RC)) 4546 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; 4547 if (X86::RFP64RegClass.hasSubClassEq(RC)) 4548 return load ? X86::LD_Fp64m : X86::ST_Fp64m; 4549 llvm_unreachable("Unknown 8-byte regclass"); 4550 case 10: 4551 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); 4552 return load ? X86::LD_Fp80m : X86::ST_FpP80m; 4553 case 16: { 4554 assert((X86::VR128RegClass.hasSubClassEq(RC) || 4555 X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass"); 4556 // If stack is realigned we can use aligned stores. 4557 if (isStackAligned) 4558 return load ? 4559 (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) : 4560 (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr); 4561 else 4562 return load ? 4563 (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) : 4564 (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr); 4565 } 4566 case 32: 4567 assert((X86::VR256RegClass.hasSubClassEq(RC) || 4568 X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass"); 4569 // If stack is realigned we can use aligned stores. 4570 if (isStackAligned) 4571 return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr; 4572 else 4573 return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr; 4574 case 64: 4575 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); 4576 if (isStackAligned) 4577 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; 4578 else 4579 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; 4580 } 4581} 4582 4583bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg, 4584 unsigned &Offset, 4585 const TargetRegisterInfo *TRI) const { 4586 const MCInstrDesc &Desc = MemOp->getDesc(); 4587 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode()); 4588 if (MemRefBegin < 0) 4589 return false; 4590 4591 MemRefBegin += X86II::getOperandBias(Desc); 4592 4593 BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg(); 4594 if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) 4595 return false; 4596 4597 if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != 4598 X86::NoRegister) 4599 return false; 4600 4601 const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp); 4602 4603 // Displacement can be symbolic 4604 if (!DispMO.isImm()) 4605 return false; 4606 4607 Offset = DispMO.getImm(); 4608 4609 return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() == 4610 X86::NoRegister); 4611} 4612 4613static unsigned getStoreRegOpcode(unsigned SrcReg, 4614 const TargetRegisterClass *RC, 4615 bool isStackAligned, 4616 const X86Subtarget &STI) { 4617 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false); 4618} 4619 4620 4621static unsigned getLoadRegOpcode(unsigned DestReg, 4622 const TargetRegisterClass *RC, 4623 bool isStackAligned, 4624 const X86Subtarget &STI) { 4625 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true); 4626} 4627 4628void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 4629 MachineBasicBlock::iterator MI, 4630 unsigned SrcReg, bool isKill, int FrameIdx, 4631 const TargetRegisterClass *RC, 4632 const TargetRegisterInfo *TRI) const { 4633 const MachineFunction &MF = *MBB.getParent(); 4634 assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() && 4635 "Stack slot too small for store"); 4636 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4637 bool isAligned = 4638 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 4639 
RI.canRealignStack(MF); 4640 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 4641 DebugLoc DL = MBB.findDebugLoc(MI); 4642 addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx) 4643 .addReg(SrcReg, getKillRegState(isKill)); 4644} 4645 4646void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 4647 bool isKill, 4648 SmallVectorImpl<MachineOperand> &Addr, 4649 const TargetRegisterClass *RC, 4650 MachineInstr::mmo_iterator MMOBegin, 4651 MachineInstr::mmo_iterator MMOEnd, 4652 SmallVectorImpl<MachineInstr*> &NewMIs) const { 4653 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4654 bool isAligned = MMOBegin != MMOEnd && 4655 (*MMOBegin)->getAlignment() >= Alignment; 4656 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 4657 DebugLoc DL; 4658 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); 4659 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 4660 MIB.addOperand(Addr[i]); 4661 MIB.addReg(SrcReg, getKillRegState(isKill)); 4662 (*MIB).setMemRefs(MMOBegin, MMOEnd); 4663 NewMIs.push_back(MIB); 4664} 4665 4666 4667void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 4668 MachineBasicBlock::iterator MI, 4669 unsigned DestReg, int FrameIdx, 4670 const TargetRegisterClass *RC, 4671 const TargetRegisterInfo *TRI) const { 4672 const MachineFunction &MF = *MBB.getParent(); 4673 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4674 bool isAligned = 4675 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 4676 RI.canRealignStack(MF); 4677 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 4678 DebugLoc DL = MBB.findDebugLoc(MI); 4679 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); 4680} 4681 4682void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 4683 SmallVectorImpl<MachineOperand> &Addr, 4684 const TargetRegisterClass *RC, 4685 MachineInstr::mmo_iterator MMOBegin, 4686 MachineInstr::mmo_iterator MMOEnd, 4687 SmallVectorImpl<MachineInstr*> &NewMIs) const { 4688 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4689 bool isAligned = MMOBegin != MMOEnd && 4690 (*MMOBegin)->getAlignment() >= Alignment; 4691 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 4692 DebugLoc DL; 4693 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 4694 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 4695 MIB.addOperand(Addr[i]); 4696 (*MIB).setMemRefs(MMOBegin, MMOEnd); 4697 NewMIs.push_back(MIB); 4698} 4699 4700bool X86InstrInfo:: 4701analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2, 4702 int &CmpMask, int &CmpValue) const { 4703 switch (MI->getOpcode()) { 4704 default: break; 4705 case X86::CMP64ri32: 4706 case X86::CMP64ri8: 4707 case X86::CMP32ri: 4708 case X86::CMP32ri8: 4709 case X86::CMP16ri: 4710 case X86::CMP16ri8: 4711 case X86::CMP8ri: 4712 SrcReg = MI->getOperand(0).getReg(); 4713 SrcReg2 = 0; 4714 CmpMask = ~0; 4715 CmpValue = MI->getOperand(1).getImm(); 4716 return true; 4717 // A SUB can be used to perform comparison. 
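  // (CMP is simply a SUB that only writes EFLAGS, so e.g. 'subl %esi, %edi'
  // leaves exactly the flags that 'cmpl %esi, %edi' would; when the SUB's
  // result register is otherwise unused, flag consumers can treat the SUB
  // as the compare itself.)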
4718 case X86::SUB64rm: 4719 case X86::SUB32rm: 4720 case X86::SUB16rm: 4721 case X86::SUB8rm: 4722 SrcReg = MI->getOperand(1).getReg(); 4723 SrcReg2 = 0; 4724 CmpMask = ~0; 4725 CmpValue = 0; 4726 return true; 4727 case X86::SUB64rr: 4728 case X86::SUB32rr: 4729 case X86::SUB16rr: 4730 case X86::SUB8rr: 4731 SrcReg = MI->getOperand(1).getReg(); 4732 SrcReg2 = MI->getOperand(2).getReg(); 4733 CmpMask = ~0; 4734 CmpValue = 0; 4735 return true; 4736 case X86::SUB64ri32: 4737 case X86::SUB64ri8: 4738 case X86::SUB32ri: 4739 case X86::SUB32ri8: 4740 case X86::SUB16ri: 4741 case X86::SUB16ri8: 4742 case X86::SUB8ri: 4743 SrcReg = MI->getOperand(1).getReg(); 4744 SrcReg2 = 0; 4745 CmpMask = ~0; 4746 CmpValue = MI->getOperand(2).getImm(); 4747 return true; 4748 case X86::CMP64rr: 4749 case X86::CMP32rr: 4750 case X86::CMP16rr: 4751 case X86::CMP8rr: 4752 SrcReg = MI->getOperand(0).getReg(); 4753 SrcReg2 = MI->getOperand(1).getReg(); 4754 CmpMask = ~0; 4755 CmpValue = 0; 4756 return true; 4757 case X86::TEST8rr: 4758 case X86::TEST16rr: 4759 case X86::TEST32rr: 4760 case X86::TEST64rr: 4761 SrcReg = MI->getOperand(0).getReg(); 4762 if (MI->getOperand(1).getReg() != SrcReg) return false; 4763 // Compare against zero. 4764 SrcReg2 = 0; 4765 CmpMask = ~0; 4766 CmpValue = 0; 4767 return true; 4768 } 4769 return false; 4770} 4771 4772/// Check whether the first instruction, whose only 4773/// purpose is to update flags, can be made redundant. 4774/// CMPrr can be made redundant by SUBrr if the operands are the same. 4775/// This function can be extended later on. 4776/// SrcReg, SrcReg2: register operands for FlagI. 4777/// ImmValue: immediate for FlagI if it takes an immediate. 4778inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg, 4779 unsigned SrcReg2, int ImmValue, 4780 MachineInstr *OI) { 4781 if (((FlagI->getOpcode() == X86::CMP64rr && 4782 OI->getOpcode() == X86::SUB64rr) || 4783 (FlagI->getOpcode() == X86::CMP32rr && 4784 OI->getOpcode() == X86::SUB32rr) || 4785 (FlagI->getOpcode() == X86::CMP16rr && 4786 OI->getOpcode() == X86::SUB16rr) || 4787 (FlagI->getOpcode() == X86::CMP8rr && 4788 OI->getOpcode() == X86::SUB8rr)) && 4789 ((OI->getOperand(1).getReg() == SrcReg && 4790 OI->getOperand(2).getReg() == SrcReg2) || 4791 (OI->getOperand(1).getReg() == SrcReg2 && 4792 OI->getOperand(2).getReg() == SrcReg))) 4793 return true; 4794 4795 if (((FlagI->getOpcode() == X86::CMP64ri32 && 4796 OI->getOpcode() == X86::SUB64ri32) || 4797 (FlagI->getOpcode() == X86::CMP64ri8 && 4798 OI->getOpcode() == X86::SUB64ri8) || 4799 (FlagI->getOpcode() == X86::CMP32ri && 4800 OI->getOpcode() == X86::SUB32ri) || 4801 (FlagI->getOpcode() == X86::CMP32ri8 && 4802 OI->getOpcode() == X86::SUB32ri8) || 4803 (FlagI->getOpcode() == X86::CMP16ri && 4804 OI->getOpcode() == X86::SUB16ri) || 4805 (FlagI->getOpcode() == X86::CMP16ri8 && 4806 OI->getOpcode() == X86::SUB16ri8) || 4807 (FlagI->getOpcode() == X86::CMP8ri && 4808 OI->getOpcode() == X86::SUB8ri)) && 4809 OI->getOperand(1).getReg() == SrcReg && 4810 OI->getOperand(2).getImm() == ImmValue) 4811 return true; 4812 return false; 4813} 4814 4815/// Check whether the definition can be converted 4816/// to remove a comparison against zero. 4817inline static bool isDefConvertible(MachineInstr *MI) { 4818 switch (MI->getOpcode()) { 4819 default: return false; 4820 4821 // The shift instructions only modify ZF if their shift count is non-zero. 4822 // N.B.: The processor truncates the shift count depending on the encoding.
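  // (Immediate shift counts are masked to 5 bits, or 6 bits for 64-bit
  // operands, so e.g. 'shrl $32, %eax' performs a zero-bit shift and leaves
  // every flag, ZF included, unmodified - hence the explicit non-zero-count
  // checks below.)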
4823 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: 4824 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: 4825 return getTruncatedShiftCount(MI, 2) != 0; 4826 4827 // Some left shift instructions can be turned into LEA instructions but only 4828 // if their flags aren't used. Avoid transforming such instructions. 4829 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ 4830 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 4831 if (isTruncatedShiftCountForLEA(ShAmt)) return false; 4832 return ShAmt != 0; 4833 } 4834 4835 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: 4836 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: 4837 return getTruncatedShiftCount(MI, 3) != 0; 4838 4839 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: 4840 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: 4841 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: 4842 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: 4843 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: 4844 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: 4845 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: 4846 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: 4847 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: 4848 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: 4849 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: 4850 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: 4851 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: 4852 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: 4853 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: 4854 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: 4855 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: 4856 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: 4857 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: 4858 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: 4859 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: 4860 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: 4861 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: 4862 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: 4863 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: 4864 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: 4865 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: 4866 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: 4867 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: 4868 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: 4869 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: 4870 case X86::ADC32ri: case X86::ADC32ri8: 4871 case X86::ADC32rr: case X86::ADC64ri32: 4872 case X86::ADC64ri8: case X86::ADC64rr: 4873 case X86::SBB32ri: case X86::SBB32ri8: 4874 case X86::SBB32rr: case X86::SBB64ri32: 4875 case X86::SBB64ri8: case X86::SBB64rr: 4876 case X86::ANDN32rr: case X86::ANDN32rm: 4877 case X86::ANDN64rr: case X86::ANDN64rm: 4878 case X86::BEXTR32rr: case X86::BEXTR64rr: 4879 case X86::BEXTR32rm: case X86::BEXTR64rm: 4880 case X86::BLSI32rr: case X86::BLSI32rm: 4881 case X86::BLSI64rr: case X86::BLSI64rm: 4882 case X86::BLSMSK32rr:case X86::BLSMSK32rm: 4883 case X86::BLSMSK64rr:case X86::BLSMSK64rm: 4884 case X86::BLSR32rr: case X86::BLSR32rm: 4885 case X86::BLSR64rr: case X86::BLSR64rm: 4886 case X86::BZHI32rr: case 
X86::BZHI32rm: 4887 case X86::BZHI64rr: case X86::BZHI64rm: 4888 case X86::LZCNT16rr: case X86::LZCNT16rm: 4889 case X86::LZCNT32rr: case X86::LZCNT32rm: 4890 case X86::LZCNT64rr: case X86::LZCNT64rm: 4891 case X86::POPCNT16rr:case X86::POPCNT16rm: 4892 case X86::POPCNT32rr:case X86::POPCNT32rm: 4893 case X86::POPCNT64rr:case X86::POPCNT64rm: 4894 case X86::TZCNT16rr: case X86::TZCNT16rm: 4895 case X86::TZCNT32rr: case X86::TZCNT32rm: 4896 case X86::TZCNT64rr: case X86::TZCNT64rm: 4897 return true; 4898 } 4899} 4900 4901/// Check whether the use can be converted to remove a comparison against zero. 4902static X86::CondCode isUseDefConvertible(MachineInstr *MI) { 4903 switch (MI->getOpcode()) { 4904 default: return X86::COND_INVALID; 4905 case X86::LZCNT16rr: case X86::LZCNT16rm: 4906 case X86::LZCNT32rr: case X86::LZCNT32rm: 4907 case X86::LZCNT64rr: case X86::LZCNT64rm: 4908 return X86::COND_B; 4909 case X86::POPCNT16rr:case X86::POPCNT16rm: 4910 case X86::POPCNT32rr:case X86::POPCNT32rm: 4911 case X86::POPCNT64rr:case X86::POPCNT64rm: 4912 return X86::COND_E; 4913 case X86::TZCNT16rr: case X86::TZCNT16rm: 4914 case X86::TZCNT32rr: case X86::TZCNT32rm: 4915 case X86::TZCNT64rr: case X86::TZCNT64rm: 4916 return X86::COND_B; 4917 } 4918} 4919 4920/// Check if there exists an earlier instruction that 4921/// operates on the same source operands and sets flags in the same way as 4922/// Compare; remove Compare if possible. 4923bool X86InstrInfo:: 4924optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, 4925 int CmpMask, int CmpValue, 4926 const MachineRegisterInfo *MRI) const { 4927 // Check whether we can replace SUB with CMP. 4928 unsigned NewOpcode = 0; 4929 switch (CmpInstr->getOpcode()) { 4930 default: break; 4931 case X86::SUB64ri32: 4932 case X86::SUB64ri8: 4933 case X86::SUB32ri: 4934 case X86::SUB32ri8: 4935 case X86::SUB16ri: 4936 case X86::SUB16ri8: 4937 case X86::SUB8ri: 4938 case X86::SUB64rm: 4939 case X86::SUB32rm: 4940 case X86::SUB16rm: 4941 case X86::SUB8rm: 4942 case X86::SUB64rr: 4943 case X86::SUB32rr: 4944 case X86::SUB16rr: 4945 case X86::SUB8rr: { 4946 if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg())) 4947 return false; 4948 // There is no use of the destination register, we can replace SUB with CMP. 4949 switch (CmpInstr->getOpcode()) { 4950 default: llvm_unreachable("Unreachable!"); 4951 case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; 4952 case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; 4953 case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; 4954 case X86::SUB8rm: NewOpcode = X86::CMP8rm; break; 4955 case X86::SUB64rr: NewOpcode = X86::CMP64rr; break; 4956 case X86::SUB32rr: NewOpcode = X86::CMP32rr; break; 4957 case X86::SUB16rr: NewOpcode = X86::CMP16rr; break; 4958 case X86::SUB8rr: NewOpcode = X86::CMP8rr; break; 4959 case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break; 4960 case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break; 4961 case X86::SUB32ri: NewOpcode = X86::CMP32ri; break; 4962 case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break; 4963 case X86::SUB16ri: NewOpcode = X86::CMP16ri; break; 4964 case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break; 4965 case X86::SUB8ri: NewOpcode = X86::CMP8ri; break; 4966 } 4967 CmpInstr->setDesc(get(NewOpcode)); 4968 CmpInstr->RemoveOperand(0); 4969 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri. 
4970 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm || 4971 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm) 4972 return false; 4973 } 4974 } 4975 4976 // Get the unique definition of SrcReg. 4977 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 4978 if (!MI) return false; 4979 4980 // CmpInstr is the first instruction of the BB. 4981 MachineBasicBlock::iterator I = CmpInstr, Def = MI; 4982 4983 // If we are comparing against zero, check whether we can use MI to update 4984 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize. 4985 bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0); 4986 if (IsCmpZero && MI->getParent() != CmpInstr->getParent()) 4987 return false; 4988 4989 // If we have a use of the source register between the def and our compare 4990 // instruction we can eliminate the compare iff the use sets EFLAGS in the 4991 // right way. 4992 bool ShouldUpdateCC = false; 4993 X86::CondCode NewCC = X86::COND_INVALID; 4994 if (IsCmpZero && !isDefConvertible(MI)) { 4995 // Scan forward from the def until we hit the use we're looking for or the 4996 // compare instruction. 4997 for (MachineBasicBlock::iterator J = MI;; ++J) { 4998 // Do we have a convertible instruction? 4999 NewCC = isUseDefConvertible(J); 5000 if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() && 5001 J->getOperand(1).getReg() == SrcReg) { 5002 assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!"); 5003 ShouldUpdateCC = true; // Update CC later on. 5004 // This is not a def of SrcReg, but still a def of EFLAGS. Keep going 5005 // with the new def. 5006 MI = Def = J; 5007 break; 5008 } 5009 5010 if (J == I) 5011 return false; 5012 } 5013 } 5014 5015 // We are searching for an earlier instruction that can make CmpInstr 5016 // redundant and that instruction will be saved in Sub. 5017 MachineInstr *Sub = nullptr; 5018 const TargetRegisterInfo *TRI = &getRegisterInfo(); 5019 5020 // We iterate backward, starting from the instruction before CmpInstr, and 5021 // stop when we reach the definition of a source register or the top of the BB. 5022 // RI points to the instruction before CmpInstr. 5023 // If the definition is in this basic block, RE points to the definition; 5024 // otherwise, RE is the rend of the basic block. 5025 MachineBasicBlock::reverse_iterator 5026 RI = MachineBasicBlock::reverse_iterator(I), 5027 RE = CmpInstr->getParent() == MI->getParent() ? 5028 MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ : 5029 CmpInstr->getParent()->rend(); 5030 MachineInstr *Movr0Inst = nullptr; 5031 for (; RI != RE; ++RI) { 5032 MachineInstr *Instr = &*RI; 5033 // Check whether CmpInstr can be made redundant by the current instruction. 5034 if (!IsCmpZero && 5035 isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) { 5036 Sub = Instr; 5037 break; 5038 } 5039 5040 if (Instr->modifiesRegister(X86::EFLAGS, TRI) || 5041 Instr->readsRegister(X86::EFLAGS, TRI)) { 5042 // This instruction modifies or uses EFLAGS. 5043 5044 // MOV32r0 etc. are implemented with xor which clobbers the condition codes. 5045 // They are safe to move up, if the definition to EFLAGS is dead and 5046 // earlier instructions do not read or write EFLAGS. 5047 if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 && 5048 Instr->registerDefIsDead(X86::EFLAGS, TRI)) { 5049 Movr0Inst = Instr; 5050 continue; 5051 } 5052 5053 // We can't remove CmpInstr. 5054 return false; 5055 } 5056 } 5057 5058 // Return false if no candidates exist.
5059 if (!IsCmpZero && !Sub) 5060 return false; 5061 5062 bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && 5063 Sub->getOperand(2).getReg() == SrcReg); 5064 5065 // Scan forward from the instruction after CmpInstr for uses of EFLAGS. 5066 // It is safe to remove CmpInstr if EFLAGS is redefined or killed. 5067 // If we are done with the basic block, we need to check whether EFLAGS is 5068 // live-out. 5069 bool IsSafe = false; 5070 SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate; 5071 MachineBasicBlock::iterator E = CmpInstr->getParent()->end(); 5072 for (++I; I != E; ++I) { 5073 const MachineInstr &Instr = *I; 5074 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); 5075 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); 5076 // We should check the usage if this instruction uses and updates EFLAGS. 5077 if (!UseEFLAGS && ModifyEFLAGS) { 5078 // It is safe to remove CmpInstr if EFLAGS is updated again. 5079 IsSafe = true; 5080 break; 5081 } 5082 if (!UseEFLAGS && !ModifyEFLAGS) 5083 continue; 5084 5085 // EFLAGS is used by this instruction. 5086 X86::CondCode OldCC = X86::COND_INVALID; 5087 bool OpcIsSET = false; 5088 if (IsCmpZero || IsSwapped) { 5089 // We decode the condition code from opcode. 5090 if (Instr.isBranch()) 5091 OldCC = getCondFromBranchOpc(Instr.getOpcode()); 5092 else { 5093 OldCC = getCondFromSETOpc(Instr.getOpcode()); 5094 if (OldCC != X86::COND_INVALID) 5095 OpcIsSET = true; 5096 else 5097 OldCC = X86::getCondFromCMovOpc(Instr.getOpcode()); 5098 } 5099 if (OldCC == X86::COND_INVALID) return false; 5100 } 5101 if (IsCmpZero) { 5102 switch (OldCC) { 5103 default: break; 5104 case X86::COND_A: case X86::COND_AE: 5105 case X86::COND_B: case X86::COND_BE: 5106 case X86::COND_G: case X86::COND_GE: 5107 case X86::COND_L: case X86::COND_LE: 5108 case X86::COND_O: case X86::COND_NO: 5109 // CF and OF are used, we can't perform this optimization. 5110 return false; 5111 } 5112 5113 // If we're updating the condition code check if we have to reverse the 5114 // condition. 5115 if (ShouldUpdateCC) 5116 switch (OldCC) { 5117 default: 5118 return false; 5119 case X86::COND_E: 5120 break; 5121 case X86::COND_NE: 5122 NewCC = GetOppositeBranchCondition(NewCC); 5123 break; 5124 } 5125 } else if (IsSwapped) { 5126 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs 5127 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 5128 // We swap the condition code and synthesize the new opcode. 5129 NewCC = getSwappedCondition(OldCC); 5130 if (NewCC == X86::COND_INVALID) return false; 5131 } 5132 5133 if ((ShouldUpdateCC || IsSwapped) && NewCC != OldCC) { 5134 // Synthesize the new opcode. 5135 bool HasMemoryOperand = Instr.hasOneMemOperand(); 5136 unsigned NewOpc; 5137 if (Instr.isBranch()) 5138 NewOpc = GetCondBranchFromCond(NewCC); 5139 else if(OpcIsSET) 5140 NewOpc = getSETFromCond(NewCC, HasMemoryOperand); 5141 else { 5142 unsigned DstReg = Instr.getOperand(0).getReg(); 5143 NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(), 5144 HasMemoryOperand); 5145 } 5146 5147 // Push the MachineInstr to OpsToUpdate. 5148 // If it is safe to remove CmpInstr, the condition code of these 5149 // instructions will be modified. 5150 OpsToUpdate.push_back(std::make_pair(&*I, NewOpc)); 5151 } 5152 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { 5153 // It is safe to remove CmpInstr if EFLAGS is updated again or killed. 
5154 IsSafe = true; 5155 break; 5156 } 5157 } 5158 5159 // If EFLAGS is not killed nor re-defined, we should check whether it is 5160 // live-out. If it is live-out, do not optimize. 5161 if ((IsCmpZero || IsSwapped) && !IsSafe) { 5162 MachineBasicBlock *MBB = CmpInstr->getParent(); 5163 for (MachineBasicBlock *Successor : MBB->successors()) 5164 if (Successor->isLiveIn(X86::EFLAGS)) 5165 return false; 5166 } 5167 5168 // The instruction to be updated is either Sub or MI. 5169 Sub = IsCmpZero ? MI : Sub; 5170 // Move Movr0Inst to the appropriate place before Sub. 5171 if (Movr0Inst) { 5172 // Look backwards until we find a def that doesn't use the current EFLAGS. 5173 Def = Sub; 5174 MachineBasicBlock::reverse_iterator 5175 InsertI = MachineBasicBlock::reverse_iterator(++Def), 5176 InsertE = Sub->getParent()->rend(); 5177 for (; InsertI != InsertE; ++InsertI) { 5178 MachineInstr *Instr = &*InsertI; 5179 if (!Instr->readsRegister(X86::EFLAGS, TRI) && 5180 Instr->modifiesRegister(X86::EFLAGS, TRI)) { 5181 Sub->getParent()->remove(Movr0Inst); 5182 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), 5183 Movr0Inst); 5184 break; 5185 } 5186 } 5187 if (InsertI == InsertE) 5188 return false; 5189 } 5190 5191 // Make sure Sub instruction defines EFLAGS and mark the def live. 5192 unsigned i = 0, e = Sub->getNumOperands(); 5193 for (; i != e; ++i) { 5194 MachineOperand &MO = Sub->getOperand(i); 5195 if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) { 5196 MO.setIsDead(false); 5197 break; 5198 } 5199 } 5200 assert(i != e && "Unable to locate a def EFLAGS operand"); 5201 5202 CmpInstr->eraseFromParent(); 5203 5204 // Modify the condition code of instructions in OpsToUpdate. 5205 for (auto &Op : OpsToUpdate) 5206 Op.first->setDesc(get(Op.second)); 5207 return true; 5208} 5209 5210/// Try to remove the load by folding it to a register 5211/// operand at the use. We fold the load instructions if load defines a virtual 5212/// register, the virtual register is used once in the same BB, and the 5213/// instructions in-between do not load or store, and have no side effects. 5214MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI, 5215 const MachineRegisterInfo *MRI, 5216 unsigned &FoldAsLoadDefReg, 5217 MachineInstr *&DefMI) const { 5218 if (FoldAsLoadDefReg == 0) 5219 return nullptr; 5220 // To be conservative, if there exists another load, clear the load candidate. 5221 if (MI->mayLoad()) { 5222 FoldAsLoadDefReg = 0; 5223 return nullptr; 5224 } 5225 5226 // Check whether we can move DefMI here. 5227 DefMI = MRI->getVRegDef(FoldAsLoadDefReg); 5228 assert(DefMI); 5229 bool SawStore = false; 5230 if (!DefMI->isSafeToMove(nullptr, SawStore)) 5231 return nullptr; 5232 5233 // Collect information about virtual register operands of MI. 5234 unsigned SrcOperandId = 0; 5235 bool FoundSrcOperand = false; 5236 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) { 5237 MachineOperand &MO = MI->getOperand(i); 5238 if (!MO.isReg()) 5239 continue; 5240 unsigned Reg = MO.getReg(); 5241 if (Reg != FoldAsLoadDefReg) 5242 continue; 5243 // Do not fold if we have a subreg use or a def or multiple uses. 5244 if (MO.getSubReg() || MO.isDef() || FoundSrcOperand) 5245 return nullptr; 5246 5247 SrcOperandId = i; 5248 FoundSrcOperand = true; 5249 } 5250 if (!FoundSrcOperand) 5251 return nullptr; 5252 5253 // Check whether we can fold the def into SrcOperandId. 
5254 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI)) { 5255 FoldAsLoadDefReg = 0; 5256 return FoldMI; 5257 } 5258 5259 return nullptr; 5260} 5261 5262/// Expand a single-def pseudo instruction to a two-addr 5263/// instruction with two undef reads of the register being defined. 5264/// This is used for mapping: 5265/// %xmm4 = V_SET0 5266/// to: 5267/// %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef> 5268/// 5269static bool Expand2AddrUndef(MachineInstrBuilder &MIB, 5270 const MCInstrDesc &Desc) { 5271 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); 5272 unsigned Reg = MIB->getOperand(0).getReg(); 5273 MIB->setDesc(Desc); 5274 5275 // MachineInstr::addOperand() will insert explicit operands before any 5276 // implicit operands. 5277 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 5278 // But we don't trust that. 5279 assert(MIB->getOperand(1).getReg() == Reg && 5280 MIB->getOperand(2).getReg() == Reg && "Misplaced operand"); 5281 return true; 5282} 5283 5284/// Expand a single-def pseudo instruction to a two-addr 5285/// instruction with two %k0 reads. 5286/// This is used for mapping: 5287/// %k4 = K_SET1 5288/// to: 5289/// %k4 = KXNORrr %k0, %k0 5290static bool Expand2AddrKreg(MachineInstrBuilder &MIB, 5291 const MCInstrDesc &Desc, unsigned Reg) { 5292 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); 5293 MIB->setDesc(Desc); 5294 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 5295 return true; 5296} 5297 5298static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, 5299 bool MinusOne) { 5300 MachineBasicBlock &MBB = *MIB->getParent(); 5301 DebugLoc DL = MIB->getDebugLoc(); 5302 unsigned Reg = MIB->getOperand(0).getReg(); 5303 5304 // Insert the XOR. 5305 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) 5306 .addReg(Reg, RegState::Undef) 5307 .addReg(Reg, RegState::Undef); 5308 5309 // Turn the pseudo into an INC or DEC. 5310 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r)); 5311 MIB.addReg(Reg); 5312 5313 return true; 5314} 5315 5316// LoadStackGuard has so far only been implemented for 64-bit MachO. Different 5317// code sequence is needed for other targets. 
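// On that target the pseudo expands to a PC-relative load of the guard
// variable's GOT slot followed by a load through it, roughly:
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg
// (Symbol name shown for illustration only; the actual global comes from
// the pseudo's memory operand.)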
5318static void expandLoadStackGuard(MachineInstrBuilder &MIB, 5319 const TargetInstrInfo &TII) { 5320 MachineBasicBlock &MBB = *MIB->getParent(); 5321 DebugLoc DL = MIB->getDebugLoc(); 5322 unsigned Reg = MIB->getOperand(0).getReg(); 5323 const GlobalValue *GV = 5324 cast<GlobalValue>((*MIB->memoperands_begin())->getValue()); 5325 unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant; 5326 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( 5327 MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 8, 8); 5328 MachineBasicBlock::iterator I = MIB.getInstr(); 5329 5330 BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1) 5331 .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0) 5332 .addMemOperand(MMO); 5333 MIB->setDebugLoc(DL); 5334 MIB->setDesc(TII.get(X86::MOV64rm)); 5335 MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0); 5336} 5337 5338bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { 5339 bool HasAVX = Subtarget.hasAVX(); 5340 MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI); 5341 switch (MI->getOpcode()) { 5342 case X86::MOV32r0: 5343 return Expand2AddrUndef(MIB, get(X86::XOR32rr)); 5344 case X86::MOV32r1: 5345 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false); 5346 case X86::MOV32r_1: 5347 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true); 5348 case X86::SETB_C8r: 5349 return Expand2AddrUndef(MIB, get(X86::SBB8rr)); 5350 case X86::SETB_C16r: 5351 return Expand2AddrUndef(MIB, get(X86::SBB16rr)); 5352 case X86::SETB_C32r: 5353 return Expand2AddrUndef(MIB, get(X86::SBB32rr)); 5354 case X86::SETB_C64r: 5355 return Expand2AddrUndef(MIB, get(X86::SBB64rr)); 5356 case X86::V_SET0: 5357 case X86::FsFLD0SS: 5358 case X86::FsFLD0SD: 5359 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); 5360 case X86::AVX_SET0: 5361 assert(HasAVX && "AVX not supported"); 5362 return Expand2AddrUndef(MIB, get(X86::VXORPSYrr)); 5363 case X86::AVX512_512_SET0: 5364 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); 5365 case X86::V_SETALLONES: 5366 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); 5367 case X86::AVX2_SETALLONES: 5368 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); 5369 case X86::TEST8ri_NOREX: 5370 MI->setDesc(get(X86::TEST8ri)); 5371 return true; 5372 case X86::MOV32ri64: 5373 MI->setDesc(get(X86::MOV32ri)); 5374 return true; 5375 5376 // KNL does not recognize dependency-breaking idioms for mask registers, 5377 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1. 5378 // Using %k0 as the undef input register is a performance heuristic based 5379 // on the assumption that %k0 is used less frequently than the other mask 5380 // registers, since it is not usable as a write mask. 5381 // FIXME: A more advanced approach would be to choose the best input mask 5382 // register based on context. 
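  // (KXNOR computes NOT(src1 ^ src2), so 'kxnorw %k0, %k0, %kN' produces
  // all ones no matter what %k0 holds, and KXOR of a register with itself
  // always produces zero; that is what makes an arbitrary, undef input
  // register safe here.)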
5383 case X86::KSET0B: 5384 case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0); 5385 case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0); 5386 case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0); 5387 case X86::KSET1B: 5388 case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0); 5389 case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0); 5390 case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0); 5391 case TargetOpcode::LOAD_STACK_GUARD: 5392 expandLoadStackGuard(MIB, *this); 5393 return true; 5394 } 5395 return false; 5396} 5397 5398static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs, 5399 int PtrOffset = 0) { 5400 unsigned NumAddrOps = MOs.size(); 5401 5402 if (NumAddrOps < 4) { 5403 // FrameIndex only - add an immediate offset (whether it's zero or not). 5404 for (unsigned i = 0; i != NumAddrOps; ++i) 5405 MIB.addOperand(MOs[i]); 5406 addOffset(MIB, PtrOffset); 5407 } else { 5408 // General Memory Addressing - we need to add any offset to an existing 5409 // offset. 5410 assert(MOs.size() == 5 && "Unexpected memory operand list length"); 5411 for (unsigned i = 0; i != NumAddrOps; ++i) { 5412 const MachineOperand &MO = MOs[i]; 5413 if (i == 3 && PtrOffset != 0) { 5414 MIB.addDisp(MO, PtrOffset); 5415 } else { 5416 MIB.addOperand(MO); 5417 } 5418 } 5419 } 5420} 5421 5422static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, 5423 ArrayRef<MachineOperand> MOs, 5424 MachineBasicBlock::iterator InsertPt, 5425 MachineInstr *MI, 5426 const TargetInstrInfo &TII) { 5427 // Create the base instruction with the memory operand as the first part. 5428 // Omit the implicit operands, something BuildMI can't do. 5429 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), 5430 MI->getDebugLoc(), true); 5431 MachineInstrBuilder MIB(MF, NewMI); 5432 addOperands(MIB, MOs); 5433 5434 // Loop over the rest of the ri operands, converting them over. 5435 unsigned NumOps = MI->getDesc().getNumOperands()-2; 5436 for (unsigned i = 0; i != NumOps; ++i) { 5437 MachineOperand &MO = MI->getOperand(i+2); 5438 MIB.addOperand(MO); 5439 } 5440 for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) { 5441 MachineOperand &MO = MI->getOperand(i); 5442 MIB.addOperand(MO); 5443 } 5444 5445 MachineBasicBlock *MBB = InsertPt->getParent(); 5446 MBB->insert(InsertPt, NewMI); 5447 5448 return MIB; 5449} 5450 5451static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, 5452 unsigned OpNo, ArrayRef<MachineOperand> MOs, 5453 MachineBasicBlock::iterator InsertPt, 5454 MachineInstr *MI, const TargetInstrInfo &TII, 5455 int PtrOffset = 0) { 5456 // Omit the implicit operands, something BuildMI can't do.
5457 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), 5458 MI->getDebugLoc(), true); 5459 MachineInstrBuilder MIB(MF, NewMI); 5460 5461 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 5462 MachineOperand &MO = MI->getOperand(i); 5463 if (i == OpNo) { 5464 assert(MO.isReg() && "Expected to fold into reg operand!"); 5465 addOperands(MIB, MOs, PtrOffset); 5466 } else { 5467 MIB.addOperand(MO); 5468 } 5469 } 5470 5471 MachineBasicBlock *MBB = InsertPt->getParent(); 5472 MBB->insert(InsertPt, NewMI); 5473 5474 return MIB; 5475} 5476 5477static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, 5478 ArrayRef<MachineOperand> MOs, 5479 MachineBasicBlock::iterator InsertPt, 5480 MachineInstr *MI) { 5481 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, 5482 MI->getDebugLoc(), TII.get(Opcode)); 5483 addOperands(MIB, MOs); 5484 return MIB.addImm(0); 5485} 5486 5487MachineInstr *X86InstrInfo::foldMemoryOperandCustom( 5488 MachineFunction &MF, MachineInstr *MI, unsigned OpNum, 5489 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, 5490 unsigned Size, unsigned Align) const { 5491 switch (MI->getOpcode()) { 5492 case X86::INSERTPSrr: 5493 case X86::VINSERTPSrr: 5494 // Attempt to convert the load of inserted vector into a fold load 5495 // of a single float. 5496 if (OpNum == 2) { 5497 unsigned Imm = MI->getOperand(MI->getNumOperands() - 1).getImm(); 5498 unsigned ZMask = Imm & 15; 5499 unsigned DstIdx = (Imm >> 4) & 3; 5500 unsigned SrcIdx = (Imm >> 6) & 3; 5501 5502 unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize(); 5503 if (Size <= RCSize && 4 <= Align) { 5504 int PtrOffset = SrcIdx * 4; 5505 unsigned NewImm = (DstIdx << 4) | ZMask; 5506 unsigned NewOpCode = 5507 (MI->getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm 5508 : X86::INSERTPSrm); 5509 MachineInstr *NewMI = 5510 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); 5511 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); 5512 return NewMI; 5513 } 5514 } 5515 break; 5516 }; 5517 5518 return nullptr; 5519} 5520 5521MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 5522 MachineFunction &MF, MachineInstr *MI, unsigned OpNum, 5523 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, 5524 unsigned Size, unsigned Align, bool AllowCommute) const { 5525 const DenseMap<unsigned, 5526 std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr; 5527 bool isCallRegIndirect = Subtarget.callRegIndirect(); 5528 bool isTwoAddrFold = false; 5529 5530 // For CPUs that favor the register form of a call or push, 5531 // do not fold loads into calls or pushes, unless optimizing for size 5532 // aggressively. 5533 if (isCallRegIndirect && !MF.getFunction()->optForMinSize() && 5534 (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r || 5535 MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r || 5536 MI->getOpcode() == X86::PUSH64r)) 5537 return nullptr; 5538 5539 unsigned NumOps = MI->getDesc().getNumOperands(); 5540 bool isTwoAddr = NumOps > 1 && 5541 MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; 5542 5543 // FIXME: AsmPrinter doesn't know how to handle 5544 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. 5545 if (MI->getOpcode() == X86::ADD32ri && 5546 MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) 5547 return nullptr; 5548 5549 MachineInstr *NewMI = nullptr; 5550 5551 // Attempt to fold any custom cases we have. 
5552 if (MachineInstr *CustomMI = 5553 foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align)) 5554 return CustomMI; 5555 5556 // Folding a memory location into the two-address part of a two-address 5557 // instruction is different from folding it in other places. It requires 5558 // replacing the *two* registers with the memory location. 5559 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && 5560 MI->getOperand(0).isReg() && 5561 MI->getOperand(1).isReg() && 5562 MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { 5563 OpcodeTablePtr = &RegOp2MemOpTable2Addr; 5564 isTwoAddrFold = true; 5565 } else if (OpNum == 0) { 5566 if (MI->getOpcode() == X86::MOV32r0) { 5567 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); 5568 if (NewMI) 5569 return NewMI; 5570 } 5571 5572 OpcodeTablePtr = &RegOp2MemOpTable0; 5573 } else if (OpNum == 1) { 5574 OpcodeTablePtr = &RegOp2MemOpTable1; 5575 } else if (OpNum == 2) { 5576 OpcodeTablePtr = &RegOp2MemOpTable2; 5577 } else if (OpNum == 3) { 5578 OpcodeTablePtr = &RegOp2MemOpTable3; 5579 } else if (OpNum == 4) { 5580 OpcodeTablePtr = &RegOp2MemOpTable4; 5581 } 5582 5583 // If a fold table was selected... 5584 if (OpcodeTablePtr) { 5585 // Find the opcode to fuse. 5586 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 5587 OpcodeTablePtr->find(MI->getOpcode()); 5588 if (I != OpcodeTablePtr->end()) { 5589 unsigned Opcode = I->second.first; 5590 unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; 5591 if (Align < MinAlign) 5592 return nullptr; 5593 bool NarrowToMOV32rm = false; 5594 if (Size) { 5595 unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize(); 5596 if (Size < RCSize) { 5597 // Check if it's safe to fold the load. If the size of the object is 5598 // narrower than the load width, then it's not. 5599 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) 5600 return nullptr; 5601 // If this is a 64-bit load, but the spill slot is 32 bits, then we can 5602 // do a 32-bit load which is implicitly zero-extended. This likely is 5603 // due to live interval analysis remat'ing a load from stack slot. 5604 if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg()) 5605 return nullptr; 5606 Opcode = X86::MOV32rm; 5607 NarrowToMOV32rm = true; 5608 } 5609 } 5610 5611 if (isTwoAddrFold) 5612 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); 5613 else 5614 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); 5615 5616 if (NarrowToMOV32rm) { 5617 // This is the special case where we use a MOV32rm to load a 32-bit 5618 // value and zero-extend the top bits; change the destination register 5619 // to a 32-bit one. 5620 unsigned DstReg = NewMI->getOperand(0).getReg(); 5621 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 5622 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); 5623 else 5624 NewMI->getOperand(0).setSubReg(X86::sub_32bit); 5625 } 5626 return NewMI; 5627 } 5628 } 5629 5630 // If the instruction and target operand are commutable, commute the 5631 // instruction and try again. 5632 if (AllowCommute) { 5633 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; 5634 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { 5635 bool HasDef = MI->getDesc().getNumDefs(); 5636 unsigned Reg0 = HasDef ?
MI->getOperand(0).getReg() : 0; 5637 unsigned Reg1 = MI->getOperand(CommuteOpIdx1).getReg(); 5638 unsigned Reg2 = MI->getOperand(CommuteOpIdx2).getReg(); 5639 bool Tied1 = 5640 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); 5641 bool Tied2 = 5642 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); 5643 5644 // If either of the commutable operands is tied to the destination, 5645 // then we cannot commute and fold. 5646 if ((HasDef && Reg0 == Reg1 && Tied1) || 5647 (HasDef && Reg0 == Reg2 && Tied2)) 5648 return nullptr; 5649 5650 MachineInstr *CommutedMI = 5651 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 5652 if (!CommutedMI) { 5653 // Unable to commute. 5654 return nullptr; 5655 } 5656 if (CommutedMI != MI) { 5657 // New instruction. We can't fold from this. 5658 CommutedMI->eraseFromParent(); 5659 return nullptr; 5660 } 5661 5662 // Attempt to fold with the commuted version of the instruction. 5663 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, 5664 Size, Align, /*AllowCommute=*/false); 5665 if (NewMI) 5666 return NewMI; 5667 5668 // Folding failed again - undo the commute before returning. 5669 MachineInstr *UncommutedMI = 5670 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); 5671 if (!UncommutedMI) { 5672 // Unable to commute. 5673 return nullptr; 5674 } 5675 if (UncommutedMI != MI) { 5676 // New instruction. It doesn't need to be kept. 5677 UncommutedMI->eraseFromParent(); 5678 return nullptr; 5679 } 5680 5681 // Return here to prevent duplicate fuse failure report. 5682 return nullptr; 5683 } 5684 } 5685 5686 // No fusion 5687 if (PrintFailedFusing && !MI->isCopy()) 5688 dbgs() << "We failed to fuse operand " << OpNum << " in " << *MI; 5689 return nullptr; 5690} 5691 5692/// Return true for all instructions that only update 5693/// the first 32 or 64 bits of the destination register and leave the rest 5694/// unmodified. This can be used to avoid folding loads if the instructions 5695/// only update part of the destination register, and the non-updated part is 5696/// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these 5697/// instructions breaks the partial register dependency and can improve 5698/// performance. e.g.: 5699/// 5700/// movss (%rdi), %xmm0 5701/// cvtss2sd %xmm0, %xmm0 5702/// 5703/// Instead of 5704/// cvtss2sd (%rdi), %xmm0 5705/// 5706/// FIXME: This should be turned into a TSFlags.
5707/// 5708static bool hasPartialRegUpdate(unsigned Opcode) { 5709 switch (Opcode) { 5710 case X86::CVTSI2SSrr: 5711 case X86::CVTSI2SSrm: 5712 case X86::CVTSI2SS64rr: 5713 case X86::CVTSI2SS64rm: 5714 case X86::CVTSI2SDrr: 5715 case X86::CVTSI2SDrm: 5716 case X86::CVTSI2SD64rr: 5717 case X86::CVTSI2SD64rm: 5718 case X86::CVTSD2SSrr: 5719 case X86::CVTSD2SSrm: 5720 case X86::Int_CVTSD2SSrr: 5721 case X86::Int_CVTSD2SSrm: 5722 case X86::CVTSS2SDrr: 5723 case X86::CVTSS2SDrm: 5724 case X86::Int_CVTSS2SDrr: 5725 case X86::Int_CVTSS2SDrm: 5726 case X86::RCPSSr: 5727 case X86::RCPSSm: 5728 case X86::RCPSSr_Int: 5729 case X86::RCPSSm_Int: 5730 case X86::ROUNDSDr: 5731 case X86::ROUNDSDm: 5732 case X86::ROUNDSDr_Int: 5733 case X86::ROUNDSSr: 5734 case X86::ROUNDSSm: 5735 case X86::ROUNDSSr_Int: 5736 case X86::RSQRTSSr: 5737 case X86::RSQRTSSm: 5738 case X86::RSQRTSSr_Int: 5739 case X86::RSQRTSSm_Int: 5740 case X86::SQRTSSr: 5741 case X86::SQRTSSm: 5742 case X86::SQRTSSr_Int: 5743 case X86::SQRTSSm_Int: 5744 case X86::SQRTSDr: 5745 case X86::SQRTSDm: 5746 case X86::SQRTSDr_Int: 5747 case X86::SQRTSDm_Int: 5748 return true; 5749 } 5750 5751 return false; 5752} 5753 5754/// Inform the ExeDepsFix pass how many idle 5755/// instructions we would like before a partial register update. 5756unsigned X86InstrInfo:: 5757getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum, 5758 const TargetRegisterInfo *TRI) const { 5759 if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode())) 5760 return 0; 5761 5762 // If MI is marked as reading Reg, the partial register update is wanted. 5763 const MachineOperand &MO = MI->getOperand(0); 5764 unsigned Reg = MO.getReg(); 5765 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 5766 if (MO.readsReg() || MI->readsVirtualRegister(Reg)) 5767 return 0; 5768 } else { 5769 if (MI->readsRegister(Reg, TRI)) 5770 return 0; 5771 } 5772 5773 // If any of the preceding 16 instructions are reading Reg, insert a 5774 // dependency-breaking instruction. The magic number is based on a few 5775 // Nehalem experiments. 5776 return 16; 5777} 5778 5779// Return true for any instruction that copies the high bits of the first source 5780// operand into the unused high bits of the destination operand.
5781static bool hasUndefRegUpdate(unsigned Opcode) { 5782 switch (Opcode) { 5783 case X86::VCVTSI2SSrr: 5784 case X86::VCVTSI2SSrm: 5785 case X86::Int_VCVTSI2SSrr: 5786 case X86::Int_VCVTSI2SSrm: 5787 case X86::VCVTSI2SS64rr: 5788 case X86::VCVTSI2SS64rm: 5789 case X86::Int_VCVTSI2SS64rr: 5790 case X86::Int_VCVTSI2SS64rm: 5791 case X86::VCVTSI2SDrr: 5792 case X86::VCVTSI2SDrm: 5793 case X86::Int_VCVTSI2SDrr: 5794 case X86::Int_VCVTSI2SDrm: 5795 case X86::VCVTSI2SD64rr: 5796 case X86::VCVTSI2SD64rm: 5797 case X86::Int_VCVTSI2SD64rr: 5798 case X86::Int_VCVTSI2SD64rm: 5799 case X86::VCVTSD2SSrr: 5800 case X86::VCVTSD2SSrm: 5801 case X86::Int_VCVTSD2SSrr: 5802 case X86::Int_VCVTSD2SSrm: 5803 case X86::VCVTSS2SDrr: 5804 case X86::VCVTSS2SDrm: 5805 case X86::Int_VCVTSS2SDrr: 5806 case X86::Int_VCVTSS2SDrm: 5807 case X86::VRCPSSr: 5808 case X86::VRCPSSm: 5809 case X86::VRCPSSm_Int: 5810 case X86::VROUNDSDr: 5811 case X86::VROUNDSDm: 5812 case X86::VROUNDSDr_Int: 5813 case X86::VROUNDSSr: 5814 case X86::VROUNDSSm: 5815 case X86::VROUNDSSr_Int: 5816 case X86::VRSQRTSSr: 5817 case X86::VRSQRTSSm: 5818 case X86::VRSQRTSSm_Int: 5819 case X86::VSQRTSSr: 5820 case X86::VSQRTSSm: 5821 case X86::VSQRTSSm_Int: 5822 case X86::VSQRTSDr: 5823 case X86::VSQRTSDm: 5824 case X86::VSQRTSDm_Int: 5825 // AVX-512 5826 case X86::VCVTSD2SSZrr: 5827 case X86::VCVTSD2SSZrm: 5828 case X86::VCVTSS2SDZrr: 5829 case X86::VCVTSS2SDZrm: 5830 return true; 5831 } 5832 5833 return false; 5834} 5835 5836/// Inform the ExeDepsFix pass how many idle instructions we would like before 5837/// certain undef register reads. 5838/// 5839/// This catches the VCVTSI2SD family of instructions: 5840/// 5841/// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14 5842/// 5843/// We should be careful *not* to catch VXOR idioms which are presumably 5844/// handled specially in the pipeline: 5845/// 5846/// vxorps %xmm1<undef>, %xmm1<undef>, %xmm1 5847/// 5848/// Like getPartialRegUpdateClearance, this makes a strong assumption that the 5849/// high bits that are passed through are not live. 5850unsigned X86InstrInfo:: 5851getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum, 5852 const TargetRegisterInfo *TRI) const { 5853 if (!hasUndefRegUpdate(MI->getOpcode())) 5854 return 0; 5855 5856 // Set the OpNum parameter to the first source operand. 5857 OpNum = 1; 5858 5859 const MachineOperand &MO = MI->getOperand(OpNum); 5860 if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) { 5861 // Use the same magic number as getPartialRegUpdateClearance. 5862 return 16; 5863 } 5864 return 0; 5865} 5866 5867void X86InstrInfo:: 5868breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum, 5869 const TargetRegisterInfo *TRI) const { 5870 unsigned Reg = MI->getOperand(OpNum).getReg(); 5871 // If MI kills this register, the false dependence is already broken. 5872 if (MI->killsRegister(Reg, TRI)) 5873 return; 5874 5875 if (X86::VR128RegClass.contains(Reg)) { 5876 // These instructions are all floating-point domain, so xorps is the best 5877 // choice. 5878 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr; 5879 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg) 5880 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 5881 MI->addRegisterKilled(Reg, TRI, true); 5882 } else if (X86::VR256RegClass.contains(Reg)) { 5883 // Use vxorps to clear the full ymm register. 5884 // It wants to read and write the xmm sub-register.
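    // Illustrative effect (register choice hypothetical): for %ymm0 we emit
    //   vxorps %xmm0, %xmm0, %xmm0, implicit-def %ymm0
    // The VEX-encoded 128-bit xor zeroes the full 256-bit register, severing
    // the false dependence on whatever last wrote %ymm0.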
5885 unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm); 5886 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg) 5887 .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef) 5888 .addReg(Reg, RegState::ImplicitDefine); 5889 MI->addRegisterKilled(Reg, TRI, true); 5890 } 5891} 5892 5893MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 5894 MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops, 5895 MachineBasicBlock::iterator InsertPt, int FrameIndex) const { 5896 // Check switch flag 5897 if (NoFusing) 5898 return nullptr; 5899 5900 // Unless optimizing for size, don't fold to avoid partial 5901 // register update stalls 5902 if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode())) 5903 return nullptr; 5904 5905 const MachineFrameInfo *MFI = MF.getFrameInfo(); 5906 unsigned Size = MFI->getObjectSize(FrameIndex); 5907 unsigned Alignment = MFI->getObjectAlignment(FrameIndex); 5908 // If the function stack isn't realigned we don't want to fold instructions 5909 // that need increased alignment. 5910 if (!RI.needsStackRealignment(MF)) 5911 Alignment = 5912 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment()); 5913 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 5914 unsigned NewOpc = 0; 5915 unsigned RCSize = 0; 5916 switch (MI->getOpcode()) { 5917 default: return nullptr; 5918 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; 5919 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; 5920 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; 5921 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; 5922 } 5923 // Check if it's safe to fold the load. If the size of the object is 5924 // narrower than the load width, then it's not. 5925 if (Size < RCSize) 5926 return nullptr; 5927 // Change to CMPXXri r, 0 first. 5928 MI->setDesc(get(NewOpc)); 5929 MI->getOperand(1).ChangeToImmediate(0); 5930 } else if (Ops.size() != 1) 5931 return nullptr; 5932 5933 return foldMemoryOperandImpl(MF, MI, Ops[0], 5934 MachineOperand::CreateFI(FrameIndex), InsertPt, 5935 Size, Alignment, /*AllowCommute=*/true); 5936} 5937 5938/// Check if \p LoadMI is a partial register load that we can't fold into \p MI 5939/// because the latter uses contents that wouldn't be defined in the folded 5940/// version. For instance, this transformation isn't legal: 5941/// movss (%rdi), %xmm0 5942/// addps %xmm0, %xmm0 5943/// -> 5944/// addps (%rdi), %xmm0 5945/// 5946/// But this one is: 5947/// movss (%rdi), %xmm0 5948/// addss %xmm0, %xmm0 5949/// -> 5950/// addss (%rdi), %xmm0 5951/// 5952static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, 5953 const MachineInstr &UserMI, 5954 const MachineFunction &MF) { 5955 unsigned Opc = LoadMI.getOpcode(); 5956 unsigned UserOpc = UserMI.getOpcode(); 5957 unsigned RegSize = 5958 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize(); 5959 5960 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm) && RegSize > 4) { 5961 // These instructions only load 32 bits, we can't fold them if the 5962 // destination register is wider than 32 bits (4 bytes), and its user 5963 // instruction isn't scalar (SS). 
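  // (The *_Int opcodes below are the scalar-intrinsic forms: they read only
  // the low 32 bits of their second source and pass the rest of the first
  // source through, so the folded 4-byte load cannot read memory that the
  // original code did not.)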
5964 switch (UserOpc) { 5965 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: 5966 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: 5967 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: 5968 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: 5969 case X86::VFMADDSSr132r_Int: case X86::VFNMADDSSr132r_Int: 5970 case X86::VFMADDSSr213r_Int: case X86::VFNMADDSSr213r_Int: 5971 case X86::VFMADDSSr231r_Int: case X86::VFNMADDSSr231r_Int: 5972 case X86::VFMSUBSSr132r_Int: case X86::VFNMSUBSSr132r_Int: 5973 case X86::VFMSUBSSr213r_Int: case X86::VFNMSUBSSr213r_Int: 5974 case X86::VFMSUBSSr231r_Int: case X86::VFNMSUBSSr231r_Int: 5975 return false; 5976 default: 5977 return true; 5978 } 5979 } 5980 5981 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm) && RegSize > 8) { 5982 // These instructions only load 64 bits, we can't fold them if the 5983 // destination register is wider than 64 bits (8 bytes), and its user 5984 // instruction isn't scalar (SD). 5985 switch (UserOpc) { 5986 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: 5987 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: 5988 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: 5989 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: 5990 case X86::VFMADDSDr132r_Int: case X86::VFNMADDSDr132r_Int: 5991 case X86::VFMADDSDr213r_Int: case X86::VFNMADDSDr213r_Int: 5992 case X86::VFMADDSDr231r_Int: case X86::VFNMADDSDr231r_Int: 5993 case X86::VFMSUBSDr132r_Int: case X86::VFNMSUBSDr132r_Int: 5994 case X86::VFMSUBSDr213r_Int: case X86::VFNMSUBSDr213r_Int: 5995 case X86::VFMSUBSDr231r_Int: case X86::VFNMSUBSDr231r_Int: 5996 return false; 5997 default: 5998 return true; 5999 } 6000 } 6001 6002 return false; 6003} 6004 6005MachineInstr *X86InstrInfo::foldMemoryOperandImpl( 6006 MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops, 6007 MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const { 6008 // If loading from a FrameIndex, fold directly from the FrameIndex. 6009 unsigned NumOps = LoadMI->getDesc().getNumOperands(); 6010 int FrameIndex; 6011 if (isLoadFromStackSlot(LoadMI, FrameIndex)) { 6012 if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF)) 6013 return nullptr; 6014 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex); 6015 } 6016 6017 // Check switch flag 6018 if (NoFusing) return nullptr; 6019 6020 // Avoid partial register update stalls unless optimizing for size. 6021 if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode())) 6022 return nullptr; 6023 6024 // Determine the alignment of the load. 6025 unsigned Alignment = 0; 6026 if (LoadMI->hasOneMemOperand()) 6027 Alignment = (*LoadMI->memoperands_begin())->getAlignment(); 6028 else 6029 switch (LoadMI->getOpcode()) { 6030 case X86::AVX2_SETALLONES: 6031 case X86::AVX_SET0: 6032 Alignment = 32; 6033 break; 6034 case X86::V_SET0: 6035 case X86::V_SETALLONES: 6036 Alignment = 16; 6037 break; 6038 case X86::FsFLD0SD: 6039 Alignment = 8; 6040 break; 6041 case X86::FsFLD0SS: 6042 Alignment = 4; 6043 break; 6044 default: 6045 return nullptr; 6046 } 6047 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 6048 unsigned NewOpc = 0; 6049 switch (MI->getOpcode()) { 6050 default: return nullptr; 6051 case X86::TEST8rr: NewOpc = X86::CMP8ri; break; 6052 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; 6053 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; 6054 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; 6055 } 6056 // Change to CMPXXri r, 0 first. 
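    // For example (register name illustrative), when %eax is being folded:
    //   TEST32rr %eax, %eax  ->  CMP32ri8 %eax, 0
    // so that the generic fold below can replace the remaining register
    // operand with the memory reference, yielding a compare-with-zero
    // against memory.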
6057 MI->setDesc(get(NewOpc)); 6058 MI->getOperand(1).ChangeToImmediate(0); 6059 } else if (Ops.size() != 1) 6060 return nullptr; 6061 6062 // Make sure the subregisters match. 6063 // Otherwise we risk changing the size of the load. 6064 if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg()) 6065 return nullptr; 6066 6067 SmallVector<MachineOperand,X86::AddrNumOperands> MOs; 6068 switch (LoadMI->getOpcode()) { 6069 case X86::V_SET0: 6070 case X86::V_SETALLONES: 6071 case X86::AVX2_SETALLONES: 6072 case X86::AVX_SET0: 6073 case X86::FsFLD0SD: 6074 case X86::FsFLD0SS: { 6075 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. 6076 // Create a constant-pool entry and operands to load from it. 6077 6078 // Medium and large mode can't fold loads this way. 6079 if (MF.getTarget().getCodeModel() != CodeModel::Small && 6080 MF.getTarget().getCodeModel() != CodeModel::Kernel) 6081 return nullptr; 6082 6083 // x86-32 PIC requires a PIC base register for constant pools. 6084 unsigned PICBase = 0; 6085 if (MF.getTarget().getRelocationModel() == Reloc::PIC_) { 6086 if (Subtarget.is64Bit()) 6087 PICBase = X86::RIP; 6088 else 6089 // FIXME: PICBase = getGlobalBaseReg(&MF); 6090 // This doesn't work for several reasons. 6091 // 1. GlobalBaseReg may have been spilled. 6092 // 2. It may not be live at MI. 6093 return nullptr; 6094 } 6095 6096 // Create a constant-pool entry. 6097 MachineConstantPool &MCP = *MF.getConstantPool(); 6098 Type *Ty; 6099 unsigned Opc = LoadMI->getOpcode(); 6100 if (Opc == X86::FsFLD0SS) 6101 Ty = Type::getFloatTy(MF.getFunction()->getContext()); 6102 else if (Opc == X86::FsFLD0SD) 6103 Ty = Type::getDoubleTy(MF.getFunction()->getContext()); 6104 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0) 6105 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8); 6106 else 6107 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4); 6108 6109 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES); 6110 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : 6111 Constant::getNullValue(Ty); 6112 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); 6113 6114 // Create operands to load from the constant pool entry. 6115 MOs.push_back(MachineOperand::CreateReg(PICBase, false)); 6116 MOs.push_back(MachineOperand::CreateImm(1)); 6117 MOs.push_back(MachineOperand::CreateReg(0, false)); 6118 MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); 6119 MOs.push_back(MachineOperand::CreateReg(0, false)); 6120 break; 6121 } 6122 default: { 6123 if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF)) 6124 return nullptr; 6125 6126 // Folding a normal load. Just copy the load's address operands. 
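    // An X86 memory reference is always the 5-tuple (base, scale, index,
    // displacement, segment); e.g. "16(%rdi)" is (%rdi, 1, %noreg, 16,
    // %noreg). Those are the last X86::AddrNumOperands declared operands
    // of LoadMI, copied here verbatim.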
6127 MOs.append(LoadMI->operands_begin() + NumOps - X86::AddrNumOperands, 6128 LoadMI->operands_begin() + NumOps); 6129 break; 6130 } 6131 } 6132 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, 6133 /*Size=*/0, Alignment, /*AllowCommute=*/true); 6134} 6135 6136bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, 6137 unsigned Reg, bool UnfoldLoad, bool UnfoldStore, 6138 SmallVectorImpl<MachineInstr*> &NewMIs) const { 6139 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 6140 MemOp2RegOpTable.find(MI->getOpcode()); 6141 if (I == MemOp2RegOpTable.end()) 6142 return false; 6143 unsigned Opc = I->second.first; 6144 unsigned Index = I->second.second & TB_INDEX_MASK; 6145 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 6146 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 6147 if (UnfoldLoad && !FoldedLoad) 6148 return false; 6149 UnfoldLoad &= FoldedLoad; 6150 if (UnfoldStore && !FoldedStore) 6151 return false; 6152 UnfoldStore &= FoldedStore; 6153 6154 const MCInstrDesc &MCID = get(Opc); 6155 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 6156 // TODO: Check if 32-byte or greater accesses are slow too? 6157 if (!MI->hasOneMemOperand() && 6158 RC == &X86::VR128RegClass && 6159 Subtarget.isUnalignedMem16Slow()) 6160 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will 6161 // conservatively assume the address is unaligned. That's bad for 6162 // performance. 6163 return false; 6164 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; 6165 SmallVector<MachineOperand,2> BeforeOps; 6166 SmallVector<MachineOperand,2> AfterOps; 6167 SmallVector<MachineOperand,4> ImpOps; 6168 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 6169 MachineOperand &Op = MI->getOperand(i); 6170 if (i >= Index && i < Index + X86::AddrNumOperands) 6171 AddrOps.push_back(Op); 6172 else if (Op.isReg() && Op.isImplicit()) 6173 ImpOps.push_back(Op); 6174 else if (i < Index) 6175 BeforeOps.push_back(Op); 6176 else if (i > Index) 6177 AfterOps.push_back(Op); 6178 } 6179 6180 // Emit the load instruction. 6181 if (UnfoldLoad) { 6182 std::pair<MachineInstr::mmo_iterator, 6183 MachineInstr::mmo_iterator> MMOs = 6184 MF.extractLoadMemRefs(MI->memoperands_begin(), 6185 MI->memoperands_end()); 6186 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs); 6187 if (UnfoldStore) { 6188 // Address operands cannot be marked isKill. 6189 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { 6190 MachineOperand &MO = NewMIs[0]->getOperand(i); 6191 if (MO.isReg()) 6192 MO.setIsKill(false); 6193 } 6194 } 6195 } 6196 6197 // Emit the data processing instruction. 6198 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true); 6199 MachineInstrBuilder MIB(MF, DataMI); 6200 6201 if (FoldedStore) 6202 MIB.addReg(Reg, RegState::Define); 6203 for (MachineOperand &BeforeOp : BeforeOps) 6204 MIB.addOperand(BeforeOp); 6205 if (FoldedLoad) 6206 MIB.addReg(Reg); 6207 for (MachineOperand &AfterOp : AfterOps) 6208 MIB.addOperand(AfterOp); 6209 for (MachineOperand &ImpOp : ImpOps) { 6210 MIB.addReg(ImpOp.getReg(), 6211 getDefRegState(ImpOp.isDef()) | 6212 RegState::Implicit | 6213 getKillRegState(ImpOp.isKill()) | 6214 getDeadRegState(ImpOp.isDead()) | 6215 getUndefRegState(ImpOp.isUndef())); 6216 } 6217 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 
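  // (This undoes the TEST -> CMP rewrite done at fold time: once the value
  // lives in a register again, e.g. CMP32ri8 %eax, 0 turns back into
  // TEST32rr %eax, %eax. Register name illustrative.)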
6218 switch (DataMI->getOpcode()) { 6219 default: break; 6220 case X86::CMP64ri32: 6221 case X86::CMP64ri8: 6222 case X86::CMP32ri: 6223 case X86::CMP32ri8: 6224 case X86::CMP16ri: 6225 case X86::CMP16ri8: 6226 case X86::CMP8ri: { 6227 MachineOperand &MO0 = DataMI->getOperand(0); 6228 MachineOperand &MO1 = DataMI->getOperand(1); 6229 if (MO1.getImm() == 0) { 6230 unsigned NewOpc; 6231 switch (DataMI->getOpcode()) { 6232 default: llvm_unreachable("Unreachable!"); 6233 case X86::CMP64ri8: 6234 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; 6235 case X86::CMP32ri8: 6236 case X86::CMP32ri: NewOpc = X86::TEST32rr; break; 6237 case X86::CMP16ri8: 6238 case X86::CMP16ri: NewOpc = X86::TEST16rr; break; 6239 case X86::CMP8ri: NewOpc = X86::TEST8rr; break; 6240 } 6241 DataMI->setDesc(get(NewOpc)); 6242 MO1.ChangeToRegister(MO0.getReg(), false); 6243 } 6244 } 6245 } 6246 NewMIs.push_back(DataMI); 6247 6248 // Emit the store instruction. 6249 if (UnfoldStore) { 6250 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); 6251 std::pair<MachineInstr::mmo_iterator, 6252 MachineInstr::mmo_iterator> MMOs = 6253 MF.extractStoreMemRefs(MI->memoperands_begin(), 6254 MI->memoperands_end()); 6255 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs); 6256 } 6257 6258 return true; 6259} 6260 6261bool 6262X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 6263 SmallVectorImpl<SDNode*> &NewNodes) const { 6264 if (!N->isMachineOpcode()) 6265 return false; 6266 6267 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 6268 MemOp2RegOpTable.find(N->getMachineOpcode()); 6269 if (I == MemOp2RegOpTable.end()) 6270 return false; 6271 unsigned Opc = I->second.first; 6272 unsigned Index = I->second.second & TB_INDEX_MASK; 6273 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 6274 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 6275 const MCInstrDesc &MCID = get(Opc); 6276 MachineFunction &MF = DAG.getMachineFunction(); 6277 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 6278 unsigned NumDefs = MCID.NumDefs; 6279 std::vector<SDValue> AddrOps; 6280 std::vector<SDValue> BeforeOps; 6281 std::vector<SDValue> AfterOps; 6282 SDLoc dl(N); 6283 unsigned NumOps = N->getNumOperands(); 6284 for (unsigned i = 0; i != NumOps-1; ++i) { 6285 SDValue Op = N->getOperand(i); 6286 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) 6287 AddrOps.push_back(Op); 6288 else if (i < Index-NumDefs) 6289 BeforeOps.push_back(Op); 6290 else if (i > Index-NumDefs) 6291 AfterOps.push_back(Op); 6292 } 6293 SDValue Chain = N->getOperand(NumOps-1); 6294 AddrOps.push_back(Chain); 6295 6296 // Emit the load instruction. 6297 SDNode *Load = nullptr; 6298 if (FoldedLoad) { 6299 EVT VT = *RC->vt_begin(); 6300 std::pair<MachineInstr::mmo_iterator, 6301 MachineInstr::mmo_iterator> MMOs = 6302 MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 6303 cast<MachineSDNode>(N)->memoperands_end()); 6304 if (!(*MMOs.first) && 6305 RC == &X86::VR128RegClass && 6306 Subtarget.isUnalignedMem16Slow()) 6307 // Do not introduce a slow unaligned load. 6308 return false; 6309 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 6310 // memory access is slow above. 6311 unsigned Alignment = RC->getSize() == 32 ? 
32 : 16; 6312 bool isAligned = (*MMOs.first) && 6313 (*MMOs.first)->getAlignment() >= Alignment; 6314 Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl, 6315 VT, MVT::Other, AddrOps); 6316 NewNodes.push_back(Load); 6317 6318 // Preserve memory reference information. 6319 cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second); 6320 } 6321 6322 // Emit the data processing instruction. 6323 std::vector<EVT> VTs; 6324 const TargetRegisterClass *DstRC = nullptr; 6325 if (MCID.getNumDefs() > 0) { 6326 DstRC = getRegClass(MCID, 0, &RI, MF); 6327 VTs.push_back(*DstRC->vt_begin()); 6328 } 6329 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { 6330 EVT VT = N->getValueType(i); 6331 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) 6332 VTs.push_back(VT); 6333 } 6334 if (Load) 6335 BeforeOps.push_back(SDValue(Load, 0)); 6336 BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end()); 6337 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps); 6338 NewNodes.push_back(NewNode); 6339 6340 // Emit the store instruction. 6341 if (FoldedStore) { 6342 AddrOps.pop_back(); 6343 AddrOps.push_back(SDValue(NewNode, 0)); 6344 AddrOps.push_back(Chain); 6345 std::pair<MachineInstr::mmo_iterator, 6346 MachineInstr::mmo_iterator> MMOs = 6347 MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 6348 cast<MachineSDNode>(N)->memoperands_end()); 6349 if (!(*MMOs.first) && 6350 RC == &X86::VR128RegClass && 6351 Subtarget.isUnalignedMem16Slow()) 6352 // Do not introduce a slow unaligned store. 6353 return false; 6354 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte 6355 // memory access is slow above. 6356 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 6357 bool isAligned = (*MMOs.first) && 6358 (*MMOs.first)->getAlignment() >= Alignment; 6359 SDNode *Store = 6360 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), 6361 dl, MVT::Other, AddrOps); 6362 NewNodes.push_back(Store); 6363 6364 // Preserve memory reference information. 
6365 cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second); 6366 } 6367 6368 return true; 6369} 6370 6371unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, 6372 bool UnfoldLoad, bool UnfoldStore, 6373 unsigned *LoadRegIndex) const { 6374 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 6375 MemOp2RegOpTable.find(Opc); 6376 if (I == MemOp2RegOpTable.end()) 6377 return 0; 6378 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 6379 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 6380 if (UnfoldLoad && !FoldedLoad) 6381 return 0; 6382 if (UnfoldStore && !FoldedStore) 6383 return 0; 6384 if (LoadRegIndex) 6385 *LoadRegIndex = I->second.second & TB_INDEX_MASK; 6386 return I->second.first; 6387} 6388 6389bool 6390X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 6391 int64_t &Offset1, int64_t &Offset2) const { 6392 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 6393 return false; 6394 unsigned Opc1 = Load1->getMachineOpcode(); 6395 unsigned Opc2 = Load2->getMachineOpcode(); 6396 switch (Opc1) { 6397 default: return false; 6398 case X86::MOV8rm: 6399 case X86::MOV16rm: 6400 case X86::MOV32rm: 6401 case X86::MOV64rm: 6402 case X86::LD_Fp32m: 6403 case X86::LD_Fp64m: 6404 case X86::LD_Fp80m: 6405 case X86::MOVSSrm: 6406 case X86::MOVSDrm: 6407 case X86::MMX_MOVD64rm: 6408 case X86::MMX_MOVQ64rm: 6409 case X86::FsMOVAPSrm: 6410 case X86::FsMOVAPDrm: 6411 case X86::MOVAPSrm: 6412 case X86::MOVUPSrm: 6413 case X86::MOVAPDrm: 6414 case X86::MOVDQArm: 6415 case X86::MOVDQUrm: 6416 // AVX load instructions 6417 case X86::VMOVSSrm: 6418 case X86::VMOVSDrm: 6419 case X86::FsVMOVAPSrm: 6420 case X86::FsVMOVAPDrm: 6421 case X86::VMOVAPSrm: 6422 case X86::VMOVUPSrm: 6423 case X86::VMOVAPDrm: 6424 case X86::VMOVDQArm: 6425 case X86::VMOVDQUrm: 6426 case X86::VMOVAPSYrm: 6427 case X86::VMOVUPSYrm: 6428 case X86::VMOVAPDYrm: 6429 case X86::VMOVDQAYrm: 6430 case X86::VMOVDQUYrm: 6431 break; 6432 } 6433 switch (Opc2) { 6434 default: return false; 6435 case X86::MOV8rm: 6436 case X86::MOV16rm: 6437 case X86::MOV32rm: 6438 case X86::MOV64rm: 6439 case X86::LD_Fp32m: 6440 case X86::LD_Fp64m: 6441 case X86::LD_Fp80m: 6442 case X86::MOVSSrm: 6443 case X86::MOVSDrm: 6444 case X86::MMX_MOVD64rm: 6445 case X86::MMX_MOVQ64rm: 6446 case X86::FsMOVAPSrm: 6447 case X86::FsMOVAPDrm: 6448 case X86::MOVAPSrm: 6449 case X86::MOVUPSrm: 6450 case X86::MOVAPDrm: 6451 case X86::MOVDQArm: 6452 case X86::MOVDQUrm: 6453 // AVX load instructions 6454 case X86::VMOVSSrm: 6455 case X86::VMOVSDrm: 6456 case X86::FsVMOVAPSrm: 6457 case X86::FsVMOVAPDrm: 6458 case X86::VMOVAPSrm: 6459 case X86::VMOVUPSrm: 6460 case X86::VMOVAPDrm: 6461 case X86::VMOVDQArm: 6462 case X86::VMOVDQUrm: 6463 case X86::VMOVAPSYrm: 6464 case X86::VMOVUPSYrm: 6465 case X86::VMOVAPDYrm: 6466 case X86::VMOVDQAYrm: 6467 case X86::VMOVDQUYrm: 6468 break; 6469 } 6470 6471 // Check if chain operands and base addresses match. 6472 if (Load1->getOperand(0) != Load2->getOperand(0) || 6473 Load1->getOperand(5) != Load2->getOperand(5)) 6474 return false; 6475 // Segment operands should match as well. 6476 if (Load1->getOperand(4) != Load2->getOperand(4)) 6477 return false; 6478 // Scale should be 1, Index should be Reg0. 6479 if (Load1->getOperand(1) == Load2->getOperand(1) && 6480 Load1->getOperand(2) == Load2->getOperand(2)) { 6481 if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1) 6482 return false; 6483 6484 // Now let's examine the displacements. 
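  // E.g. (illustrative) two MOV32rm nodes loading 16(%rsp) and 24(%rsp)
  // share chain, base, segment, scale and index, so we return true with
  // Offset1 = 16 and Offset2 = 24.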
6485 if (isa<ConstantSDNode>(Load1->getOperand(3)) && 6486 isa<ConstantSDNode>(Load2->getOperand(3))) { 6487 Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue(); 6488 Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue(); 6489 return true; 6490 } 6491 } 6492 return false; 6493} 6494 6495bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 6496 int64_t Offset1, int64_t Offset2, 6497 unsigned NumLoads) const { 6498 assert(Offset2 > Offset1); 6499 if ((Offset2 - Offset1) / 8 > 64) 6500 return false; 6501 6502 unsigned Opc1 = Load1->getMachineOpcode(); 6503 unsigned Opc2 = Load2->getMachineOpcode(); 6504 if (Opc1 != Opc2) 6505 return false; // FIXME: overly conservative? 6506 6507 switch (Opc1) { 6508 default: break; 6509 case X86::LD_Fp32m: 6510 case X86::LD_Fp64m: 6511 case X86::LD_Fp80m: 6512 case X86::MMX_MOVD64rm: 6513 case X86::MMX_MOVQ64rm: 6514 return false; 6515 } 6516 6517 EVT VT = Load1->getValueType(0); 6518 switch (VT.getSimpleVT().SimpleTy) { 6519 default: 6520 // XMM registers. In 64-bit mode we can be a bit more aggressive since we 6521 // have 16 of them to play with. 6522 if (Subtarget.is64Bit()) { 6523 if (NumLoads >= 3) 6524 return false; 6525 } else if (NumLoads) { 6526 return false; 6527 } 6528 break; 6529 case MVT::i8: 6530 case MVT::i16: 6531 case MVT::i32: 6532 case MVT::i64: 6533 case MVT::f32: 6534 case MVT::f64: 6535 if (NumLoads) 6536 return false; 6537 break; 6538 } 6539 6540 return true; 6541} 6542 6543bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First, 6544 MachineInstr *Second) const { 6545 // Check if this processor supports macro-fusion. Since this is a minor 6546 // heuristic, we haven't specifically reserved a feature. hasAVX is a decent 6547 // proxy for SandyBridge+. 
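  // Macro-fusion lets a flag-producing instruction and the conditional
  // branch that follows it decode as a single fused uop, e.g. (illustrative):
  //   cmpl %esi, %edi
  //   je .LBB0_2
  // The FuseKind classification below encodes which flag producers each
  // class of branches is allowed to fuse with.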
6548 if (!Subtarget.hasAVX()) 6549 return false; 6550 6551 enum { 6552 FuseTest, 6553 FuseCmp, 6554 FuseInc 6555 } FuseKind; 6556 6557 switch(Second->getOpcode()) { 6558 default: 6559 return false; 6560 case X86::JE_1: 6561 case X86::JNE_1: 6562 case X86::JL_1: 6563 case X86::JLE_1: 6564 case X86::JG_1: 6565 case X86::JGE_1: 6566 FuseKind = FuseInc; 6567 break; 6568 case X86::JB_1: 6569 case X86::JBE_1: 6570 case X86::JA_1: 6571 case X86::JAE_1: 6572 FuseKind = FuseCmp; 6573 break; 6574 case X86::JS_1: 6575 case X86::JNS_1: 6576 case X86::JP_1: 6577 case X86::JNP_1: 6578 case X86::JO_1: 6579 case X86::JNO_1: 6580 FuseKind = FuseTest; 6581 break; 6582 } 6583 switch (First->getOpcode()) { 6584 default: 6585 return false; 6586 case X86::TEST8rr: 6587 case X86::TEST16rr: 6588 case X86::TEST32rr: 6589 case X86::TEST64rr: 6590 case X86::TEST8ri: 6591 case X86::TEST16ri: 6592 case X86::TEST32ri: 6593 case X86::TEST32i32: 6594 case X86::TEST64i32: 6595 case X86::TEST64ri32: 6596 case X86::TEST8rm: 6597 case X86::TEST16rm: 6598 case X86::TEST32rm: 6599 case X86::TEST64rm: 6600 case X86::TEST8ri_NOREX: 6601 case X86::AND16i16: 6602 case X86::AND16ri: 6603 case X86::AND16ri8: 6604 case X86::AND16rm: 6605 case X86::AND16rr: 6606 case X86::AND32i32: 6607 case X86::AND32ri: 6608 case X86::AND32ri8: 6609 case X86::AND32rm: 6610 case X86::AND32rr: 6611 case X86::AND64i32: 6612 case X86::AND64ri32: 6613 case X86::AND64ri8: 6614 case X86::AND64rm: 6615 case X86::AND64rr: 6616 case X86::AND8i8: 6617 case X86::AND8ri: 6618 case X86::AND8rm: 6619 case X86::AND8rr: 6620 return true; 6621 case X86::CMP16i16: 6622 case X86::CMP16ri: 6623 case X86::CMP16ri8: 6624 case X86::CMP16rm: 6625 case X86::CMP16rr: 6626 case X86::CMP32i32: 6627 case X86::CMP32ri: 6628 case X86::CMP32ri8: 6629 case X86::CMP32rm: 6630 case X86::CMP32rr: 6631 case X86::CMP64i32: 6632 case X86::CMP64ri32: 6633 case X86::CMP64ri8: 6634 case X86::CMP64rm: 6635 case X86::CMP64rr: 6636 case X86::CMP8i8: 6637 case X86::CMP8ri: 6638 case X86::CMP8rm: 6639 case X86::CMP8rr: 6640 case X86::ADD16i16: 6641 case X86::ADD16ri: 6642 case X86::ADD16ri8: 6643 case X86::ADD16ri8_DB: 6644 case X86::ADD16ri_DB: 6645 case X86::ADD16rm: 6646 case X86::ADD16rr: 6647 case X86::ADD16rr_DB: 6648 case X86::ADD32i32: 6649 case X86::ADD32ri: 6650 case X86::ADD32ri8: 6651 case X86::ADD32ri8_DB: 6652 case X86::ADD32ri_DB: 6653 case X86::ADD32rm: 6654 case X86::ADD32rr: 6655 case X86::ADD32rr_DB: 6656 case X86::ADD64i32: 6657 case X86::ADD64ri32: 6658 case X86::ADD64ri32_DB: 6659 case X86::ADD64ri8: 6660 case X86::ADD64ri8_DB: 6661 case X86::ADD64rm: 6662 case X86::ADD64rr: 6663 case X86::ADD64rr_DB: 6664 case X86::ADD8i8: 6665 case X86::ADD8mi: 6666 case X86::ADD8mr: 6667 case X86::ADD8ri: 6668 case X86::ADD8rm: 6669 case X86::ADD8rr: 6670 case X86::SUB16i16: 6671 case X86::SUB16ri: 6672 case X86::SUB16ri8: 6673 case X86::SUB16rm: 6674 case X86::SUB16rr: 6675 case X86::SUB32i32: 6676 case X86::SUB32ri: 6677 case X86::SUB32ri8: 6678 case X86::SUB32rm: 6679 case X86::SUB32rr: 6680 case X86::SUB64i32: 6681 case X86::SUB64ri32: 6682 case X86::SUB64ri8: 6683 case X86::SUB64rm: 6684 case X86::SUB64rr: 6685 case X86::SUB8i8: 6686 case X86::SUB8ri: 6687 case X86::SUB8rm: 6688 case X86::SUB8rr: 6689 return FuseKind == FuseCmp || FuseKind == FuseInc; 6690 case X86::INC16r: 6691 case X86::INC32r: 6692 case X86::INC64r: 6693 case X86::INC8r: 6694 case X86::DEC16r: 6695 case X86::DEC32r: 6696 case X86::DEC64r: 6697 case X86::DEC8r: 6698 return FuseKind == FuseInc; 6699 } 6700} 6701 
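// Reversing a branch condition means flipping its condition code, e.g.
// COND_E <-> COND_NE. The pseudo-conditions COND_NE_OR_P and COND_NP_OR_E
// (used when lowering certain floating-point compares) test two flags and
// have no single opposite, so ReverseBranchCondition reports failure for
// them.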
6702bool X86InstrInfo:: 6703ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 6704 assert(Cond.size() == 1 && "Invalid X86 branch condition!"); 6705 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); 6706 if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E) 6707 return true; 6708 Cond[0].setImm(GetOppositeBranchCondition(CC)); 6709 return false; 6710} 6711 6712bool X86InstrInfo:: 6713isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 6714 // FIXME: Return false for x87 stack register classes for now. We can't 6715 // allow any loads of these registers before FpGet_ST0_80. 6716 return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass || 6717 RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass); 6718} 6719 6720/// Return a virtual register initialized with the 6721/// global base register value. Output instructions required to 6722/// initialize the register in the function entry block, if necessary. 6723/// 6724/// TODO: Eliminate this and move the code to X86MachineFunctionInfo. 6725/// 6726unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { 6727 assert(!Subtarget.is64Bit() && 6728 "X86-64 PIC uses RIP relative addressing"); 6729 6730 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); 6731 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); 6732 if (GlobalBaseReg != 0) 6733 return GlobalBaseReg; 6734 6735 // Create the register. The code to initialize it is inserted 6736 // later, by the CGBR pass (below). 6737 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 6738 GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); 6739 X86FI->setGlobalBaseReg(GlobalBaseReg); 6740 return GlobalBaseReg; 6741} 6742 6743// These are the replaceable SSE instructions. Some of these have Int variants 6744// that we don't include here. We don't want to replace instructions selected 6745// by intrinsics.
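// Each row below holds the equivalent opcode in each of the three execution
// domains; e.g. XORPSrr (PackedSingle), XORPDrr (PackedDouble) and PXORrr
// (PackedInt) all compute the same 128-bit XOR, so the domain-fixing pass
// may swap one for another to avoid cross-domain bypass delays.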
6746static const uint16_t ReplaceableInstrs[][3] = { 6747 //PackedSingle PackedDouble PackedInt 6748 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, 6749 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, 6750 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, 6751 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, 6752 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, 6753 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, 6754 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, 6755 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, 6756 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, 6757 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, 6758 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, 6759 { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, 6760 { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, 6761 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, 6762 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, 6763 // AVX 128-bit support 6764 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, 6765 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, 6766 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, 6767 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, 6768 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, 6769 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, 6770 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, 6771 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, 6772 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, 6773 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, 6774 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, 6775 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, 6776 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, 6777 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, 6778 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, 6779 // AVX 256-bit support 6780 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, 6781 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, 6782 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, 6783 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, 6784 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, 6785 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr } 6786}; 6787 6788static const uint16_t ReplaceableInstrsAVX2[][3] = { 6789 //PackedSingle PackedDouble PackedInt 6790 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, 6791 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, 6792 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, 6793 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, 6794 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, 6795 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, 6796 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, 6797 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, 6798 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, 6799 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, 6800 { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm }, 6801 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, 6802 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, 6803 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, 6804 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, 6805 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, 6806 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, 6807 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, 6808 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, 6809 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm} 6810}; 6811 
6812// FIXME: Some shuffle and unpack instructions have equivalents in different 6813// domains, but they require a bit more work than just switching opcodes. 6814 6815static const uint16_t *lookup(unsigned opcode, unsigned domain) { 6816 for (const uint16_t (&Row)[3] : ReplaceableInstrs) 6817 if (Row[domain-1] == opcode) 6818 return Row; 6819 return nullptr; 6820} 6821 6822static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) { 6823 for (const uint16_t (&Row)[3] : ReplaceableInstrsAVX2) 6824 if (Row[domain-1] == opcode) 6825 return Row; 6826 return nullptr; 6827} 6828 6829std::pair<uint16_t, uint16_t> 6830X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const { 6831 uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3; 6832 bool hasAVX2 = Subtarget.hasAVX2(); 6833 uint16_t validDomains = 0; 6834 if (domain && lookup(MI->getOpcode(), domain)) 6835 validDomains = 0xe; 6836 else if (domain && lookupAVX2(MI->getOpcode(), domain)) 6837 validDomains = hasAVX2 ? 0xe : 0x6; 6838 return std::make_pair(domain, validDomains); 6839} 6840 6841void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { 6842 assert(Domain>0 && Domain<4 && "Invalid execution domain"); 6843 uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3; 6844 assert(dom && "Not an SSE instruction"); 6845 const uint16_t *table = lookup(MI->getOpcode(), dom); 6846 if (!table) { // try the other table 6847 assert((Subtarget.hasAVX2() || Domain < 3) && 6848 "256-bit vector operations only available in AVX2"); 6849 table = lookupAVX2(MI->getOpcode(), dom); 6850 } 6851 assert(table && "Cannot change domain"); 6852 MI->setDesc(get(table[Domain-1])); 6853} 6854 6855/// Set NopInst to the noop instruction to use for this target. 6856void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const { 6857 NopInst.setOpcode(X86::NOOP); 6858} 6859 6860// This code must remain in sync with getJumpInstrTableEntryBound in this class! 6861// In particular, getJumpInstrTableEntryBound must always return an upper bound 6862// on the encoding lengths of the instructions generated by 6863// getUnconditionalBranch and getTrap. 6864void X86InstrInfo::getUnconditionalBranch( 6865 MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const { 6866 Branch.setOpcode(X86::JMP_1); 6867 Branch.addOperand(MCOperand::createExpr(BranchTarget)); 6868} 6869 6870// This code must remain in sync with getJumpInstrTableEntryBound in this class! 6871// In particular, getJumpInstrTableEntryBound must always return an upper bound 6872// on the encoding lengths of the instructions generated by 6873// getUnconditionalBranch and getTrap. 6874void X86InstrInfo::getTrap(MCInst &MI) const { 6875 MI.setOpcode(X86::TRAP); 6876} 6877 6878// See getTrap and getUnconditionalBranch for conditions on the value returned 6879// by this function. 6880unsigned X86InstrInfo::getJumpInstrTableEntryBound() const { 6881 // 5 bytes suffice: JMP_4 Symbol@PLT uses 1 byte (E9) for the JMP_4 and 4 6882 // bytes for the symbol offset. And TRAP is ud2, which is two bytes (0F 0B).
6883 return 5; 6884} 6885 6886bool X86InstrInfo::isHighLatencyDef(int opc) const { 6887 switch (opc) { 6888 default: return false; 6889 case X86::DIVSDrm: 6890 case X86::DIVSDrm_Int: 6891 case X86::DIVSDrr: 6892 case X86::DIVSDrr_Int: 6893 case X86::DIVSSrm: 6894 case X86::DIVSSrm_Int: 6895 case X86::DIVSSrr: 6896 case X86::DIVSSrr_Int: 6897 case X86::SQRTPDm: 6898 case X86::SQRTPDr: 6899 case X86::SQRTPSm: 6900 case X86::SQRTPSr: 6901 case X86::SQRTSDm: 6902 case X86::SQRTSDm_Int: 6903 case X86::SQRTSDr: 6904 case X86::SQRTSDr_Int: 6905 case X86::SQRTSSm: 6906 case X86::SQRTSSm_Int: 6907 case X86::SQRTSSr: 6908 case X86::SQRTSSr_Int: 6909 // AVX instructions with high latency 6910 case X86::VDIVSDrm: 6911 case X86::VDIVSDrm_Int: 6912 case X86::VDIVSDrr: 6913 case X86::VDIVSDrr_Int: 6914 case X86::VDIVSSrm: 6915 case X86::VDIVSSrm_Int: 6916 case X86::VDIVSSrr: 6917 case X86::VDIVSSrr_Int: 6918 case X86::VSQRTPDm: 6919 case X86::VSQRTPDr: 6920 case X86::VSQRTPSm: 6921 case X86::VSQRTPSr: 6922 case X86::VSQRTSDm: 6923 case X86::VSQRTSDm_Int: 6924 case X86::VSQRTSDr: 6925 case X86::VSQRTSSm: 6926 case X86::VSQRTSSm_Int: 6927 case X86::VSQRTSSr: 6928 case X86::VSQRTPDZm: 6929 case X86::VSQRTPDZr: 6930 case X86::VSQRTPSZm: 6931 case X86::VSQRTPSZr: 6932 case X86::VSQRTSDZm: 6933 case X86::VSQRTSDZm_Int: 6934 case X86::VSQRTSDZr: 6935 case X86::VSQRTSSZm_Int: 6936 case X86::VSQRTSSZr: 6937 case X86::VSQRTSSZm: 6938 case X86::VDIVSDZrm: 6939 case X86::VDIVSDZrr: 6940 case X86::VDIVSSZrm: 6941 case X86::VDIVSSZrr: 6942 6943 case X86::VGATHERQPSZrm: 6944 case X86::VGATHERQPDZrm: 6945 case X86::VGATHERDPDZrm: 6946 case X86::VGATHERDPSZrm: 6947 case X86::VPGATHERQDZrm: 6948 case X86::VPGATHERQQZrm: 6949 case X86::VPGATHERDDZrm: 6950 case X86::VPGATHERDQZrm: 6951 case X86::VSCATTERQPDZmr: 6952 case X86::VSCATTERQPSZmr: 6953 case X86::VSCATTERDPDZmr: 6954 case X86::VSCATTERDPSZmr: 6955 case X86::VPSCATTERQDZmr: 6956 case X86::VPSCATTERQQZmr: 6957 case X86::VPSCATTERDDZmr: 6958 case X86::VPSCATTERDQZmr: 6959 return true; 6960 } 6961} 6962 6963bool X86InstrInfo:: 6964hasHighOperandLatency(const TargetSchedModel &SchedModel, 6965 const MachineRegisterInfo *MRI, 6966 const MachineInstr *DefMI, unsigned DefIdx, 6967 const MachineInstr *UseMI, unsigned UseIdx) const { 6968 return isHighLatencyDef(DefMI->getOpcode()); 6969} 6970 6971bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst, 6972 const MachineBasicBlock *MBB) const { 6973 assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) && 6974 "Reassociation needs binary operators"); 6975 6976 // Integer binary math/logic instructions have a third source operand: 6977 // the EFLAGS register. That operand must be both defined here and never 6978 // used; ie, it must be dead. If the EFLAGS operand is live, then we can 6979 // not change anything because rearranging the operands could affect other 6980 // instructions that depend on the exact status flags (zero, sign, etc.) 6981 // that are set by using these particular operands with this operation. 6982 if (Inst.getNumOperands() == 4) { 6983 assert(Inst.getOperand(3).isReg() && 6984 Inst.getOperand(3).getReg() == X86::EFLAGS && 6985 "Unexpected operand in reassociable instruction"); 6986 if (!Inst.getOperand(3).isDead()) 6987 return false; 6988 } 6989 6990 return TargetInstrInfo::hasReassociableOperands(Inst, MBB); 6991} 6992 6993// TODO: There are many more machine instruction opcodes to match: 6994// 1. Other data types (integer, vectors) 6995// 2. 
Other math / logic operations (xor, or) 6996// 3. Other forms of the same operation (intrinsics and other variants) 6997bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { 6998 switch (Inst.getOpcode()) { 6999 case X86::AND8rr: 7000 case X86::AND16rr: 7001 case X86::AND32rr: 7002 case X86::AND64rr: 7003 case X86::OR8rr: 7004 case X86::OR16rr: 7005 case X86::OR32rr: 7006 case X86::OR64rr: 7007 case X86::XOR8rr: 7008 case X86::XOR16rr: 7009 case X86::XOR32rr: 7010 case X86::XOR64rr: 7011 case X86::IMUL16rr: 7012 case X86::IMUL32rr: 7013 case X86::IMUL64rr: 7014 case X86::PANDrr: 7015 case X86::PORrr: 7016 case X86::PXORrr: 7017 case X86::VPANDrr: 7018 case X86::VPANDYrr: 7019 case X86::VPORrr: 7020 case X86::VPORYrr: 7021 case X86::VPXORrr: 7022 case X86::VPXORYrr: 7023 // Normal min/max instructions are not commutative because of NaN and signed 7024 // zero semantics, but these are. Thus, there's no need to check for global 7025 // relaxed math; the instructions themselves have the properties we need. 7026 case X86::MAXCPDrr: 7027 case X86::MAXCPSrr: 7028 case X86::MAXCSDrr: 7029 case X86::MAXCSSrr: 7030 case X86::MINCPDrr: 7031 case X86::MINCPSrr: 7032 case X86::MINCSDrr: 7033 case X86::MINCSSrr: 7034 case X86::VMAXCPDrr: 7035 case X86::VMAXCPSrr: 7036 case X86::VMAXCPDYrr: 7037 case X86::VMAXCPSYrr: 7038 case X86::VMAXCSDrr: 7039 case X86::VMAXCSSrr: 7040 case X86::VMINCPDrr: 7041 case X86::VMINCPSrr: 7042 case X86::VMINCPDYrr: 7043 case X86::VMINCPSYrr: 7044 case X86::VMINCSDrr: 7045 case X86::VMINCSSrr: 7046 return true; 7047 case X86::ADDPDrr: 7048 case X86::ADDPSrr: 7049 case X86::ADDSDrr: 7050 case X86::ADDSSrr: 7051 case X86::MULPDrr: 7052 case X86::MULPSrr: 7053 case X86::MULSDrr: 7054 case X86::MULSSrr: 7055 case X86::VADDPDrr: 7056 case X86::VADDPSrr: 7057 case X86::VADDPDYrr: 7058 case X86::VADDPSYrr: 7059 case X86::VADDSDrr: 7060 case X86::VADDSSrr: 7061 case X86::VMULPDrr: 7062 case X86::VMULPSrr: 7063 case X86::VMULPDYrr: 7064 case X86::VMULPSYrr: 7065 case X86::VMULSDrr: 7066 case X86::VMULSSrr: 7067 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath; 7068 default: 7069 return false; 7070 } 7071} 7072 7073/// This is an architecture-specific helper function of reassociateOps. 7074/// Set special operand attributes for new instructions after reassociation. 7075void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1, 7076 MachineInstr &OldMI2, 7077 MachineInstr &NewMI1, 7078 MachineInstr &NewMI2) const { 7079 // Integer instructions define an implicit EFLAGS source register operand as 7080 // the third source (fourth total) operand. 
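  // For instance (virtual register names hypothetical), after reassociating
  //   %t = ADD32rr %a, %b, implicit-def dead %eflags
  //   %r = ADD32rr %t, %c, implicit-def dead %eflags
  // into a shape that pairs %a with %c first, the rebuilt instructions must
  // keep their EFLAGS defs marked dead; that is what this hook re-establishes.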
7081 if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4) 7082 return; 7083 7084 assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 && 7085 "Unexpected instruction type for reassociation"); 7086 7087 MachineOperand &OldOp1 = OldMI1.getOperand(3); 7088 MachineOperand &OldOp2 = OldMI2.getOperand(3); 7089 MachineOperand &NewOp1 = NewMI1.getOperand(3); 7090 MachineOperand &NewOp2 = NewMI2.getOperand(3); 7091 7092 assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() && 7093 "Must have dead EFLAGS operand in reassociable instruction"); 7094 assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() && 7095 "Must have dead EFLAGS operand in reassociable instruction"); 7096 7097 (void)OldOp1; 7098 (void)OldOp2; 7099 7100 assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS && 7101 "Unexpected operand in reassociable instruction"); 7102 assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS && 7103 "Unexpected operand in reassociable instruction"); 7104 7105 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations 7106 // of this pass or other passes. The EFLAGS operands must be dead in these new 7107 // instructions because the EFLAGS operands in the original instructions must 7108 // be dead in order for reassociation to occur. 7109 NewOp1.setIsDead(); 7110 NewOp2.setIsDead(); 7111} 7112 7113std::pair<unsigned, unsigned> 7114X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7115 return std::make_pair(TF, 0u); 7116} 7117 7118ArrayRef<std::pair<unsigned, const char *>> 7119X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7120 using namespace X86II; 7121 static const std::pair<unsigned, const char *> TargetFlags[] = { 7122 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"}, 7123 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"}, 7124 {MO_GOT, "x86-got"}, 7125 {MO_GOTOFF, "x86-gotoff"}, 7126 {MO_GOTPCREL, "x86-gotpcrel"}, 7127 {MO_PLT, "x86-plt"}, 7128 {MO_TLSGD, "x86-tlsgd"}, 7129 {MO_TLSLD, "x86-tlsld"}, 7130 {MO_TLSLDM, "x86-tlsldm"}, 7131 {MO_GOTTPOFF, "x86-gottpoff"}, 7132 {MO_INDNTPOFF, "x86-indntpoff"}, 7133 {MO_TPOFF, "x86-tpoff"}, 7134 {MO_DTPOFF, "x86-dtpoff"}, 7135 {MO_NTPOFF, "x86-ntpoff"}, 7136 {MO_GOTNTPOFF, "x86-gotntpoff"}, 7137 {MO_DLLIMPORT, "x86-dllimport"}, 7138 {MO_DARWIN_STUB, "x86-darwin-stub"}, 7139 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"}, 7140 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"}, 7141 {MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE, "x86-darwin-hidden-nonlazy-pic-base"}, 7142 {MO_TLVP, "x86-tlvp"}, 7143 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"}, 7144 {MO_SECREL, "x86-secrel"}}; 7145 return makeArrayRef(TargetFlags); 7146} 7147 7148namespace { 7149 /// Create Global Base Reg pass. This initializes the PIC 7150 /// global base register for x86-32. 7151 struct CGBR : public MachineFunctionPass { 7152 static char ID; 7153 CGBR() : MachineFunctionPass(ID) {} 7154 7155 bool runOnMachineFunction(MachineFunction &MF) override { 7156 const X86TargetMachine *TM = 7157 static_cast<const X86TargetMachine *>(&MF.getTarget()); 7158 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 7159 7160 // Don't do anything if this is 64-bit as 64-bit PIC 7161 // uses RIP relative addressing. 7162 if (STI.is64Bit()) 7163 return false; 7164 7165 // Only emit a global base reg in PIC mode. 
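      // On x86-32, the initialization sequence inserted below looks roughly
      // like this (labels illustrative):
      //   calll .L0$pb
      // .L0$pb:
      //   popl %eax
      //   addl $_GLOBAL_OFFSET_TABLE_+(.-.L0$pb), %eax   (GOT PIC style only)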
7166 if (TM->getRelocationModel() != Reloc::PIC_) 7167 return false; 7168 7169 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 7170 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); 7171 7172 // If we didn't need a GlobalBaseReg, don't insert code. 7173 if (GlobalBaseReg == 0) 7174 return false; 7175 7176 // Insert the code that sets GlobalBaseReg into the first MBB of the function. 7177 MachineBasicBlock &FirstMBB = MF.front(); 7178 MachineBasicBlock::iterator MBBI = FirstMBB.begin(); 7179 DebugLoc DL = FirstMBB.findDebugLoc(MBBI); 7180 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 7181 const X86InstrInfo *TII = STI.getInstrInfo(); 7182 7183 unsigned PC; 7184 if (STI.isPICStyleGOT()) 7185 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); 7186 else 7187 PC = GlobalBaseReg; 7188 7189 // The operand of MovePCtoStack is completely ignored by the asm printer. It's 7190 // only used in JIT code emission as a displacement to the pc. 7191 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); 7192 7193 // If we're using vanilla 'GOT' PIC style, we should use relative addressing 7194 // not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol. 7195 if (STI.isPICStyleGOT()) { 7196 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register 7197 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) 7198 .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_", 7199 X86II::MO_GOT_ABSOLUTE_ADDRESS); 7200 } 7201 7202 return true; 7203 } 7204 7205 const char *getPassName() const override { 7206 return "X86 PIC Global Base Reg Initialization"; 7207 } 7208 7209 void getAnalysisUsage(AnalysisUsage &AU) const override { 7210 AU.setPreservesCFG(); 7211 MachineFunctionPass::getAnalysisUsage(AU); 7212 } 7213 }; 7214} 7215 7216char CGBR::ID = 0; 7217FunctionPass* 7218llvm::createX86GlobalBaseRegPass() { return new CGBR(); } 7219 7220namespace { 7221 struct LDTLSCleanup : public MachineFunctionPass { 7222 static char ID; 7223 LDTLSCleanup() : MachineFunctionPass(ID) {} 7224 7225 bool runOnMachineFunction(MachineFunction &MF) override { 7226 X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>(); 7227 if (MFI->getNumLocalDynamicTLSAccesses() < 2) { 7228 // No point folding accesses if there aren't at least two. 7229 return false; 7230 } 7231 7232 MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>(); 7233 return VisitNode(DT->getRootNode(), 0); 7234 } 7235 7236 // Visit the dominator subtree rooted at Node in pre-order. 7237 // If TLSBaseAddrReg is non-zero, then use that to replace any 7238 // TLS_base_addr instructions. Otherwise, create the register 7239 // when the first such instruction is seen, and then use it 7240 // as we encounter more instructions. 7241 bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) { 7242 MachineBasicBlock *BB = Node->getBlock(); 7243 bool Changed = false; 7244 7245 // Traverse the current block. 7246 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; 7247 ++I) { 7248 switch (I->getOpcode()) { 7249 case X86::TLS_base_addr32: 7250 case X86::TLS_base_addr64: 7251 if (TLSBaseAddrReg) 7252 I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg); 7253 else 7254 I = SetRegister(I, &TLSBaseAddrReg); 7255 Changed = true; 7256 break; 7257 default: 7258 break; 7259 } 7260 } 7261 7262 // Visit the children of this block in the dominator tree.
7263 for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end(); 7264 I != E; ++I) { 7265 Changed |= VisitNode(*I, TLSBaseAddrReg); 7266 } 7267 7268 return Changed; 7269 } 7270 7271 // Replace the TLS_base_addr instruction I with a copy from 7272 // TLSBaseAddrReg, returning the new instruction. 7273 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I, 7274 unsigned TLSBaseAddrReg) { 7275 MachineFunction *MF = I->getParent()->getParent(); 7276 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); 7277 const bool is64Bit = STI.is64Bit(); 7278 const X86InstrInfo *TII = STI.getInstrInfo(); 7279 7280 // Insert a copy from TLSBaseAddrReg to RAX/EAX. 7281 MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(), 7282 TII->get(TargetOpcode::COPY), 7283 is64Bit ? X86::RAX : X86::EAX) 7284 .addReg(TLSBaseAddrReg); 7285 7286 // Erase the TLS_base_addr instruction. 7287 I->eraseFromParent(); 7288 7289 return Copy; 7290 } 7291 7292 // Create a virtual register in *TLSBaseAddrReg, and populate it by 7293 // inserting a copy instruction after I. Returns the new instruction. 7294 MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) { 7295 MachineFunction *MF = I->getParent()->getParent(); 7296 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); 7297 const bool is64Bit = STI.is64Bit(); 7298 const X86InstrInfo *TII = STI.getInstrInfo(); 7299 7300 // Create a virtual register for the TLS base address. 7301 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 7302 *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit 7303 ? &X86::GR64RegClass 7304 : &X86::GR32RegClass); 7305 7306 // Insert a copy from RAX/EAX to TLSBaseAddrReg. 7307 MachineInstr *Next = I->getNextNode(); 7308 MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(), 7309 TII->get(TargetOpcode::COPY), 7310 *TLSBaseAddrReg) 7311 .addReg(is64Bit ? X86::RAX : X86::EAX); 7312 7313 return Copy; 7314 } 7315 7316 const char *getPassName() const override { 7317 return "Local Dynamic TLS Access Clean-up"; 7318 } 7319 7320 void getAnalysisUsage(AnalysisUsage &AU) const override { 7321 AU.setPreservesCFG(); 7322 AU.addRequired<MachineDominatorTree>(); 7323 MachineFunctionPass::getAnalysisUsage(AU); 7324 } 7325 }; 7326} 7327 7328char LDTLSCleanup::ID = 0; 7329FunctionPass* 7330llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); } 7331