ARMInstrInfo.cpp (195098) | ARMInstrInfo.cpp (195340) |
---|---|
1//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// --- 26 unchanged lines hidden (view full) --- 35} 36 37static inline 38const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) { 39 return MIB.addReg(0); 40} 41 42ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI) | 1//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// --- 26 unchanged lines hidden (view full) --- 35} 36 37static inline 38const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) { 39 return MIB.addReg(0); 40} 41 42ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI) |
43 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)), 44 RI(*this, STI) { | 43 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) { |
45} 46 47ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI) | 44} 45 46ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI) |
48 : ARMBaseInstrInfo(STI) { | 47 : ARMBaseInstrInfo(STI), RI(*this, STI) { |
49} 50 | 48} 49 |
51/// Return true if the instruction is a register to register move and 52/// leave the source and dest operands in the passed parameters. 53/// 54bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI, 55 unsigned &SrcReg, unsigned &DstReg, 56 unsigned& SrcSubIdx, unsigned& DstSubIdx) const { 57 SrcSubIdx = DstSubIdx = 0; // No sub-registers. 58 59 unsigned oc = MI.getOpcode(); 60 switch (oc) { 61 default: 62 return false; 63 case ARM::FCPYS: 64 case ARM::FCPYD: 65 case ARM::VMOVD: 66 case ARM::VMOVQ: 67 SrcReg = MI.getOperand(1).getReg(); 68 DstReg = MI.getOperand(0).getReg(); 69 return true; 70 case ARM::MOVr: 71 assert(MI.getDesc().getNumOperands() >= 2 && 72 MI.getOperand(0).isReg() && 73 MI.getOperand(1).isReg() && 74 "Invalid ARM MOV instruction"); 75 SrcReg = MI.getOperand(1).getReg(); 76 DstReg = MI.getOperand(0).getReg(); 77 return true; 78 } 79} 80 81unsigned ARMInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 82 int &FrameIndex) const { 83 switch (MI->getOpcode()) { 84 default: break; 85 case ARM::LDR: 86 if (MI->getOperand(1).isFI() && 87 MI->getOperand(2).isReg() && 88 MI->getOperand(3).isImm() && 89 MI->getOperand(2).getReg() == 0 && 90 MI->getOperand(3).getImm() == 0) { 91 FrameIndex = MI->getOperand(1).getIndex(); 92 return MI->getOperand(0).getReg(); 93 } 94 break; 95 case ARM::FLDD: 96 case ARM::FLDS: 97 if (MI->getOperand(1).isFI() && 98 MI->getOperand(2).isImm() && 99 MI->getOperand(2).getImm() == 0) { 100 FrameIndex = MI->getOperand(1).getIndex(); 101 return MI->getOperand(0).getReg(); 102 } 103 break; 104 } 105 return 0; 106} 107 108unsigned ARMInstrInfo::isStoreToStackSlot(const MachineInstr *MI, 109 int &FrameIndex) const { 110 switch (MI->getOpcode()) { 111 default: break; 112 case ARM::STR: 113 if (MI->getOperand(1).isFI() && 114 MI->getOperand(2).isReg() && 115 MI->getOperand(3).isImm() && 116 MI->getOperand(2).getReg() == 0 && 117 MI->getOperand(3).getImm() == 0) { 118 FrameIndex = MI->getOperand(1).getIndex(); 119 return 
MI->getOperand(0).getReg(); 120 } 121 break; 122 case ARM::FSTD: 123 case ARM::FSTS: 124 if (MI->getOperand(1).isFI() && 125 MI->getOperand(2).isImm() && 126 MI->getOperand(2).getImm() == 0) { 127 FrameIndex = MI->getOperand(1).getIndex(); 128 return MI->getOperand(0).getReg(); 129 } 130 break; 131 } 132 133 return 0; 134} 135 136void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB, 137 MachineBasicBlock::iterator I, 138 unsigned DestReg, 139 const MachineInstr *Orig) const { | 50void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB, 51 MachineBasicBlock::iterator I, 52 unsigned DestReg, 53 const MachineInstr *Orig) const { |
140 DebugLoc dl = Orig->getDebugLoc(); 141 if (Orig->getOpcode() == ARM::MOVi2pieces) { | 54 DebugLoc dl = Orig->getDebugLoc(); 55 if (Orig->getOpcode() == ARM::MOVi2pieces) { |
142 RI.emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImm(), 143 Orig->getOperand(2).getImm(), 144 Orig->getOperand(3).getReg(), this, false, dl); | 56 RI.emitLoadConstPool(MBB, I, this, dl, 57 DestReg, 58 Orig->getOperand(1).getImm(), 59 (ARMCC::CondCodes)Orig->getOperand(2).getImm(), 60 Orig->getOperand(3).getReg()); |
145 return; 146 } 147 148 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig); 149 MI->getOperand(0).setReg(DestReg); 150 MBB.insert(I, MI); 151} 152 --- 176 unchanged lines hidden (view full) --- 329 330 MFI->insert(MBBI, NewMIs[1]); 331 MFI->insert(MBBI, NewMIs[0]); 332 return NewMIs[0]; 333} 334 335// Branch analysis. 336bool | 61 return; 62 } 63 64 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig); 65 MI->getOperand(0).setReg(DestReg); 66 MBB.insert(I, MI); 67} 68 --- 176 unchanged lines hidden (view full) --- 245 246 MFI->insert(MBBI, NewMIs[1]); 247 MFI->insert(MBBI, NewMIs[0]); 248 return NewMIs[0]; 249} 250 251// Branch analysis. 252bool |
337 ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, 338 MachineBasicBlock *&FBB, 339 SmallVectorImpl<MachineOperand> &Cond, 340 bool AllowModify) const { | 253ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, 254 MachineBasicBlock *&FBB, 255 SmallVectorImpl<MachineOperand> &Cond, 256 bool AllowModify) const {
341 // If the block has no terminators, it just falls into the block after it. 342 MachineBasicBlock::iterator I = MBB.end(); 343 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) 344 return false; 345 346 // Get the last instruction in the block. 347 MachineInstr *LastInst = I; 348 349 // If there is only one terminator instruction, process it. 350 unsigned LastOpc = LastInst->getOpcode(); 351 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { | 257 // If the block has no terminators, it just falls into the block after it. 258 MachineBasicBlock::iterator I = MBB.end(); 259 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) 260 return false; 261 262 // Get the last instruction in the block. 263 MachineInstr *LastInst = I; 264 265 // If there is only one terminator instruction, process it. 266 unsigned LastOpc = LastInst->getOpcode(); 267 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { |
352 if (LastOpc == ARM::B || LastOpc == ARM::tB) { | 268 if (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B) { |
353 TBB = LastInst->getOperand(0).getMBB(); 354 return false; 355 } | 269 TBB = LastInst->getOperand(0).getMBB(); 270 return false; 271 } |
356 if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) { | 272 if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc || LastOpc == ARM::t2Bcc) { |
357 // Block ends with fall-through condbranch. 358 TBB = LastInst->getOperand(0).getMBB(); 359 Cond.push_back(LastInst->getOperand(1)); 360 Cond.push_back(LastInst->getOperand(2)); 361 return false; 362 } 363 return true; // Can't handle indirect branch. 364 } 365 366 // Get the instruction before it if it is a terminator. 367 MachineInstr *SecondLastInst = I; 368 369 // If there are three terminators, we don't know what sort of block this is. 370 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) 371 return true; 372 | 273 // Block ends with fall-through condbranch. 274 TBB = LastInst->getOperand(0).getMBB(); 275 Cond.push_back(LastInst->getOperand(1)); 276 Cond.push_back(LastInst->getOperand(2)); 277 return false; 278 } 279 return true; // Can't handle indirect branch. 280 } 281 282 // Get the instruction before it if it is a terminator. 283 MachineInstr *SecondLastInst = I; 284 285 // If there are three terminators, we don't know what sort of block this is. 286 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) 287 return true; 288 |
373 // If the block ends with ARM::B/ARM::tB and a ARM::Bcc/ARM::tBcc, handle it. | 289 // If the block ends with ARM::B/ARM::tB/ARM::t2B and a 290 // ARM::Bcc/ARM::tBcc/ARM::t2Bcc, handle it. |
374 unsigned SecondLastOpc = SecondLastInst->getOpcode(); 375 if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) || | 291 unsigned SecondLastOpc = SecondLastInst->getOpcode(); 292 if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) || |
376 (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) { | 293 (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB) || 294 (SecondLastOpc == ARM::t2Bcc && LastOpc == ARM::t2B)) { |
377 TBB = SecondLastInst->getOperand(0).getMBB(); 378 Cond.push_back(SecondLastInst->getOperand(1)); 379 Cond.push_back(SecondLastInst->getOperand(2)); 380 FBB = LastInst->getOperand(0).getMBB(); 381 return false; 382 } 383 384 // If the block ends with two unconditional branches, handle it. The second 385 // one is not executed, so remove it. | 295 TBB = SecondLastInst->getOperand(0).getMBB(); 296 Cond.push_back(SecondLastInst->getOperand(1)); 297 Cond.push_back(SecondLastInst->getOperand(2)); 298 FBB = LastInst->getOperand(0).getMBB(); 299 return false; 300 } 301 302 // If the block ends with two unconditional branches, handle it. The second 303 // one is not executed, so remove it. |
386 if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB) && 387 (LastOpc == ARM::B || LastOpc == ARM::tB)) { | 304 if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB || 305 SecondLastOpc==ARM::t2B) && 306 (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) { |
388 TBB = SecondLastInst->getOperand(0).getMBB(); 389 I = LastInst; 390 if (AllowModify) 391 I->eraseFromParent(); 392 return false; 393 } 394 395 // ...likewise if it ends with a branch table followed by an unconditional 396 // branch. The branch folder can create these, and we must get rid of them for 397 // correctness of Thumb constant islands. 398 if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm || | 307 TBB = SecondLastInst->getOperand(0).getMBB(); 308 I = LastInst; 309 if (AllowModify) 310 I->eraseFromParent(); 311 return false; 312 } 313 314 // ...likewise if it ends with a branch table followed by an unconditional 315 // branch. The branch folder can create these, and we must get rid of them for 316 // correctness of Thumb constant islands. 317 if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm || |
399 SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr) && 400 (LastOpc == ARM::B || LastOpc == ARM::tB)) { | 318 SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr || 319 SecondLastOpc == ARM::t2BR_JTr || SecondLastOpc==ARM::t2BR_JTm || 320 SecondLastOpc == ARM::t2BR_JTadd) && 321 (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) { |
401 I = LastInst; 402 if (AllowModify) 403 I->eraseFromParent(); 404 return true; 405 } 406 407 // Otherwise, can't handle this. 408 return true; 409} 410 411 412unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 413 MachineFunction &MF = *MBB.getParent(); 414 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | 322 I = LastInst; 323 if (AllowModify) 324 I->eraseFromParent(); 325 return true; 326 } 327 328 // Otherwise, can't handle this. 329 return true; 330} 331 332 333unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 334 MachineFunction &MF = *MBB.getParent(); 335 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
415 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B; 416 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc; | 336 int BOpc = AFI->isThumbFunction() ? 337 (AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B; 338 int BccOpc = AFI->isThumbFunction() ? 339 (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; |
417 418 MachineBasicBlock::iterator I = MBB.end(); 419 if (I == MBB.begin()) return 0; 420 --I; 421 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc) 422 return 0; 423 424 // Remove the branch. --- 14 unchanged lines hidden (view full) --- 439unsigned 440ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 441 MachineBasicBlock *FBB, 442 const SmallVectorImpl<MachineOperand> &Cond) const { 443 // FIXME this should probably have a DebugLoc argument 444 DebugLoc dl = DebugLoc::getUnknownLoc(); 445 MachineFunction &MF = *MBB.getParent(); 446 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | 340 341 MachineBasicBlock::iterator I = MBB.end(); 342 if (I == MBB.begin()) return 0; 343 --I; 344 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc) 345 return 0; 346 347 // Remove the branch. --- 14 unchanged lines hidden (view full) --- 362unsigned 363ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 364 MachineBasicBlock *FBB, 365 const SmallVectorImpl<MachineOperand> &Cond) const { 366 // FIXME this should probably have a DebugLoc argument 367 DebugLoc dl = DebugLoc::getUnknownLoc(); 368 MachineFunction &MF = *MBB.getParent(); 369 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
447 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B; 448 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc; | 370 int BOpc = AFI->isThumbFunction() ? 371 (AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B; 372 int BccOpc = AFI->isThumbFunction() ? 373 (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; |
449 450 // Shouldn't be a fall through. 451 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 452 assert((Cond.size() == 2 || Cond.size() == 0) && 453 "ARM branch conditions have two components!"); 454 455 if (FBB == 0) { 456 if (Cond.empty()) // Unconditional branch? --- 6 unchanged lines hidden (view full) --- 463 464 // Two-way conditional branch. 465 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB) 466 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); 467 BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB); 468 return 2; 469} 470 | 374 375 // Shouldn't be a fall through. 376 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 377 assert((Cond.size() == 2 || Cond.size() == 0) && 378 "ARM branch conditions have two components!"); 379 380 if (FBB == 0) { 381 if (Cond.empty()) // Unconditional branch? --- 6 unchanged lines hidden (view full) --- 388 389 // Two-way conditional branch. 390 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB) 391 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); 392 BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB); 393 return 2; 394} 395 |
471bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB, 472 MachineBasicBlock::iterator I, 473 unsigned DestReg, unsigned SrcReg, 474 const TargetRegisterClass *DestRC, 475 const TargetRegisterClass *SrcRC) const { | 396bool 397ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const { 398 if (MBB.empty()) return false; 399 400 switch (MBB.back().getOpcode()) { 401 case ARM::BX_RET: // Return. 402 case ARM::LDM_RET: 403 case ARM::tBX_RET: 404 case ARM::tBX_RET_vararg: 405 case ARM::tPOP_RET: 406 case ARM::B: 407 case ARM::tB: 408 case ARM::t2B: // Uncond branch. 409 case ARM::tBR_JTr: 410 case ARM::t2BR_JTr: 411 case ARM::BR_JTr: // Jumptable branch. 412 case ARM::t2BR_JTm: 413 case ARM::BR_JTm: // Jumptable branch through mem. 414 case ARM::t2BR_JTadd: 415 case ARM::BR_JTadd: // Jumptable branch add to pc. 416 return true; 417 default: return false; 418 } 419} 420 421bool ARMBaseInstrInfo:: 422ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 423 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm(); 424 Cond[0].setImm(ARMCC::getOppositeCondition(CC)); 425 return false; 426} 427 428bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const { 429 int PIdx = MI->findFirstPredOperandIdx(); 430 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL; 431} 432 433bool ARMBaseInstrInfo:: 434PredicateInstruction(MachineInstr *MI, 435 const SmallVectorImpl<MachineOperand> &Pred) const { 436 unsigned Opc = MI->getOpcode(); 437 if (Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B) { 438 MI->setDesc(get((Opc == ARM::B) ? ARM::Bcc : 439 ((Opc == ARM::tB) ? 
ARM::tBcc : ARM::t2Bcc))); 440 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm())); 441 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false)); 442 return true; 443 } 444 445 int PIdx = MI->findFirstPredOperandIdx(); 446 if (PIdx != -1) { 447 MachineOperand &PMO = MI->getOperand(PIdx); 448 PMO.setImm(Pred[0].getImm()); 449 MI->getOperand(PIdx+1).setReg(Pred[1].getReg()); 450 return true; 451 } 452 return false; 453} 454 455bool ARMBaseInstrInfo:: 456SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, 457 const SmallVectorImpl<MachineOperand> &Pred2) const { 458 if (Pred1.size() > 2 || Pred2.size() > 2) 459 return false; 460 461 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm(); 462 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm(); 463 if (CC1 == CC2) 464 return true; 465 466 switch (CC1) { 467 default: 468 return false; 469 case ARMCC::AL: 470 return true; 471 case ARMCC::HS: 472 return CC2 == ARMCC::HI; 473 case ARMCC::LS: 474 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ; 475 case ARMCC::GE: 476 return CC2 == ARMCC::GT; 477 case ARMCC::LE: 478 return CC2 == ARMCC::LT; 479 } 480} 481 482bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, 483 std::vector<MachineOperand> &Pred) const { 484 const TargetInstrDesc &TID = MI->getDesc(); 485 if (!TID.getImplicitDefs() && !TID.hasOptionalDef()) 486 return false; 487 488 bool Found = false; 489 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 490 const MachineOperand &MO = MI->getOperand(i); 491 if (MO.isReg() && MO.getReg() == ARM::CPSR) { 492 Pred.push_back(MO); 493 Found = true; 494 } 495 } 496 497 return Found; 498} 499 500 501/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing 502static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 503 unsigned JTI) DISABLE_INLINE; 504static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 505 unsigned JTI) { 506 return JT[JTI].MBBs.size(); 507} 508 509/// 
GetInstSize - Return the size of the specified MachineInstr. 510/// 511unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { 512 const MachineBasicBlock &MBB = *MI->getParent(); 513 const MachineFunction *MF = MBB.getParent(); 514 const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo(); 515 516 // Basic size info comes from the TSFlags field. 517 const TargetInstrDesc &TID = MI->getDesc(); 518 unsigned TSFlags = TID.TSFlags; 519 520 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) { 521 default: { 522 // If this machine instr is an inline asm, measure it. 523 if (MI->getOpcode() == ARM::INLINEASM) 524 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName()); 525 if (MI->isLabel()) 526 return 0; 527 switch (MI->getOpcode()) { 528 default: 529 assert(0 && "Unknown or unset size field for instr!"); 530 break; 531 case TargetInstrInfo::IMPLICIT_DEF: 532 case TargetInstrInfo::DECLARE: 533 case TargetInstrInfo::DBG_LABEL: 534 case TargetInstrInfo::EH_LABEL: 535 return 0; 536 } 537 break; 538 } 539 case ARMII::Size8Bytes: return 8; // Arm instruction x 2. 540 case ARMII::Size4Bytes: return 4; // Arm instruction. 541 case ARMII::Size2Bytes: return 2; // Thumb instruction. 542 case ARMII::SizeSpecial: { 543 switch (MI->getOpcode()) { 544 case ARM::CONSTPOOL_ENTRY: 545 // If this machine instr is a constant pool entry, its size is recorded as 546 // operand #2. 547 return MI->getOperand(2).getImm(); 548 case ARM::Int_eh_sjlj_setjmp: return 12; 549 case ARM::BR_JTr: 550 case ARM::BR_JTm: 551 case ARM::BR_JTadd: 552 case ARM::t2BR_JTr: 553 case ARM::t2BR_JTm: 554 case ARM::t2BR_JTadd: 555 case ARM::tBR_JTr: { 556 // These are jumptable branches, i.e. a branch followed by an inlined 557 // jumptable. The size is 4 + 4 * number of entries. 558 unsigned NumOps = TID.getNumOperands(); 559 MachineOperand JTOP = 560 MI->getOperand(NumOps - (TID.isPredicable() ? 
3 : 2)); 561 unsigned JTI = JTOP.getIndex(); 562 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); 563 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 564 assert(JTI < JT.size()); 565 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte 566 // 4 aligned. The assembler / linker may add 2 byte padding just before 567 // the JT entries. The size does not include this padding; the 568 // constant islands pass does separate bookkeeping for it. 569 // FIXME: If we know the size of the function is less than (1 << 16) *2 570 // bytes, we can use 16-bit entries instead. Then there won't be an 571 // alignment issue. 572 return getNumJTEntries(JT, JTI) * 4 + 573 ((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4); 574 } 575 default: 576 // Otherwise, pseudo-instruction sizes are zero. 577 return 0; 578 } 579 } 580 } 581 return 0; // Not reached 582} 583 584/// Return true if the instruction is a register to register move and 585/// leave the source and dest operands in the passed parameters. 586/// 587bool 588ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI, 589 unsigned &SrcReg, unsigned &DstReg, 590 unsigned& SrcSubIdx, unsigned& DstSubIdx) const { 591 SrcSubIdx = DstSubIdx = 0; // No sub-registers. 
592 593 unsigned oc = MI.getOpcode(); 594 switch (oc) { 595 default: 596 return false; 597 case ARM::FCPYS: 598 case ARM::FCPYD: 599 case ARM::VMOVD: 600 case ARM::VMOVQ: 601 SrcReg = MI.getOperand(1).getReg(); 602 DstReg = MI.getOperand(0).getReg(); 603 return true; 604 case ARM::MOVr: 605 assert(MI.getDesc().getNumOperands() >= 2 && 606 MI.getOperand(0).isReg() && 607 MI.getOperand(1).isReg() && 608 "Invalid ARM MOV instruction"); 609 SrcReg = MI.getOperand(1).getReg(); 610 DstReg = MI.getOperand(0).getReg(); 611 return true; 612 } 613} 614 615unsigned 616ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 617 int &FrameIndex) const { 618 switch (MI->getOpcode()) { 619 default: break; 620 case ARM::LDR: 621 if (MI->getOperand(1).isFI() && 622 MI->getOperand(2).isReg() && 623 MI->getOperand(3).isImm() && 624 MI->getOperand(2).getReg() == 0 && 625 MI->getOperand(3).getImm() == 0) { 626 FrameIndex = MI->getOperand(1).getIndex(); 627 return MI->getOperand(0).getReg(); 628 } 629 break; 630 case ARM::FLDD: 631 case ARM::FLDS: 632 if (MI->getOperand(1).isFI() && 633 MI->getOperand(2).isImm() && 634 MI->getOperand(2).getImm() == 0) { 635 FrameIndex = MI->getOperand(1).getIndex(); 636 return MI->getOperand(0).getReg(); 637 } 638 break; 639 } 640 return 0; 641} 642 643unsigned 644ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, 645 int &FrameIndex) const { 646 switch (MI->getOpcode()) { 647 default: break; 648 case ARM::STR: 649 if (MI->getOperand(1).isFI() && 650 MI->getOperand(2).isReg() && 651 MI->getOperand(3).isImm() && 652 MI->getOperand(2).getReg() == 0 && 653 MI->getOperand(3).getImm() == 0) { 654 FrameIndex = MI->getOperand(1).getIndex(); 655 return MI->getOperand(0).getReg(); 656 } 657 break; 658 case ARM::FSTD: 659 case ARM::FSTS: 660 if (MI->getOperand(1).isFI() && 661 MI->getOperand(2).isImm() && 662 MI->getOperand(2).getImm() == 0) { 663 FrameIndex = MI->getOperand(1).getIndex(); 664 return MI->getOperand(0).getReg(); 665 } 666 break; 
667 } 668 669 return 0; 670} 671 672bool 673ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB, 674 MachineBasicBlock::iterator I, 675 unsigned DestReg, unsigned SrcReg, 676 const TargetRegisterClass *DestRC, 677 const TargetRegisterClass *SrcRC) const { |
476 DebugLoc DL = DebugLoc::getUnknownLoc(); 477 if (I != MBB.end()) DL = I->getDebugLoc(); 478 479 if (DestRC != SrcRC) { 480 // Not yet supported! 481 return false; 482 } 483 --- 9 unchanged lines hidden (view full) --- 493 else if (DestRC == ARM::QPRRegisterClass) 494 BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg); 495 else 496 return false; 497 498 return true; 499} 500 | 678 DebugLoc DL = DebugLoc::getUnknownLoc(); 679 if (I != MBB.end()) DL = I->getDebugLoc(); 680 681 if (DestRC != SrcRC) { 682 // Not yet supported! 683 return false; 684 } 685 --- 9 unchanged lines hidden (view full) --- 695 else if (DestRC == ARM::QPRRegisterClass) 696 BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg); 697 else 698 return false; 699 700 return true; 701} 702 |
501void ARMInstrInfo:: | 703void ARMBaseInstrInfo:: |
502storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 503 unsigned SrcReg, bool isKill, int FI, 504 const TargetRegisterClass *RC) const { 505 DebugLoc DL = DebugLoc::getUnknownLoc(); 506 if (I != MBB.end()) DL = I->getDebugLoc(); 507 508 if (RC == ARM::GPRRegisterClass) { 509 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR)) --- 6 unchanged lines hidden (view full) --- 516 } else { 517 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); 518 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS)) 519 .addReg(SrcReg, getKillRegState(isKill)) 520 .addFrameIndex(FI).addImm(0)); 521 } 522} 523 | 704storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 705 unsigned SrcReg, bool isKill, int FI, 706 const TargetRegisterClass *RC) const { 707 DebugLoc DL = DebugLoc::getUnknownLoc(); 708 if (I != MBB.end()) DL = I->getDebugLoc(); 709 710 if (RC == ARM::GPRRegisterClass) { 711 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR)) --- 6 unchanged lines hidden (view full) --- 718 } else { 719 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); 720 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS)) 721 .addReg(SrcReg, getKillRegState(isKill)) 722 .addFrameIndex(FI).addImm(0)); 723 } 724} 725 |
524void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 525 bool isKill, 526 SmallVectorImpl<MachineOperand> &Addr, 527 const TargetRegisterClass *RC, 528 SmallVectorImpl<MachineInstr*> &NewMIs) const{ | 726void 727ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 728 bool isKill, 729 SmallVectorImpl<MachineOperand> &Addr, 730 const TargetRegisterClass *RC, 731 SmallVectorImpl<MachineInstr*> &NewMIs) const{ |
529 DebugLoc DL = DebugLoc::getUnknownLoc(); 530 unsigned Opc = 0; 531 if (RC == ARM::GPRRegisterClass) { 532 Opc = ARM::STR; 533 } else if (RC == ARM::DPRRegisterClass) { 534 Opc = ARM::FSTD; 535 } else { 536 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); --- 4 unchanged lines hidden (view full) --- 541 BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)); 542 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 543 MIB.addOperand(Addr[i]); 544 AddDefaultPred(MIB); 545 NewMIs.push_back(MIB); 546 return; 547} 548 | 732 DebugLoc DL = DebugLoc::getUnknownLoc(); 733 unsigned Opc = 0; 734 if (RC == ARM::GPRRegisterClass) { 735 Opc = ARM::STR; 736 } else if (RC == ARM::DPRRegisterClass) { 737 Opc = ARM::FSTD; 738 } else { 739 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); --- 4 unchanged lines hidden (view full) --- 744 BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)); 745 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 746 MIB.addOperand(Addr[i]); 747 AddDefaultPred(MIB); 748 NewMIs.push_back(MIB); 749 return; 750} 751 |
549void ARMInstrInfo:: | 752void ARMBaseInstrInfo:: |
550loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 551 unsigned DestReg, int FI, 552 const TargetRegisterClass *RC) const { 553 DebugLoc DL = DebugLoc::getUnknownLoc(); 554 if (I != MBB.end()) DL = I->getDebugLoc(); 555 556 if (RC == ARM::GPRRegisterClass) { 557 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg) 558 .addFrameIndex(FI).addReg(0).addImm(0)); 559 } else if (RC == ARM::DPRRegisterClass) { 560 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg) 561 .addFrameIndex(FI).addImm(0)); 562 } else { 563 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); 564 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg) 565 .addFrameIndex(FI).addImm(0)); 566 } 567} 568 | 753loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 754 unsigned DestReg, int FI, 755 const TargetRegisterClass *RC) const { 756 DebugLoc DL = DebugLoc::getUnknownLoc(); 757 if (I != MBB.end()) DL = I->getDebugLoc(); 758 759 if (RC == ARM::GPRRegisterClass) { 760 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg) 761 .addFrameIndex(FI).addReg(0).addImm(0)); 762 } else if (RC == ARM::DPRRegisterClass) { 763 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg) 764 .addFrameIndex(FI).addImm(0)); 765 } else { 766 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!"); 767 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg) 768 .addFrameIndex(FI).addImm(0)); 769 } 770} 771 |
569void ARMInstrInfo:: | 772void ARMBaseInstrInfo:: |
570loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 571 SmallVectorImpl<MachineOperand> &Addr, 572 const TargetRegisterClass *RC, 573 SmallVectorImpl<MachineInstr*> &NewMIs) const { 574 DebugLoc DL = DebugLoc::getUnknownLoc(); 575 unsigned Opc = 0; 576 if (RC == ARM::GPRRegisterClass) { 577 Opc = ARM::LDR; --- 7 unchanged lines hidden (view full) --- 585 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 586 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 587 MIB.addOperand(Addr[i]); 588 AddDefaultPred(MIB); 589 NewMIs.push_back(MIB); 590 return; 591} 592 | 773loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 774 SmallVectorImpl<MachineOperand> &Addr, 775 const TargetRegisterClass *RC, 776 SmallVectorImpl<MachineInstr*> &NewMIs) const { 777 DebugLoc DL = DebugLoc::getUnknownLoc(); 778 unsigned Opc = 0; 779 if (RC == ARM::GPRRegisterClass) { 780 Opc = ARM::LDR; --- 7 unchanged lines hidden (view full) --- 788 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 789 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 790 MIB.addOperand(Addr[i]); 791 AddDefaultPred(MIB); 792 NewMIs.push_back(MIB); 793 return; 794} 795 |
593MachineInstr *ARMInstrInfo:: | 796MachineInstr *ARMBaseInstrInfo:: |
594foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, 595 const SmallVectorImpl<unsigned> &Ops, int FI) const { 596 if (Ops.size() != 1) return NULL; 597 598 unsigned OpNum = Ops[0]; 599 unsigned Opc = MI->getOpcode(); 600 MachineInstr *NewMI = NULL; 601 switch (Opc) { 602 default: break; 603 case ARM::MOVr: { 604 if (MI->getOperand(4).getReg() == ARM::CPSR) 605 // If it is updating CPSR, then it cannot be folded. 606 break; 607 unsigned Pred = MI->getOperand(2).getImm(); 608 unsigned PredReg = MI->getOperand(3).getReg(); 609 if (OpNum == 0) { // move -> store 610 unsigned SrcReg = MI->getOperand(1).getReg(); 611 bool isKill = MI->getOperand(1).isKill(); | 797foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, 798 const SmallVectorImpl<unsigned> &Ops, int FI) const { 799 if (Ops.size() != 1) return NULL; 800 801 unsigned OpNum = Ops[0]; 802 unsigned Opc = MI->getOpcode(); 803 MachineInstr *NewMI = NULL; 804 switch (Opc) { 805 default: break; 806 case ARM::MOVr: { 807 if (MI->getOperand(4).getReg() == ARM::CPSR) 808 // If it is updating CPSR, then it cannot be folded. 809 break; 810 unsigned Pred = MI->getOperand(2).getImm(); 811 unsigned PredReg = MI->getOperand(3).getReg(); 812 if (OpNum == 0) { // move -> store 813 unsigned SrcReg = MI->getOperand(1).getReg(); 814 bool isKill = MI->getOperand(1).isKill(); |
815 bool isUndef = MI->getOperand(1).isUndef(); |
|
612 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR)) | 816 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR)) |
613 .addReg(SrcReg, getKillRegState(isKill)) | 817 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) |
614 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg); 615 } else { // move -> load 616 unsigned DstReg = MI->getOperand(0).getReg(); 617 bool isDead = MI->getOperand(0).isDead(); | 818 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg); 819 } else { // move -> load 820 unsigned DstReg = MI->getOperand(0).getReg(); 821 bool isDead = MI->getOperand(0).isDead(); |
822 bool isUndef = MI->getOperand(0).isUndef(); |
|
618 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR)) | 823 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR)) |
619 .addReg(DstReg, RegState::Define | getDeadRegState(isDead)) | 824 .addReg(DstReg, 825 RegState::Define | 826 getDeadRegState(isDead) | 827 getUndefRegState(isUndef)) |
620 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg); 621 } 622 break; 623 } 624 case ARM::FCPYS: { 625 unsigned Pred = MI->getOperand(2).getImm(); 626 unsigned PredReg = MI->getOperand(3).getReg(); 627 if (OpNum == 0) { // move -> store 628 unsigned SrcReg = MI->getOperand(1).getReg(); | 828 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg); 829 } 830 break; 831 } 832 case ARM::FCPYS: { 833 unsigned Pred = MI->getOperand(2).getImm(); 834 unsigned PredReg = MI->getOperand(3).getReg(); 835 if (OpNum == 0) { // move -> store 836 unsigned SrcReg = MI->getOperand(1).getReg(); |
837 bool isKill = MI->getOperand(1).isKill(); 838 bool isUndef = MI->getOperand(1).isUndef(); |
|
629 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS)) | 839 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS)) |
630 .addReg(SrcReg).addFrameIndex(FI) | 840 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) 841 .addFrameIndex(FI) |
631 .addImm(0).addImm(Pred).addReg(PredReg); 632 } else { // move -> load 633 unsigned DstReg = MI->getOperand(0).getReg(); | 842 .addImm(0).addImm(Pred).addReg(PredReg); 843 } else { // move -> load 844 unsigned DstReg = MI->getOperand(0).getReg(); |
634 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS), DstReg) 635 .addFrameIndex(FI) 636 .addImm(0).addImm(Pred).addReg(PredReg); | 845 bool isDead = MI->getOperand(0).isDead(); 846 bool isUndef = MI->getOperand(0).isUndef(); 847 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS)) 848 .addReg(DstReg, 849 RegState::Define | 850 getDeadRegState(isDead) | 851 getUndefRegState(isUndef)) 852 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg); |
637 } 638 break; 639 } 640 case ARM::FCPYD: { 641 unsigned Pred = MI->getOperand(2).getImm(); 642 unsigned PredReg = MI->getOperand(3).getReg(); 643 if (OpNum == 0) { // move -> store 644 unsigned SrcReg = MI->getOperand(1).getReg(); 645 bool isKill = MI->getOperand(1).isKill(); | 853 } 854 break; 855 } 856 case ARM::FCPYD: { 857 unsigned Pred = MI->getOperand(2).getImm(); 858 unsigned PredReg = MI->getOperand(3).getReg(); 859 if (OpNum == 0) { // move -> store 860 unsigned SrcReg = MI->getOperand(1).getReg(); 861 bool isKill = MI->getOperand(1).isKill(); |
862 bool isUndef = MI->getOperand(1).isUndef(); |
|
646 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD)) | 863 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD)) |
647 .addReg(SrcReg, getKillRegState(isKill)) | 864 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) |
648 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg); 649 } else { // move -> load 650 unsigned DstReg = MI->getOperand(0).getReg(); 651 bool isDead = MI->getOperand(0).isDead(); | 865 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg); 866 } else { // move -> load 867 unsigned DstReg = MI->getOperand(0).getReg(); 868 bool isDead = MI->getOperand(0).isDead(); |
869 bool isUndef = MI->getOperand(0).isUndef(); |
|
652 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD)) | 870 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD)) |
653 .addReg(DstReg, RegState::Define | getDeadRegState(isDead)) | 871 .addReg(DstReg, 872 RegState::Define | 873 getDeadRegState(isDead) | 874 getUndefRegState(isUndef)) |
654 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg); 655 } 656 break; 657 } 658 } 659 660 return NewMI; 661} 662 | 875 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg); 876 } 877 break; 878 } 879 } 880 881 return NewMI; 882} 883 |
663bool ARMBaseInstrInfo:: 664canFoldMemoryOperand(const MachineInstr *MI, 665 const SmallVectorImpl<unsigned> &Ops) const { | 884MachineInstr* 885ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, 886 MachineInstr* MI, 887 const SmallVectorImpl<unsigned> &Ops, 888 MachineInstr* LoadMI) const { 889 return 0; 890} 891 892bool 893ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI, 894 const SmallVectorImpl<unsigned> &Ops) const { |
666 if (Ops.size() != 1) return false; 667 | 895 if (Ops.size() != 1) return false; 896 |
668 unsigned OpNum = Ops[0]; | |
669 unsigned Opc = MI->getOpcode(); 670 switch (Opc) { 671 default: break; 672 case ARM::MOVr: 673 // If it is updating CPSR, then it cannot be folded. 674 return MI->getOperand(4).getReg() != ARM::CPSR; | 897 unsigned Opc = MI->getOpcode(); 898 switch (Opc) { 899 default: break; 900 case ARM::MOVr: 901 // If it is updating CPSR, then it cannot be folded. 902 return MI->getOperand(4).getReg() != ARM::CPSR; |
675 case ARM::tMOVr: 676 case ARM::tMOVlor2hir: 677 case ARM::tMOVhir2lor: 678 case ARM::tMOVhir2hir: { 679 if (OpNum == 0) { // move -> store 680 unsigned SrcReg = MI->getOperand(1).getReg(); 681 if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg)) 682 // tSpill cannot take a high register operand. 683 return false; 684 } else { // move -> load 685 unsigned DstReg = MI->getOperand(0).getReg(); 686 if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg)) 687 // tRestore cannot target a high register operand. 688 return false; 689 } 690 return true; 691 } | |
692 case ARM::FCPYS: 693 case ARM::FCPYD: 694 return true; 695 696 case ARM::VMOVD: 697 case ARM::VMOVQ: 698 return false; // FIXME 699 } 700 701 return false; 702} | 903 case ARM::FCPYS: 904 case ARM::FCPYD: 905 return true; 906 907 case ARM::VMOVD: 908 case ARM::VMOVQ: 909 return false; // FIXME 910 } 911 912 return false; 913} |
703 704bool 705 ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const { 706 if (MBB.empty()) return false; 707 708 switch (MBB.back().getOpcode()) { 709 case ARM::BX_RET: // Return. 710 case ARM::LDM_RET: 711 case ARM::tBX_RET: 712 case ARM::tBX_RET_vararg: 713 case ARM::tPOP_RET: 714 case ARM::B: 715 case ARM::tB: // Uncond branch. 716 case ARM::tBR_JTr: 717 case ARM::BR_JTr: // Jumptable branch. 718 case ARM::BR_JTm: // Jumptable branch through mem. 719 case ARM::BR_JTadd: // Jumptable branch add to pc. 720 return true; 721 default: return false; 722 } 723} 724 725bool ARMBaseInstrInfo:: 726ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 727 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm(); 728 Cond[0].setImm(ARMCC::getOppositeCondition(CC)); 729 return false; 730} 731 732bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const { 733 int PIdx = MI->findFirstPredOperandIdx(); 734 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL; 735} 736 737bool ARMBaseInstrInfo:: 738PredicateInstruction(MachineInstr *MI, 739 const SmallVectorImpl<MachineOperand> &Pred) const { 740 unsigned Opc = MI->getOpcode(); 741 if (Opc == ARM::B || Opc == ARM::tB) { 742 MI->setDesc(get(Opc == ARM::B ? 
ARM::Bcc : ARM::tBcc)); 743 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm())); 744 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false)); 745 return true; 746 } 747 748 int PIdx = MI->findFirstPredOperandIdx(); 749 if (PIdx != -1) { 750 MachineOperand &PMO = MI->getOperand(PIdx); 751 PMO.setImm(Pred[0].getImm()); 752 MI->getOperand(PIdx+1).setReg(Pred[1].getReg()); 753 return true; 754 } 755 return false; 756} 757 758bool ARMBaseInstrInfo:: 759SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, 760 const SmallVectorImpl<MachineOperand> &Pred2) const { 761 if (Pred1.size() > 2 || Pred2.size() > 2) 762 return false; 763 764 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm(); 765 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm(); 766 if (CC1 == CC2) 767 return true; 768 769 switch (CC1) { 770 default: 771 return false; 772 case ARMCC::AL: 773 return true; 774 case ARMCC::HS: 775 return CC2 == ARMCC::HI; 776 case ARMCC::LS: 777 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ; 778 case ARMCC::GE: 779 return CC2 == ARMCC::GT; 780 case ARMCC::LE: 781 return CC2 == ARMCC::LT; 782 } 783} 784 785bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, 786 std::vector<MachineOperand> &Pred) const { 787 const TargetInstrDesc &TID = MI->getDesc(); 788 if (!TID.getImplicitDefs() && !TID.hasOptionalDef()) 789 return false; 790 791 bool Found = false; 792 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 793 const MachineOperand &MO = MI->getOperand(i); 794 if (MO.isReg() && MO.getReg() == ARM::CPSR) { 795 Pred.push_back(MO); 796 Found = true; 797 } 798 } 799 800 return Found; 801} 802 803 804/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing 805static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 806 unsigned JTI) DISABLE_INLINE; 807static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 808 unsigned JTI) { 809 return JT[JTI].MBBs.size(); 810} 811 812/// 
GetInstSize - Return the size of the specified MachineInstr. 813/// 814unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { 815 const MachineBasicBlock &MBB = *MI->getParent(); 816 const MachineFunction *MF = MBB.getParent(); 817 const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo(); 818 819 // Basic size info comes from the TSFlags field. 820 const TargetInstrDesc &TID = MI->getDesc(); 821 unsigned TSFlags = TID.TSFlags; 822 823 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) { 824 default: { 825 // If this machine instr is an inline asm, measure it. 826 if (MI->getOpcode() == ARM::INLINEASM) 827 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName()); 828 if (MI->isLabel()) 829 return 0; 830 switch (MI->getOpcode()) { 831 default: 832 assert(0 && "Unknown or unset size field for instr!"); 833 break; 834 case TargetInstrInfo::IMPLICIT_DEF: 835 case TargetInstrInfo::DECLARE: 836 case TargetInstrInfo::DBG_LABEL: 837 case TargetInstrInfo::EH_LABEL: 838 return 0; 839 } 840 break; 841 } 842 case ARMII::Size8Bytes: return 8; // Arm instruction x 2. 843 case ARMII::Size4Bytes: return 4; // Arm instruction. 844 case ARMII::Size2Bytes: return 2; // Thumb instruction. 845 case ARMII::SizeSpecial: { 846 switch (MI->getOpcode()) { 847 case ARM::CONSTPOOL_ENTRY: 848 // If this machine instr is a constant pool entry, its size is recorded as 849 // operand #2. 850 return MI->getOperand(2).getImm(); 851 case ARM::Int_eh_sjlj_setjmp: return 12; 852 case ARM::BR_JTr: 853 case ARM::BR_JTm: 854 case ARM::BR_JTadd: 855 case ARM::tBR_JTr: { 856 // These are jumptable branches, i.e. a branch followed by an inlined 857 // jumptable. The size is 4 + 4 * number of entries. 858 unsigned NumOps = TID.getNumOperands(); 859 MachineOperand JTOP = 860 MI->getOperand(NumOps - (TID.isPredicable() ? 
3 : 2)); 861 unsigned JTI = JTOP.getIndex(); 862 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); 863 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 864 assert(JTI < JT.size()); 865 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte 866 // 4 aligned. The assembler / linker may add 2 byte padding just before 867 // the JT entries. The size does not include this padding; the 868 // constant islands pass does separate bookkeeping for it. 869 // FIXME: If we know the size of the function is less than (1 << 16) *2 870 // bytes, we can use 16-bit entries instead. Then there won't be an 871 // alignment issue. 872 return getNumJTEntries(JT, JTI) * 4 + 873 (MI->getOpcode()==ARM::tBR_JTr ? 2 : 4); 874 } 875 default: 876 // Otherwise, pseudo-instruction sizes are zero. 877 return 0; 878 } 879 } 880 } 881 return 0; // Not reached 882} | |