ARMBaseRegisterInfo.cpp revision 224145
//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

// Debug/testing knobs for the virtual-base-register and local stack
// allocation machinery.
static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

// Frame pointer is R7 on Darwin and Thumb (per those ABIs), R11 otherwise.
// R6 is the designated base pointer when one is needed (see hasBasePointer).
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(), TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}

/// getCalleeSavedRegs - Returns the target's callee-saved register list as a
/// 0-terminated array, selecting the Darwin variant when appropriate.
const unsigned*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
    // register.
    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
    ARM::R11, ARM::R10, ARM::R8,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };
  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}

/// getReservedRegs - Returns the set of registers the allocator must never
/// use for this function (SP, PC, FPSCR, and conditionally FP, the base
/// pointer, R9, and D16-D31).
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  return Reserved;
}

/// isReservedReg - Returns true if the given physical register is reserved
/// for this function. Must be kept consistent with getReservedRegs above.
bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    // R6 is the base pointer (see BasePtr in the constructor).
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    // Only reserved when it is actually this function's frame pointer.
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}

/// getMatchingSuperRegClass - Given register class B of a sub-register at
/// index SubIdx within registers of class A, return the largest class whose
/// registers can be coalesced, or null to forbid coalescing.
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  return 0;
}

/// canCombineSubRegIndices - Returns true if the sequence of sub-register
/// indices in SubIndices can be combined into one whole-register access (or a
/// single larger sub-register, returned via NewSubIdx) of class RC.
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  // NOTE(review): the smallest class usable here is D (64 bits); this bound
  // of 6 looks like it may be a typo for 64 — confirm upstream before
  // changing, as written it only rejects zero-sized classes.
  if (Size < 6)
    return 0;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}

/// getLargestLegalSuperClass - Walk up RC's superclass chain and return the
/// first "canonical" class (GPR/SPR/DPR/QPR/QQPR/QQQQPR); returns RC itself
/// if none is found.
const TargetRegisterClass*
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
                                                                         const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  // All ARM pointers live in general-purpose registers.
  return ARM::GPRRegisterClass;
}

/// getRegPressureLimit - Rough per-class register pressure limits used by
/// pre-RA schedulers; counts are reduced for the frame pointer and a
/// reserved R9.
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
ArrayRef<unsigned>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
                                           unsigned HintType, unsigned HintReg,
                                           const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };

  // We only support even/odd hints for GPR and rGPR.
  if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
    return RC->getRawAllocationOrder(MF);

  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven1);
      else
        return ArrayRef<unsigned>(GPREven4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven2);
      else
        return ArrayRef<unsigned>(GPREven5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven3);
      else
        return ArrayRef<unsigned>(GPREven6);
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd1);
      else
        return ArrayRef<unsigned>(GPROdd4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd2);
      else
        return ArrayRef<unsigned>(GPROdd5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd3);
      else
        return ArrayRef<unsigned>(GPROdd6);
    }
  }
  return RC->getRawAllocationOrder(MF);
}

/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    // Odd register.
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    // Even register.
    return getRegisterPairEven(Reg, MF);
  return 0;
}

/// UpdateRegAllocHint - Keep the even/odd pairing hint of a partner register
/// in sync when Reg is renamed (e.g. by coalescing) to NewReg.
void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register. The other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
  // CortexA9 has a Write-after-write hazard for NEON registers.
  if (!STI.isCortexA9())
    return false;

  switch (RC->getID()) {
  case ARM::DPRRegClassID:
  case ARM::DPR_8RegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::QPRRegClassID:
  case ARM::QPR_8RegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::SPRRegClassID:
  case ARM::SPR_8RegClassID:
    // Avoid reusing S, D, and Q registers.
    // Don't increase register pressure for QQ and QQQQ.
    return true;
  default:
    return false;
  }
}

/// hasBasePointer - Returns true if this function needs R6 reserved as a
/// base pointer (stack realignment with VLAs, or Thumb frames whose locals
/// may be out of range of FP-relative addressing).
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (!EnableBasePointer)
    return false;

  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and thumb1 is positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
          (!MFI->hasVarSizedObjects() || EnableBasePointer));
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  // Realign when locals demand more alignment than the ABI stack alignment,
  // or when the function explicitly requests it via attribute.
  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned
ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}

int ARMBaseRegisterInfo::getLLVMRegNum(unsigned DwarfRegNo, bool isEH) const {
  return ARMGenRegisterInfo::getLLVMRegNumFull(DwarfRegNo,0);
}

/// getRegisterPairEven - For an odd member of a candidate register pair,
/// return its even partner, or 0 if the pairing is impossible (special or
/// reserved registers).
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                                 const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1: return ARM::R0;
  case ARM::R3: return ARM::R2;
  case ARM::R5: return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
  case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  case ARM::S1: return ARM::S0;
  case ARM::S3: return ARM::S2;
  case ARM::S5: return ARM::S4;
  case ARM::S7: return ARM::S6;
  case ARM::S9: return ARM::S8;
  case ARM::S11: return ARM::S10;
  case ARM::S13: return ARM::S12;
  case ARM::S15: return ARM::S14;
  case ARM::S17: return ARM::S16;
  case ARM::S19: return ARM::S18;
  case ARM::S21: return ARM::S20;
  case ARM::S23: return ARM::S22;
  case ARM::S25: return ARM::S24;
  case ARM::S27: return ARM::S26;
  case ARM::S29: return ARM::S28;
  case ARM::S31: return ARM::S30;

  case ARM::D1: return ARM::D0;
  case ARM::D3: return ARM::D2;
  case ARM::D5: return ARM::D4;
  case ARM::D7: return ARM::D6;
  case ARM::D9: return ARM::D8;
  case ARM::D11: return ARM::D10;
  case ARM::D13: return ARM::D12;
  case ARM::D15: return ARM::D14;
  case ARM::D17: return ARM::D16;
  case ARM::D19: return ARM::D18;
  case ARM::D21: return ARM::D20;
  case ARM::D23: return ARM::D22;
  case ARM::D25: return ARM::D24;
  case ARM::D27: return ARM::D26;
  case ARM::D29: return ARM::D28;
  case ARM::D31: return ARM::D30;
  }

  return 0;
}

/// getRegisterPairOdd - For an even member of a candidate register pair,
/// return its odd partner, or 0 if the pairing is impossible (special or
/// reserved registers).
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                                 const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0: return ARM::R1;
  case ARM::R2: return ARM::R3;
  case ARM::R4: return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
  case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  case ARM::S0: return ARM::S1;
  case ARM::S2: return ARM::S3;
  case ARM::S4: return ARM::S5;
  case ARM::S6: return ARM::S7;
  case ARM::S8: return ARM::S9;
  case ARM::S10: return ARM::S11;
  case ARM::S12: return ARM::S13;
  case ARM::S14: return ARM::S15;
  case ARM::S16: return ARM::S17;
  case ARM::S18: return ARM::S19;
  case ARM::S20: return ARM::S21;
  case ARM::S22: return ARM::S23;
  case ARM::S24: return ARM::S25;
  case ARM::S26: return ARM::S27;
  case ARM::S28: return ARM::S29;
  case ARM::S30: return ARM::S31;

  case ARM::D0: return ARM::D1;
  case ARM::D2: return ARM::D3;
  case ARM::D4: return ARM::D5;
  case ARM::D6: return ARM::D7;
  case ARM::D8: return ARM::D9;
  case ARM::D10: return ARM::D11;
  case ARM::D12: return ARM::D13;
  case ARM::D14: return ARM::D15;
  case ARM::D16: return ARM::D17;
  case ARM::D18: return ARM::D19;
  case ARM::D20: return ARM::D21;
  case ARM::D22: return ARM::D23;
  case ARM::D24: return ARM::D25;
  case ARM::D26: return ARM::D27;
  case ARM::D28: return ARM::D29;
  case ARM::D30: return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
865void ARMBaseRegisterInfo:: 866emitLoadConstPool(MachineBasicBlock &MBB, 867 MachineBasicBlock::iterator &MBBI, 868 DebugLoc dl, 869 unsigned DestReg, unsigned SubIdx, int Val, 870 ARMCC::CondCodes Pred, 871 unsigned PredReg, unsigned MIFlags) const { 872 MachineFunction &MF = *MBB.getParent(); 873 MachineConstantPool *ConstantPool = MF.getConstantPool(); 874 const Constant *C = 875 ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val); 876 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4); 877 878 BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp)) 879 .addReg(DestReg, getDefRegState(true), SubIdx) 880 .addConstantPoolIndex(Idx) 881 .addImm(0).addImm(Pred).addReg(PredReg) 882 .setMIFlags(MIFlags); 883} 884 885bool ARMBaseRegisterInfo:: 886requiresRegisterScavenging(const MachineFunction &MF) const { 887 return true; 888} 889 890bool ARMBaseRegisterInfo:: 891requiresFrameIndexScavenging(const MachineFunction &MF) const { 892 return true; 893} 894 895bool ARMBaseRegisterInfo:: 896requiresVirtualBaseRegisters(const MachineFunction &MF) const { 897 return EnableLocalStackAlloc; 898} 899 900static void 901emitSPUpdate(bool isARM, 902 MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, 903 DebugLoc dl, const ARMBaseInstrInfo &TII, 904 int NumBytes, 905 ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) { 906 if (isARM) 907 emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes, 908 Pred, PredReg, TII); 909 else 910 emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes, 911 Pred, PredReg, TII); 912} 913 914 915void ARMBaseRegisterInfo:: 916eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, 917 MachineBasicBlock::iterator I) const { 918 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); 919 if (!TFI->hasReservedCallFrame(MF)) { 920 // If we have alloca, convert as follows: 921 // ADJCALLSTACKDOWN -> sub, sp, sp, amount 922 // ADJCALLSTACKUP -> add, sp, sp, amount 923 MachineInstr 
*Old = I; 924 DebugLoc dl = Old->getDebugLoc(); 925 unsigned Amount = Old->getOperand(0).getImm(); 926 if (Amount != 0) { 927 // We need to keep the stack aligned properly. To do this, we round the 928 // amount of space needed for the outgoing arguments up to the next 929 // alignment boundary. 930 unsigned Align = TFI->getStackAlignment(); 931 Amount = (Amount+Align-1)/Align*Align; 932 933 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 934 assert(!AFI->isThumb1OnlyFunction() && 935 "This eliminateCallFramePseudoInstr does not support Thumb1!"); 936 bool isARM = !AFI->isThumbFunction(); 937 938 // Replace the pseudo instruction with a new instruction... 939 unsigned Opc = Old->getOpcode(); 940 int PIdx = Old->findFirstPredOperandIdx(); 941 ARMCC::CondCodes Pred = (PIdx == -1) 942 ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm(); 943 if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) { 944 // Note: PredReg is operand 2 for ADJCALLSTACKDOWN. 945 unsigned PredReg = Old->getOperand(2).getReg(); 946 emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg); 947 } else { 948 // Note: PredReg is operand 3 for ADJCALLSTACKUP. 949 unsigned PredReg = Old->getOperand(3).getReg(); 950 assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP); 951 emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg); 952 } 953 } 954 } 955 MBB.erase(I); 956} 957 958int64_t ARMBaseRegisterInfo:: 959getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const { 960 const MCInstrDesc &Desc = MI->getDesc(); 961 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); 962 int64_t InstrOffs = 0;; 963 int Scale = 1; 964 unsigned ImmIdx = 0; 965 switch (AddrMode) { 966 case ARMII::AddrModeT2_i8: 967 case ARMII::AddrModeT2_i12: 968 case ARMII::AddrMode_i12: 969 InstrOffs = MI->getOperand(Idx+1).getImm(); 970 Scale = 1; 971 break; 972 case ARMII::AddrMode5: { 973 // VFP address mode. 
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    // An ARM_AM::sub opcode packed into the operand means the encoded
    // immediate is subtracted, so report the offset as negative.
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4; // AM5 immediates are word-scaled.
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    // Thumb1 SP-relative form: the immediate operand is used directly
    // (always an add), scaled by 4.
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  // Sanity-check that the instruction really carries a FrameIndex operand.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    // Testing hook: force base-register allocation for every candidate.
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15:
  // 4 GPRs * 4 bytes + 8 D-registers * 8 bytes = 80 bytes.
  // NOTE(review): assuming isThumb1OnlyFunction() implies isThumbFunction(),
  // this condition reduces to !AFI->isThumb1OnlyFunction() -- confirm.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  // Choose the add-immediate opcode matching the function's ISA
  // (ARM, Thumb1, or Thumb2).
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  // Constrain the virtual base register to a class the chosen ADD can use.
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  // Only the ARM and Thumb2 forms get the default predicate and cc_out
  // operands appended here.
  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

/// resolveFrameIndex - Rewrite MI's FrameIndex operand to use BaseReg plus
/// Offset. Asserts if the rewrite cannot be completed in place.
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  // Locate the FrameIndex operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
}

/// isFrameOffsetLegal - Return true if Offset (combined with any offset
/// already encoded in MI's addressing-mode operands) fits in MI's
/// immediate field.
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  // Locate the FrameIndex operand.
  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    // Thumb1 SP-relative: 5-bit unsigned immediate, word-scaled.
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  // Fold in any offset the instruction already encodes.
  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

/// eliminateFrameIndex - Replace MI's abstract FrameIndex operand with a
/// concrete base register plus offset, emitting extra arithmetic via a
/// scratch virtual register when the offset does not fit the instruction's
/// immediate field. Thumb1 is handled elsewhere.
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  // Locate the FrameIndex operand.
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  // Ask frame lowering which base register (FP or SP) and offset to use.
  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions: just rewrite the operands,
  // no encoding constraints apply.
  if (MI.isDebugValue()) {
    MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    // Materialize FrameReg + Offset into a scratch virtual register,
    // then point the instruction at the scratch register instead.
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}