//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
}

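// convertToThreeAddress - Split a pre/post-indexed load or store into an
// un-indexed memory access plus a separate ADD/SUB that updates the base
// register. The conversion is gated by -enable-arm-3-addr-conv and returns
// null when no conversion is performed.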
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8 bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
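// AnalyzeBranch examines the terminators of MBB; on success it fills in TBB,
// FBB and the branch condition and returns false. Returning true means the
// terminator sequence could not be understood (e.g. an indirect branch).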
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


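// RemoveBranch erases the block's trailing unconditional branch and, if
// present, the conditional branch before it, returning how many were removed.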
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

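// InsertBranch appends the branch(es) described by TBB, FBB and Cond to the
// end of MBB and returns the number of instructions emitted (1 or 2).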
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

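// PredicateInstruction rewrites MI to execute under the given predicate,
// either by turning an unconditional branch into a conditional one or by
// filling in the instruction's existing predicate operands.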
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

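// SubsumesPredicate returns true if the first predicate implies the second,
// e.g. AL subsumes every condition and GE subsumes GT.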
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
DISABLE_INLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::KILL:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 24;
    case ARM::tInt_eh_sjlj_setjmp:
      return 22;
    case ARM::t2Int_eh_sjlj_setjmp:
      return 22;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is the branch instruction (2 or 4 bytes) plus one
      // entry per jumptable slot: one byte for TBB, two bytes for TBH, and
      // four bytes otherwise.
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2 bytes of padding just
      // before the JT entries. That padding is not included in the size
      // returned here; the constant islands pass does separate bookkeeping
      // for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::VMOVS:
  case ARM::VMOVD:
  case ARM::VMOVDneon:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}

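// isLoadFromStackSlot - If MI is a direct load from a stack slot, return the
// destination register and set FrameIndex; otherwise return 0.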
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

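// isStoreToStackSlot - If MI is a direct store to a stack slot, return the
// stored register and set FrameIndex; otherwise return 0.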
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

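// copyRegToReg emits the register-to-register move appropriate for the given
// register classes (MOVr, VMOVS, VMOVD, VMOVDneon or VMOVQ) and returns false
// if the copy is not supported.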
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    if (DestRC->getSize() != SrcRC->getSize())
      return false;

    // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
    // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
    if (DestRC->getSize() != 8 && DestRC->getSize() != 16)
      return false;
  }

  if (DestRC == ARM::GPRRegisterClass) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                        DestReg).addReg(SrcReg)));
  } else if (DestRC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVS), DestReg)
                   .addReg(SrcReg));
  } else if (DestRC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVD), DestReg)
                   .addReg(SrcReg));
  } else if (DestRC == ARM::DPR_VFP2RegisterClass ||
             DestRC == ARM::DPR_8RegisterClass ||
             SrcRC == ARM::DPR_VFP2RegisterClass ||
             SrcRC == ARM::DPR_8RegisterClass) {
    // Always use neon reg-reg move if source or dest is NEON-only regclass.
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVDneon),
                           DestReg).addReg(SrcReg));
  } else if (DestRC == ARM::QPRRegisterClass ||
             DestRC == ARM::QPR_VFP2RegisterClass ||
             DestRC == ARM::QPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVQ),
                           DestReg).addReg(SrcReg));
  } else {
    return false;
  }

  return true;
}

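// storeRegToStackSlot spills SrcReg to stack slot FI using the store opcode
// that matches its register class, attaching a MachineMemOperand describing
// the stack access.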
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOStore, 0,
                            MFI.getObjectSize(FI),
                            Align);

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    if (Align >= 16
        && (getRegisterInfo().needsStackRealignment(MF))) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                     .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
                     .addMemOperand(MMO)
                     .addReg(SrcReg, getKillRegState(isKill)));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRQ))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    }
  }
}

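// loadRegFromStackSlot is the counterpart of storeRegToStackSlot: it reloads
// DestReg from stack slot FI with the load opcode matching its register class.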
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOLoad, 0,
                            MFI.getObjectSize(FI),
                            Align);

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass ||
            RC == ARM::QPR_8RegisterClass) && "Unknown regclass!");
    if (Align >= 16
        && (getRegisterInfo().needsStackRealignment(MF))) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
                     .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    }
  }
}

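// foldMemoryOperandImpl tries to replace a register-to-register move whose
// source or destination is being spilled with a direct store to / load from
// the stack slot, returning the new instruction or null if folding fails.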
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::VMOVS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }
  else if (Opc == ARM::VMOVD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR ||
      MI->getOperand(4).isDead();
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    return true;
  } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD) {
    return true;
  } else if (Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}

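// reMaterialize clones Orig at the insertion point. PIC constant-pool loads
// (tLDRpci_pic / t2LDRpci_pic) get a fresh constant-pool entry and pc-label
// id so the rematerialized copy remains correct.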
void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo *TRI) const {
  DebugLoc dl = Orig->getDebugLoc();

  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = TRI->getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    MachineConstantPool *MCP = MF.getConstantPool();
    unsigned CPI = Orig->getOperand(1).getIndex();
    const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
    assert(MCPE.isMachineConstantPoolEntry() &&
           "Expecting a machine constantpool entry!");
    ARMConstantPoolValue *ACPV =
      static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
    unsigned PCLabelId = AFI->createConstPoolEntryUId();
    ARMConstantPoolValue *NewCPV = 0;
    if (ACPV->isGlobalValue())
      NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
                                        ARMCP::CPValue, 4);
    else if (ACPV->isExtSymbol())
      NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
                                        ACPV->getSymbol(), PCLabelId, 4);
    else if (ACPV->isBlockAddress())
      NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
                                        ARMCP::CPBlockAddress, 4);
    else
      llvm_unreachable("Unexpected ARM constantpool value type!!");
    CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }

  MachineInstr *NewMI = prior(I);
  NewMI->getOperand(0).setSubReg(SubIdx);
}

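// isIdentical treats two constant-pool loads as identical when their
// ARMConstantPoolValues have the same value, even if the constant-pool
// indices differ; all other instructions defer to the generic check.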
bool ARMBaseInstrInfo::isIdentical(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    ARMConstantPoolValue *ACPV0 =
      static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
    ARMConstantPoolValue *ACPV1 =
      static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
    return ACPV0->hasSameValue(ACPV1);
  }

  return TargetInstrInfoImpl::isIdentical(MI0, MI1, MRI);
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


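// emitARMRegPlusImmediate materializes BaseReg + NumBytes into DestReg as a
// sequence of ADDri/SUBri instructions, peeling off one rotated 8-bit chunk
// of the immediate per instruction.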
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

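// rewriteARMFrameIndex folds a frame-index reference in MI into FrameReg plus
// an immediate where the addressing mode allows it. Returns true if the
// offset was fully absorbed; any remainder is passed back through Offset.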
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}
