ARMBaseInstrInfo.cpp revision 205407
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
}

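/// convertToThreeAddress - Split a pre/post-indexed load or store into an
/// un-indexed memory access plus an explicit base-register update, when the
/// -enable-arm-3-addr-conv option is set.  Returns null if the instruction
/// cannot be converted, e.g. when the conversion would require more than one
/// extra instruction or for Thumb2, which is not handled yet.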
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
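// AnalyzeBranch returns false when it successfully analyzes the block's
// terminators, filling in TBB/FBB and the two-operand ARM condition
// (condition code immediate plus CPSR register); it returns true when the
// terminators are something it cannot reason about, e.g. an indirect branch.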
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


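/// RemoveBranch - Delete the branch instructions at the end of the block (at
/// most one unconditional and one conditional branch) and return how many
/// were removed.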
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

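/// InsertBranch - Insert an unconditional (B / tB / t2B) or conditional
/// (Bcc / tBcc / t2Bcc) branch at the end of the block, choosing the opcode
/// from the function's ARM / Thumb1 / Thumb2 mode, and return the number of
/// instructions added.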
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();

  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

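/// PredicateInstruction - Apply the given predicate to MI.  Unconditional
/// branches are rewritten into their conditional form; any other instruction
/// with a predicate operand simply has that operand updated.  Returns false
/// if the instruction cannot be predicated.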
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

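/// SubsumesPredicate - Return true if the first predicate logically implies
/// the second, i.e. anything executed under Pred2 would also execute under
/// Pred1 (e.g. AL subsumes everything, GE subsumes GT, LS subsumes LO and EQ).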
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

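/// DefinesPredicate - If MI defines CPSR (and therefore the predicate used by
/// subsequent instructions), collect those operands in Pred and return true.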
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
DISABLE_INLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetOpcode::IMPLICIT_DEF:
    case TargetOpcode::KILL:
    case TargetOpcode::DBG_LABEL:
    case TargetOpcode::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 24;
    case ARM::tInt_eh_sjlj_setjmp:
      return 14;
    case ARM::t2Int_eh_sjlj_setjmp:
      return 14;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB:
    case ARM::t2TBH: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries. For TBB, each
      // entry is one byte; for TBH, each entry is two bytes.
      unsigned EntrySize = (Opc == ARM::t2TBB)
        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      assert(MJTI != 0);
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries.  The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) *2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::VMOVS:
  case ARM::VMOVD:
  case ARM::VMOVDneon:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}

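/// isLoadFromStackSlot - If MI is a direct load from a stack slot (zero
/// offset, no index register), set FrameIndex and return the destination
/// register; otherwise return 0.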
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

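/// isStoreToStackSlot - If MI is a direct store to a stack slot (zero offset,
/// no index register), set FrameIndex and return the register being stored;
/// otherwise return 0.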
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

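/// copyRegToReg - Emit a register-to-register copy from SrcReg to DestReg,
/// choosing MOVr, VMOVRS, VMOVSR, VMOVS, VMOVD, VMOVDneon or VMOVQ based on
/// the register classes involved.  Returns false if no suitable copy
/// instruction exists for the given class combination.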
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (DestRC == ARM::tGPRRegisterClass)
    DestRC = ARM::GPRRegisterClass;
  if (SrcRC == ARM::tGPRRegisterClass)
    SrcRC = ARM::GPRRegisterClass;

  // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
  if (DestRC == ARM::DPR_8RegisterClass)
    DestRC = ARM::DPR_VFP2RegisterClass;
  if (SrcRC == ARM::DPR_8RegisterClass)
    SrcRC = ARM::DPR_VFP2RegisterClass;

  // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
  if (DestRC == ARM::QPR_VFP2RegisterClass ||
      DestRC == ARM::QPR_8RegisterClass)
    DestRC = ARM::QPRRegisterClass;
  if (SrcRC == ARM::QPR_VFP2RegisterClass ||
      SrcRC == ARM::QPR_8RegisterClass)
    SrcRC = ARM::QPRRegisterClass;

  // Disallow copies of unequal sizes.
  if (DestRC != SrcRC && DestRC->getSize() != SrcRC->getSize())
    return false;

  if (DestRC == ARM::GPRRegisterClass) {
    if (SrcRC == ARM::SPRRegisterClass)
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVRS), DestReg)
                     .addReg(SrcReg));
    else
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
                                          DestReg).addReg(SrcReg)));
  } else {
    unsigned Opc;

    if (DestRC == ARM::SPRRegisterClass)
      Opc = (SrcRC == ARM::GPRRegisterClass ? ARM::VMOVSR : ARM::VMOVS);
    else if (DestRC == ARM::DPRRegisterClass)
      Opc = ARM::VMOVD;
    else if (DestRC == ARM::DPR_VFP2RegisterClass ||
             SrcRC == ARM::DPR_VFP2RegisterClass)
      // Always use neon reg-reg move if source or dest is NEON-only regclass.
      Opc = ARM::VMOVDneon;
    else if (DestRC == ARM::QPRRegisterClass)
      Opc = ARM::VMOVQ;
    else
      return false;

    AddDefaultPred(BuildMI(MBB, I, DL, get(Opc), DestReg)
                   .addReg(SrcReg));
  }

  return true;
}

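/// storeRegToStackSlot - Spill SrcReg to stack slot FI, selecting STR, VSTRD,
/// VSTRS, VSTRQ or (for sufficiently aligned Q registers when the stack can
/// be realigned) VST1q64 according to the register class, and attach a store
/// memory operand.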
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOStore, 0,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (RC == ARM::tGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
    // FIXME: Neon instructions should support predicates
    if (Align >= 16 && (getRegisterInfo().canRealignStack(MF))) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                     .addFrameIndex(FI).addImm(128)
                     .addMemOperand(MMO)
                     .addReg(SrcReg, getKillRegState(isKill)));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRQ))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    }
  }
}

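/// loadRegFromStackSlot - Reload DestReg from stack slot FI, selecting LDR,
/// VLDRD, VLDRS, VLDRQ or (for sufficiently aligned Q registers when the
/// stack can be realigned) VLD1q64 according to the register class, and
/// attach a load memory operand.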
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOLoad, 0,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers.  Just treat it as GPR here.
  if (RC == ARM::tGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::DPRRegisterClass ||
             RC == ARM::DPR_VFP2RegisterClass ||
             RC == ARM::DPR_8RegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else if (RC == ARM::SPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
  } else {
    assert((RC == ARM::QPRRegisterClass ||
            RC == ARM::QPR_VFP2RegisterClass ||
            RC == ARM::QPR_8RegisterClass) && "Unknown regclass!");
    if (Align >= 16 && (getRegisterInfo().canRealignStack(MF))) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
                     .addFrameIndex(FI).addImm(128)
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    }
  }
}

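/// foldMemoryOperandImpl - Fold a register-to-register move whose source or
/// destination register is being spilled or reloaded into a direct store to
/// or load from stack slot FI.  Handles MOVr, t2MOVr, the Thumb GPR moves,
/// VMOVS and VMOVD; returns null if the move cannot be folded (e.g. when it
/// updates CPSR).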
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::VMOVS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::VMOVD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  // FIXME
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR ||
      MI->getOperand(4).isDead();
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    return true;
  } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD) {
    return true;
  } else if (Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *NewCPV = 0;
  if (ACPV->isGlobalValue())
    NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
                                      ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
                                      ACPV->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
                                      ARMCP::CPBlockAddress, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

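/// reMaterialize - Re-emit Orig at the insertion point, redefining DestReg.
/// PC-relative constant-pool loads (tLDRpci_pic / t2LDRpci_pic) get a fresh
/// constant-pool entry and PC label so the rematerialized copy does not share
/// labels with the original; everything else is simply cloned.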
void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo *TRI) const {
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = TRI->getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }

  MachineInstr *NewMI = prior(I);
  NewMI->getOperand(0).setSubReg(SubIdx);
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    ARMConstantPoolValue *ACPV0 =
      static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
    ARMConstantPoolValue *ACPV1 =
      static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
    return ACPV0->hasSameValue(ACPV1);
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}


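/// emitARMRegPlusImmediate - Emit a sequence of ADDri / SUBri instructions
/// that materializes DestReg = BaseReg + NumBytes, splitting the immediate
/// into rotated 8-bit chunks so that each piece fits a so_imm operand.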
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    BaseReg = DestReg;
  }
}

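/// rewriteARMFrameIndex - Replace the frame-index operand of MI (at
/// FrameRegIdx) with FrameReg and fold as much of Offset as the instruction's
/// addressing mode allows into its immediate field.  Returns true if the
/// entire offset was absorbed; any remainder is passed back through Offset.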
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}