ARMBaseInstrInfo.cpp revision 207618
1//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Base ARM implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARMBaseInstrInfo.h"
15#include "ARM.h"
16#include "ARMAddressingModes.h"
17#include "ARMConstantPoolValue.h"
18#include "ARMGenInstrInfo.inc"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMRegisterInfo.h"
21#include "llvm/Constants.h"
22#include "llvm/Function.h"
23#include "llvm/GlobalValue.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/CodeGen/LiveVariables.h"
26#include "llvm/CodeGen/MachineConstantPool.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineInstrBuilder.h"
29#include "llvm/CodeGen/MachineJumpTableInfo.h"
30#include "llvm/CodeGen/MachineMemOperand.h"
31#include "llvm/CodeGen/PseudoSourceValue.h"
32#include "llvm/MC/MCAsmInfo.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36using namespace llvm;
37
38static cl::opt<bool>
39EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
40               cl::desc("Enable ARM 2-addr to 3-addr conv"));
41
42ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
43  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
44    Subtarget(STI) {
45}
46
47MachineInstr *
48ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
49                                        MachineBasicBlock::iterator &MBBI,
50                                        LiveVariables *LV) const {
51  // FIXME: Thumb2 support.
52
53  if (!EnableARM3Addr)
54    return NULL;
55
56  MachineInstr *MI = MBBI;
57  MachineFunction &MF = *MI->getParent()->getParent();
58  unsigned TSFlags = MI->getDesc().TSFlags;
59  bool isPre = false;
60  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
61  default: return NULL;
62  case ARMII::IndexModePre:
63    isPre = true;
64    break;
65  case ARMII::IndexModePost:
66    break;
67  }
68
69  // Try splitting an indexed load/store into an un-indexed one plus an add/sub
70  // operation.
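  // Illustrative sketch of the intended split (exact opcodes depend on the
  // addressing mode and load/store width):
  //   pre-indexed:   ldr r0, [r1, #4]!   =>   add r1, r1, #4
  //                                            ldr r0, [r1]
  //   post-indexed:  ldr r0, [r1], #4    =>   ldr r0, [r1]
  //                                            add r1, r1, #4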
71  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
72  if (MemOpc == 0)
73    return NULL;
74
75  MachineInstr *UpdateMI = NULL;
76  MachineInstr *MemMI = NULL;
77  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
78  const TargetInstrDesc &TID = MI->getDesc();
79  unsigned NumOps = TID.getNumOperands();
80  bool isLoad = !TID.mayStore();
81  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
82  const MachineOperand &Base = MI->getOperand(2);
83  const MachineOperand &Offset = MI->getOperand(NumOps-3);
84  unsigned WBReg = WB.getReg();
85  unsigned BaseReg = Base.getReg();
86  unsigned OffReg = Offset.getReg();
87  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
88  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
89  switch (AddrMode) {
90  default:
91    assert(false && "Unknown indexed op!");
92    return NULL;
93  case ARMII::AddrMode2: {
94    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
95    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
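    // An ARM so_imm is an 8-bit value rotated right by an even amount;
    // getSOImmVal returns -1 when the offset has no such encoding.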
96    if (OffReg == 0) {
97      if (ARM_AM::getSOImmVal(Amt) == -1)
98        // Can't encode it in a so_imm operand. This transformation will
99        // add more than 1 instruction. Abandon!
100        return NULL;
101      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
102                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
103        .addReg(BaseReg).addImm(Amt)
104        .addImm(Pred).addReg(0).addReg(0);
105    } else if (Amt != 0) {
106      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
107      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
108      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
109                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
110        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
111        .addImm(Pred).addReg(0).addReg(0);
112    } else
113      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
114                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
115        .addReg(BaseReg).addReg(OffReg)
116        .addImm(Pred).addReg(0).addReg(0);
117    break;
118  }
119  case ARMII::AddrMode3 : {
120    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
121    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
122    if (OffReg == 0)
123      // Immediate is 8 bits. It's guaranteed to fit in a so_imm operand.
124      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
125                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
126        .addReg(BaseReg).addImm(Amt)
127        .addImm(Pred).addReg(0).addReg(0);
128    else
129      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
130                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
131        .addReg(BaseReg).addReg(OffReg)
132        .addImm(Pred).addReg(0).addReg(0);
133    break;
134  }
135  }
136
137  std::vector<MachineInstr*> NewMIs;
138  if (isPre) {
139    if (isLoad)
140      MemMI = BuildMI(MF, MI->getDebugLoc(),
141                      get(MemOpc), MI->getOperand(0).getReg())
142        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
143    else
144      MemMI = BuildMI(MF, MI->getDebugLoc(),
145                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
146        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
147    NewMIs.push_back(MemMI);
148    NewMIs.push_back(UpdateMI);
149  } else {
150    if (isLoad)
151      MemMI = BuildMI(MF, MI->getDebugLoc(),
152                      get(MemOpc), MI->getOperand(0).getReg())
153        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
154    else
155      MemMI = BuildMI(MF, MI->getDebugLoc(),
156                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
157        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
158    if (WB.isDead())
159      UpdateMI->getOperand(0).setIsDead();
160    NewMIs.push_back(UpdateMI);
161    NewMIs.push_back(MemMI);
162  }
163
164  // Transfer LiveVariables states, kill / dead info.
165  if (LV) {
166    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
167      MachineOperand &MO = MI->getOperand(i);
168      if (MO.isReg() && MO.getReg() &&
169          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
170        unsigned Reg = MO.getReg();
171
172        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
173        if (MO.isDef()) {
174          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
175          if (MO.isDead())
176            LV->addVirtualRegisterDead(Reg, NewMI);
177        }
178        if (MO.isUse() && MO.isKill()) {
179          for (unsigned j = 0; j < 2; ++j) {
180            // Look at the two new MI's in reverse order.
181            MachineInstr *NewMI = NewMIs[j];
182            if (!NewMI->readsRegister(Reg))
183              continue;
184            LV->addVirtualRegisterKilled(Reg, NewMI);
185            if (VI.removeKill(MI))
186              VI.Kills.push_back(NewMI);
187            break;
188          }
189        }
190      }
191    }
192  }
193
194  MFI->insert(MBBI, NewMIs[1]);
195  MFI->insert(MBBI, NewMIs[0]);
196  return NewMIs[0];
197}
198
199// Branch analysis.
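// AnalyzeBranch returns false when the block's branch structure is understood,
// setting TBB to the taken successor, FBB to the fall-through successor of a
// two-way branch (if any), and Cond to the predicate operands of a conditional
// branch; it returns true when the terminator sequence cannot be analyzed.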
200bool
201ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
202                                MachineBasicBlock *&FBB,
203                                SmallVectorImpl<MachineOperand> &Cond,
204                                bool AllowModify) const {
205  // If the block has no terminators, it just falls into the block after it.
206  MachineBasicBlock::iterator I = MBB.end();
207  if (I == MBB.begin())
208    return false;
209  --I;
210  while (I->isDebugValue()) {
211    if (I == MBB.begin())
212      return false;
213    --I;
214  }
215  if (!isUnpredicatedTerminator(I))
216    return false;
217
218  // Get the last instruction in the block.
219  MachineInstr *LastInst = I;
220
221  // If there is only one terminator instruction, process it.
222  unsigned LastOpc = LastInst->getOpcode();
223  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
224    if (isUncondBranchOpcode(LastOpc)) {
225      TBB = LastInst->getOperand(0).getMBB();
226      return false;
227    }
228    if (isCondBranchOpcode(LastOpc)) {
229      // Block ends with fall-through condbranch.
230      TBB = LastInst->getOperand(0).getMBB();
231      Cond.push_back(LastInst->getOperand(1));
232      Cond.push_back(LastInst->getOperand(2));
233      return false;
234    }
235    return true;  // Can't handle indirect branch.
236  }
237
238  // Get the instruction before it if it is a terminator.
239  MachineInstr *SecondLastInst = I;
240
241  // If there are three terminators, we don't know what sort of block this is.
242  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
243    return true;
244
245  // If the block ends with a B and a Bcc, handle it.
246  unsigned SecondLastOpc = SecondLastInst->getOpcode();
247  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
248    TBB =  SecondLastInst->getOperand(0).getMBB();
249    Cond.push_back(SecondLastInst->getOperand(1));
250    Cond.push_back(SecondLastInst->getOperand(2));
251    FBB = LastInst->getOperand(0).getMBB();
252    return false;
253  }
254
255  // If the block ends with two unconditional branches, handle it.  The second
256  // one is not executed, so remove it.
257  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
258    TBB = SecondLastInst->getOperand(0).getMBB();
259    I = LastInst;
260    if (AllowModify)
261      I->eraseFromParent();
262    return false;
263  }
264
265  // ...likewise if it ends with a branch table followed by an unconditional
266  // branch. The branch folder can create these, and we must get rid of them for
267  // correctness of Thumb constant islands.
268  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
269       isIndirectBranchOpcode(SecondLastOpc)) &&
270      isUncondBranchOpcode(LastOpc)) {
271    I = LastInst;
272    if (AllowModify)
273      I->eraseFromParent();
274    return true;
275  }
276
277  // Otherwise, can't handle this.
278  return true;
279}
280
281
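/// RemoveBranch - Erase the trailing branch sequence of MBB (an unconditional
/// branch and, if present, the conditional branch before it) and return the
/// number of instructions removed.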
282unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
283  MachineBasicBlock::iterator I = MBB.end();
284  if (I == MBB.begin()) return 0;
285  --I;
286  while (I->isDebugValue()) {
287    if (I == MBB.begin())
288      return 0;
289    --I;
290  }
291  if (!isUncondBranchOpcode(I->getOpcode()) &&
292      !isCondBranchOpcode(I->getOpcode()))
293    return 0;
294
295  // Remove the branch.
296  I->eraseFromParent();
297
298  I = MBB.end();
299
300  if (I == MBB.begin()) return 1;
301  --I;
302  if (!isCondBranchOpcode(I->getOpcode()))
303    return 1;
304
305  // Remove the branch.
306  I->eraseFromParent();
307  return 2;
308}
309
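/// InsertBranch - Emit the branch sequence using the ARM, Thumb1 or Thumb2
/// opcodes (B/Bcc, tB/tBcc or t2B/t2Bcc) and return the number of
/// instructions inserted: one for a single branch, two for a conditional
/// branch to TBB followed by an unconditional branch to FBB.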
310unsigned
311ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
312                               MachineBasicBlock *FBB,
313                             const SmallVectorImpl<MachineOperand> &Cond) const {
314  // FIXME this should probably have a DebugLoc argument
315  DebugLoc dl;
316
317  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
318  int BOpc   = !AFI->isThumbFunction()
319    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
320  int BccOpc = !AFI->isThumbFunction()
321    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
322
323  // Shouldn't be a fall through.
324  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
325  assert((Cond.size() == 2 || Cond.size() == 0) &&
326         "ARM branch conditions have two components!");
327
328  if (FBB == 0) {
329    if (Cond.empty()) // Unconditional branch?
330      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
331    else
332      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
333        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
334    return 1;
335  }
336
337  // Two-way conditional branch.
338  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
339    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
340  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
341  return 2;
342}
343
344bool ARMBaseInstrInfo::
345ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
346  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
347  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
348  return false;
349}
350
351bool ARMBaseInstrInfo::
352PredicateInstruction(MachineInstr *MI,
353                     const SmallVectorImpl<MachineOperand> &Pred) const {
354  unsigned Opc = MI->getOpcode();
355  if (isUncondBranchOpcode(Opc)) {
356    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
357    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
358    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
359    return true;
360  }
361
362  int PIdx = MI->findFirstPredOperandIdx();
363  if (PIdx != -1) {
364    MachineOperand &PMO = MI->getOperand(PIdx);
365    PMO.setImm(Pred[0].getImm());
366    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
367    return true;
368  }
369  return false;
370}
371
372bool ARMBaseInstrInfo::
373SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
374                  const SmallVectorImpl<MachineOperand> &Pred2) const {
375  if (Pred1.size() > 2 || Pred2.size() > 2)
376    return false;
377
378  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
379  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
380  if (CC1 == CC2)
381    return true;
382
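  // Pred1 subsumes Pred2 when every flags state that satisfies Pred2 also
  // satisfies Pred1, e.g. HI (unsigned >) implies HS (unsigned >=) and
  // GT (signed >) implies GE (signed >=).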
383  switch (CC1) {
384  default:
385    return false;
386  case ARMCC::AL:
387    return true;
388  case ARMCC::HS:
389    return CC2 == ARMCC::HI;
390  case ARMCC::LS:
391    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
392  case ARMCC::GE:
393    return CC2 == ARMCC::GT;
394  case ARMCC::LE:
395    return CC2 == ARMCC::LT;
396  }
397}
398
399bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
400                                    std::vector<MachineOperand> &Pred) const {
401  // FIXME: This confuses implicit_def with optional CPSR def.
402  const TargetInstrDesc &TID = MI->getDesc();
403  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
404    return false;
405
406  bool Found = false;
407  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
408    const MachineOperand &MO = MI->getOperand(i);
409    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
410      Pred.push_back(MO);
411      Found = true;
412    }
413  }
414
415  return Found;
416}
417
418/// isPredicable - Return true if the specified instruction can be predicated.
419/// By default, this returns true for every instruction with a
420/// PredicateOperand.
421bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
422  const TargetInstrDesc &TID = MI->getDesc();
423  if (!TID.isPredicable())
424    return false;
425
426  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
427    ARMFunctionInfo *AFI =
428      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
429    return AFI->isThumb2Function();
430  }
431  return true;
432}
433
434/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
435DISABLE_INLINE
436static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
437                                unsigned JTI);
438static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
439                                unsigned JTI) {
440  assert(JTI < JT.size());
441  return JT[JTI].MBBs.size();
442}
443
444/// GetInstSize - Return the size of the specified MachineInstr.
445///
446unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
447  const MachineBasicBlock &MBB = *MI->getParent();
448  const MachineFunction *MF = MBB.getParent();
449  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
450
451  // Basic size info comes from the TSFlags field.
452  const TargetInstrDesc &TID = MI->getDesc();
453  unsigned TSFlags = TID.TSFlags;
454
455  unsigned Opc = MI->getOpcode();
456  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
457  default: {
458    // If this machine instr is an inline asm, measure it.
459    if (MI->getOpcode() == ARM::INLINEASM)
460      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
461    if (MI->isLabel())
462      return 0;
463    switch (Opc) {
464    default:
465      llvm_unreachable("Unknown or unset size field for instr!");
466    case TargetOpcode::IMPLICIT_DEF:
467    case TargetOpcode::KILL:
468    case TargetOpcode::DBG_LABEL:
469    case TargetOpcode::EH_LABEL:
470    case TargetOpcode::DBG_VALUE:
471      return 0;
472    }
473    break;
474  }
475  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
476  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
477  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
478  case ARMII::SizeSpecial: {
479    switch (Opc) {
480    case ARM::CONSTPOOL_ENTRY:
481      // If this machine instr is a constant pool entry, its size is recorded as
482      // operand #2.
483      return MI->getOperand(2).getImm();
484    case ARM::Int_eh_sjlj_setjmp:
485    case ARM::Int_eh_sjlj_setjmp_nofp:
486      return 24;
487    case ARM::tInt_eh_sjlj_setjmp:
488    case ARM::t2Int_eh_sjlj_setjmp:
489    case ARM::t2Int_eh_sjlj_setjmp_nofp:
490      return 14;
491    case ARM::BR_JTr:
492    case ARM::BR_JTm:
493    case ARM::BR_JTadd:
494    case ARM::tBR_JTr:
495    case ARM::t2BR_JT:
496    case ARM::t2TBB:
497    case ARM::t2TBH: {
498      // These are jumptable branches, i.e. a branch followed by an inlined
499      // jumptable. The size is the branch plus NumEntries * EntrySize; each
500      // entry is four bytes for ARM tables, one byte for TBB, two for TBH.
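      // For example (illustrative): a t2TBB table with 5 entries is padded to
      // 6 one-byte entries, so its size is 6 * 1 + 4 = 10 bytes.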
501      unsigned EntrySize = (Opc == ARM::t2TBB)
502        ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
503      unsigned NumOps = TID.getNumOperands();
504      MachineOperand JTOP =
505        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
506      unsigned JTI = JTOP.getIndex();
507      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
508      assert(MJTI != 0);
509      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
510      assert(JTI < JT.size());
511      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
512      // aligned. The assembler / linker may add 2 bytes of padding just before
513      // the JT entries.  The size does not include this padding; the
514      // constant islands pass does separate bookkeeping for it.
515      // FIXME: If we know the size of the function is less than (1 << 16) *2
516      // bytes, we can use 16-bit entries instead. Then there won't be an
517      // alignment issue.
518      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
519      unsigned NumEntries = getNumJTEntries(JT, JTI);
520      if (Opc == ARM::t2TBB && (NumEntries & 1))
521        // Make sure the instruction that follows TBB is 2-byte aligned.
522        // FIXME: Constant island pass should insert an "ALIGN" instruction
523        // instead.
524        ++NumEntries;
525      return NumEntries * EntrySize + InstSize;
526    }
527    default:
528      // Otherwise, pseudo-instruction sizes are zero.
529      return 0;
530    }
531  }
532  }
533  return 0; // Not reached
534}
535
536/// Return true if the instruction is a register-to-register move and
537/// set the source and destination registers in the passed parameters.
538///
539bool
540ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
541                              unsigned &SrcReg, unsigned &DstReg,
542                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
543  SrcSubIdx = DstSubIdx = 0; // No sub-registers.
544
545  switch (MI.getOpcode()) {
546  default: break;
547  case ARM::VMOVS:
548  case ARM::VMOVD:
549  case ARM::VMOVDneon:
550  case ARM::VMOVQ: {
551    SrcReg = MI.getOperand(1).getReg();
552    DstReg = MI.getOperand(0).getReg();
553    return true;
554  }
555  case ARM::MOVr:
556  case ARM::tMOVr:
557  case ARM::tMOVgpr2tgpr:
558  case ARM::tMOVtgpr2gpr:
559  case ARM::tMOVgpr2gpr:
560  case ARM::t2MOVr: {
561    assert(MI.getDesc().getNumOperands() >= 2 &&
562           MI.getOperand(0).isReg() &&
563           MI.getOperand(1).isReg() &&
564           "Invalid ARM MOV instruction");
565    SrcReg = MI.getOperand(1).getReg();
566    DstReg = MI.getOperand(0).getReg();
567    return true;
568  }
569  }
570
571  return false;
572}
573
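/// isLoadFromStackSlot - If the instruction is a direct load from a stack
/// slot, set FrameIndex to the slot's frame index and return the destination
/// register; otherwise return 0.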
574unsigned
575ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
576                                      int &FrameIndex) const {
577  switch (MI->getOpcode()) {
578  default: break;
579  case ARM::LDR:
580  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
581    if (MI->getOperand(1).isFI() &&
582        MI->getOperand(2).isReg() &&
583        MI->getOperand(3).isImm() &&
584        MI->getOperand(2).getReg() == 0 &&
585        MI->getOperand(3).getImm() == 0) {
586      FrameIndex = MI->getOperand(1).getIndex();
587      return MI->getOperand(0).getReg();
588    }
589    break;
590  case ARM::t2LDRi12:
591  case ARM::tRestore:
592    if (MI->getOperand(1).isFI() &&
593        MI->getOperand(2).isImm() &&
594        MI->getOperand(2).getImm() == 0) {
595      FrameIndex = MI->getOperand(1).getIndex();
596      return MI->getOperand(0).getReg();
597    }
598    break;
599  case ARM::VLDRD:
600  case ARM::VLDRS:
601    if (MI->getOperand(1).isFI() &&
602        MI->getOperand(2).isImm() &&
603        MI->getOperand(2).getImm() == 0) {
604      FrameIndex = MI->getOperand(1).getIndex();
605      return MI->getOperand(0).getReg();
606    }
607    break;
608  }
609
610  return 0;
611}
612
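/// isStoreToStackSlot - If the instruction is a direct store to a stack slot,
/// set FrameIndex to the slot's frame index and return the register being
/// stored; otherwise return 0.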
613unsigned
614ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
615                                     int &FrameIndex) const {
616  switch (MI->getOpcode()) {
617  default: break;
618  case ARM::STR:
619  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
620    if (MI->getOperand(1).isFI() &&
621        MI->getOperand(2).isReg() &&
622        MI->getOperand(3).isImm() &&
623        MI->getOperand(2).getReg() == 0 &&
624        MI->getOperand(3).getImm() == 0) {
625      FrameIndex = MI->getOperand(1).getIndex();
626      return MI->getOperand(0).getReg();
627    }
628    break;
629  case ARM::t2STRi12:
630  case ARM::tSpill:
631    if (MI->getOperand(1).isFI() &&
632        MI->getOperand(2).isImm() &&
633        MI->getOperand(2).getImm() == 0) {
634      FrameIndex = MI->getOperand(1).getIndex();
635      return MI->getOperand(0).getReg();
636    }
637    break;
638  case ARM::VSTRD:
639  case ARM::VSTRS:
640    if (MI->getOperand(1).isFI() &&
641        MI->getOperand(2).isImm() &&
642        MI->getOperand(2).getImm() == 0) {
643      FrameIndex = MI->getOperand(1).getIndex();
644      return MI->getOperand(0).getReg();
645    }
646    break;
647  }
648
649  return 0;
650}
651
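/// copyRegToReg - Emit a register-to-register copy from SrcReg to DestReg,
/// first mapping the restricted GPR and VFP/NEON subclasses onto their common
/// parent classes. Returns false if no single copy instruction is available
/// for the requested class pair.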
652bool
653ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
654                               MachineBasicBlock::iterator I,
655                               unsigned DestReg, unsigned SrcReg,
656                               const TargetRegisterClass *DestRC,
657                               const TargetRegisterClass *SrcRC) const {
658  DebugLoc DL;
659  if (I != MBB.end()) DL = I->getDebugLoc();
660
661  // tGPR is sometimes used in ARM instructions that need to avoid using
662  // certain registers.  Just treat it as GPR here.
663  if (DestRC == ARM::tGPRRegisterClass)
664    DestRC = ARM::GPRRegisterClass;
665  if (SrcRC == ARM::tGPRRegisterClass)
666    SrcRC = ARM::GPRRegisterClass;
667
668  // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
669  if (DestRC == ARM::DPR_8RegisterClass)
670    DestRC = ARM::DPR_VFP2RegisterClass;
671  if (SrcRC == ARM::DPR_8RegisterClass)
672    SrcRC = ARM::DPR_VFP2RegisterClass;
673
674  // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
675  if (DestRC == ARM::QPR_VFP2RegisterClass ||
676      DestRC == ARM::QPR_8RegisterClass)
677    DestRC = ARM::QPRRegisterClass;
678  if (SrcRC == ARM::QPR_VFP2RegisterClass ||
679      SrcRC == ARM::QPR_8RegisterClass)
680    SrcRC = ARM::QPRRegisterClass;
681
682  // Disallow copies of unequal sizes.
683  if (DestRC != SrcRC && DestRC->getSize() != SrcRC->getSize())
684    return false;
685
686  if (DestRC == ARM::GPRRegisterClass) {
687    if (SrcRC == ARM::SPRRegisterClass)
688      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVRS), DestReg)
689                     .addReg(SrcReg));
690    else
691      AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
692                                          DestReg).addReg(SrcReg)));
693  } else {
694    unsigned Opc;
695
696    if (DestRC == ARM::SPRRegisterClass)
697      Opc = (SrcRC == ARM::GPRRegisterClass ? ARM::VMOVSR : ARM::VMOVS);
698    else if (DestRC == ARM::DPRRegisterClass)
699      Opc = ARM::VMOVD;
700    else if (DestRC == ARM::DPR_VFP2RegisterClass ||
701             SrcRC == ARM::DPR_VFP2RegisterClass)
702      // Always use a NEON reg-reg move if the source or dest is a NEON-only regclass.
703      Opc = ARM::VMOVDneon;
704    else if (DestRC == ARM::QPRRegisterClass)
705      Opc = ARM::VMOVQ;
706    else
707      return false;
708
709    AddDefaultPred(BuildMI(MBB, I, DL, get(Opc), DestReg)
710                   .addReg(SrcReg));
711  }
712
713  return true;
714}
715
716void ARMBaseInstrInfo::
717storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
718                    unsigned SrcReg, bool isKill, int FI,
719                    const TargetRegisterClass *RC) const {
720  DebugLoc DL;
721  if (I != MBB.end()) DL = I->getDebugLoc();
722  MachineFunction &MF = *MBB.getParent();
723  MachineFrameInfo &MFI = *MF.getFrameInfo();
724  unsigned Align = MFI.getObjectAlignment(FI);
725
726  MachineMemOperand *MMO =
727    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
728                            MachineMemOperand::MOStore, 0,
729                            MFI.getObjectSize(FI),
730                            Align);
731
732  // tGPR is sometimes used in ARM instructions that need to avoid using
733  // certain registers.  Just treat it as GPR here.
734  if (RC == ARM::tGPRRegisterClass)
735    RC = ARM::GPRRegisterClass;
736
737  if (RC == ARM::GPRRegisterClass) {
738    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
739                   .addReg(SrcReg, getKillRegState(isKill))
740                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
741  } else if (RC == ARM::DPRRegisterClass ||
742             RC == ARM::DPR_VFP2RegisterClass ||
743             RC == ARM::DPR_8RegisterClass) {
744    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
745                   .addReg(SrcReg, getKillRegState(isKill))
746                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
747  } else if (RC == ARM::SPRRegisterClass) {
748    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
749                   .addReg(SrcReg, getKillRegState(isKill))
750                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
751  } else {
752    assert((RC == ARM::QPRRegisterClass ||
753            RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
754    // FIXME: Neon instructions should support predicates
755    if (Align >= 16 && (getRegisterInfo().canRealignStack(MF))) {
756      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
757                     .addFrameIndex(FI).addImm(128)
758                     .addMemOperand(MMO)
759                     .addReg(SrcReg, getKillRegState(isKill)));
760    } else {
761      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQ)).
762                     addReg(SrcReg, getKillRegState(isKill))
763                     .addFrameIndex(FI)
764                     .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
765                     .addMemOperand(MMO));
766    }
767  }
768}
769
770void ARMBaseInstrInfo::
771loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
772                     unsigned DestReg, int FI,
773                     const TargetRegisterClass *RC) const {
774  DebugLoc DL;
775  if (I != MBB.end()) DL = I->getDebugLoc();
776  MachineFunction &MF = *MBB.getParent();
777  MachineFrameInfo &MFI = *MF.getFrameInfo();
778  unsigned Align = MFI.getObjectAlignment(FI);
779
780  MachineMemOperand *MMO =
781    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
782                            MachineMemOperand::MOLoad, 0,
783                            MFI.getObjectSize(FI),
784                            Align);
785
786  // tGPR is sometimes used in ARM instructions that need to avoid using
787  // certain registers.  Just treat it as GPR here.
788  if (RC == ARM::tGPRRegisterClass)
789    RC = ARM::GPRRegisterClass;
790
791  if (RC == ARM::GPRRegisterClass) {
792    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
793                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
794  } else if (RC == ARM::DPRRegisterClass ||
795             RC == ARM::DPR_VFP2RegisterClass ||
796             RC == ARM::DPR_8RegisterClass) {
797    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
798                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
799  } else if (RC == ARM::SPRRegisterClass) {
800    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
801                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
802  } else {
803    assert((RC == ARM::QPRRegisterClass ||
804            RC == ARM::QPR_VFP2RegisterClass ||
805            RC == ARM::QPR_8RegisterClass) && "Unknown regclass!");
806    if (Align >= 16
807        && (getRegisterInfo().canRealignStack(MF))) {
808      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
809                     .addFrameIndex(FI).addImm(128)
810                     .addMemOperand(MMO));
811    } else {
812      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg)
813                     .addFrameIndex(FI)
814                     .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
815                     .addMemOperand(MMO));
816    }
817  }
818}
819
820MachineInstr*
821ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
822                                           int FrameIx, uint64_t Offset,
823                                           const MDNode *MDPtr,
824                                           DebugLoc DL) const {
825  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
826    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
827  return &*MIB;
828}
829
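/// foldMemoryOperandImpl - Try to fold a plain register move (MOVr, t2MOVr,
/// the Thumb GPR moves, VMOVS or VMOVD) into a load from or store to stack
/// slot FI. Returns the folded instruction, or NULL if the move cannot be
/// folded (e.g. when it also updates CPSR).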
830MachineInstr *ARMBaseInstrInfo::
831foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
832                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
833  if (Ops.size() != 1) return NULL;
834
835  unsigned OpNum = Ops[0];
836  unsigned Opc = MI->getOpcode();
837  MachineInstr *NewMI = NULL;
838  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
839    // If it is updating CPSR, then it cannot be folded.
840    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
841      return NULL;
842    unsigned Pred = MI->getOperand(2).getImm();
843    unsigned PredReg = MI->getOperand(3).getReg();
844    if (OpNum == 0) { // move -> store
845      unsigned SrcReg = MI->getOperand(1).getReg();
846      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
847      bool isKill = MI->getOperand(1).isKill();
848      bool isUndef = MI->getOperand(1).isUndef();
849      if (Opc == ARM::MOVr)
850        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
851          .addReg(SrcReg,
852                  getKillRegState(isKill) | getUndefRegState(isUndef),
853                  SrcSubReg)
854          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
855      else // ARM::t2MOVr
856        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
857          .addReg(SrcReg,
858                  getKillRegState(isKill) | getUndefRegState(isUndef),
859                  SrcSubReg)
860          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
861    } else {          // move -> load
862      unsigned DstReg = MI->getOperand(0).getReg();
863      unsigned DstSubReg = MI->getOperand(0).getSubReg();
864      bool isDead = MI->getOperand(0).isDead();
865      bool isUndef = MI->getOperand(0).isUndef();
866      if (Opc == ARM::MOVr)
867        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
868          .addReg(DstReg,
869                  RegState::Define |
870                  getDeadRegState(isDead) |
871                  getUndefRegState(isUndef), DstSubReg)
872          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
873      else // ARM::t2MOVr
874        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
875          .addReg(DstReg,
876                  RegState::Define |
877                  getDeadRegState(isDead) |
878                  getUndefRegState(isUndef), DstSubReg)
879          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
880    }
881  } else if (Opc == ARM::tMOVgpr2gpr ||
882             Opc == ARM::tMOVtgpr2gpr ||
883             Opc == ARM::tMOVgpr2tgpr) {
884    if (OpNum == 0) { // move -> store
885      unsigned SrcReg = MI->getOperand(1).getReg();
886      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
887      bool isKill = MI->getOperand(1).isKill();
888      bool isUndef = MI->getOperand(1).isUndef();
889      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
890        .addReg(SrcReg,
891                getKillRegState(isKill) | getUndefRegState(isUndef),
892                SrcSubReg)
893        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
894    } else {          // move -> load
895      unsigned DstReg = MI->getOperand(0).getReg();
896      unsigned DstSubReg = MI->getOperand(0).getSubReg();
897      bool isDead = MI->getOperand(0).isDead();
898      bool isUndef = MI->getOperand(0).isUndef();
899      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
900        .addReg(DstReg,
901                RegState::Define |
902                getDeadRegState(isDead) |
903                getUndefRegState(isUndef),
904                DstSubReg)
905        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
906    }
907  } else if (Opc == ARM::VMOVS) {
908    unsigned Pred = MI->getOperand(2).getImm();
909    unsigned PredReg = MI->getOperand(3).getReg();
910    if (OpNum == 0) { // move -> store
911      unsigned SrcReg = MI->getOperand(1).getReg();
912      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
913      bool isKill = MI->getOperand(1).isKill();
914      bool isUndef = MI->getOperand(1).isUndef();
915      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
916        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
917                SrcSubReg)
918        .addFrameIndex(FI)
919        .addImm(0).addImm(Pred).addReg(PredReg);
920    } else {          // move -> load
921      unsigned DstReg = MI->getOperand(0).getReg();
922      unsigned DstSubReg = MI->getOperand(0).getSubReg();
923      bool isDead = MI->getOperand(0).isDead();
924      bool isUndef = MI->getOperand(0).isUndef();
925      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
926        .addReg(DstReg,
927                RegState::Define |
928                getDeadRegState(isDead) |
929                getUndefRegState(isUndef),
930                DstSubReg)
931        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
932    }
933  }
934  else if (Opc == ARM::VMOVD) {
935    unsigned Pred = MI->getOperand(2).getImm();
936    unsigned PredReg = MI->getOperand(3).getReg();
937    if (OpNum == 0) { // move -> store
938      unsigned SrcReg = MI->getOperand(1).getReg();
939      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
940      bool isKill = MI->getOperand(1).isKill();
941      bool isUndef = MI->getOperand(1).isUndef();
942      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
943        .addReg(SrcReg,
944                getKillRegState(isKill) | getUndefRegState(isUndef),
945                SrcSubReg)
946        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
947    } else {          // move -> load
948      unsigned DstReg = MI->getOperand(0).getReg();
949      unsigned DstSubReg = MI->getOperand(0).getSubReg();
950      bool isDead = MI->getOperand(0).isDead();
951      bool isUndef = MI->getOperand(0).isUndef();
952      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
953        .addReg(DstReg,
954                RegState::Define |
955                getDeadRegState(isDead) |
956                getUndefRegState(isUndef),
957                DstSubReg)
958        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
959    }
960  }
961
962  return NewMI;
963}
964
965MachineInstr*
966ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
967                                        MachineInstr* MI,
968                                        const SmallVectorImpl<unsigned> &Ops,
969                                        MachineInstr* LoadMI) const {
970  // FIXME
971  return 0;
972}
973
974bool
975ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
976                                   const SmallVectorImpl<unsigned> &Ops) const {
977  if (Ops.size() != 1) return false;
978
979  unsigned Opc = MI->getOpcode();
980  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
981    // If it is updating CPSR, then it cannot be folded.
982    return MI->getOperand(4).getReg() != ARM::CPSR ||
983      MI->getOperand(4).isDead();
984  } else if (Opc == ARM::tMOVgpr2gpr ||
985             Opc == ARM::tMOVtgpr2gpr ||
986             Opc == ARM::tMOVgpr2tgpr) {
987    return true;
988  } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD) {
989    return true;
990  } else if (Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
991    return false; // FIXME
992  }
993
994  return false;
995}
996
997/// Create a copy of a constant pool value. Update CPI to the new index and return
998/// the label UID.
999static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
1000  MachineConstantPool *MCP = MF.getConstantPool();
1001  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1002
1003  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
1004  assert(MCPE.isMachineConstantPoolEntry() &&
1005         "Expecting a machine constantpool entry!");
1006  ARMConstantPoolValue *ACPV =
1007    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
1008
1009  unsigned PCLabelId = AFI->createConstPoolEntryUId();
1010  ARMConstantPoolValue *NewCPV = 0;
1011  if (ACPV->isGlobalValue())
1012    NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
1013                                      ARMCP::CPValue, 4);
1014  else if (ACPV->isExtSymbol())
1015    NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
1016                                      ACPV->getSymbol(), PCLabelId, 4);
1017  else if (ACPV->isBlockAddress())
1018    NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
1019                                      ARMCP::CPBlockAddress, 4);
1020  else
1021    llvm_unreachable("Unexpected ARM constantpool value type!!");
1022  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
1023  return PCLabelId;
1024}
1025
1026void ARMBaseInstrInfo::
1027reMaterialize(MachineBasicBlock &MBB,
1028              MachineBasicBlock::iterator I,
1029              unsigned DestReg, unsigned SubIdx,
1030              const MachineInstr *Orig,
1031              const TargetRegisterInfo *TRI) const {
1032  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
1033    DestReg = TRI->getSubReg(DestReg, SubIdx);
1034    SubIdx = 0;
1035  }
1036
1037  unsigned Opcode = Orig->getOpcode();
1038  switch (Opcode) {
1039  default: {
1040    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
1041    MI->getOperand(0).setReg(DestReg);
1042    MBB.insert(I, MI);
1043    break;
1044  }
1045  case ARM::tLDRpci_pic:
1046  case ARM::t2LDRpci_pic: {
1047    MachineFunction &MF = *MBB.getParent();
1048    unsigned CPI = Orig->getOperand(1).getIndex();
1049    unsigned PCLabelId = duplicateCPV(MF, CPI);
1050    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
1051                                      DestReg)
1052      .addConstantPoolIndex(CPI).addImm(PCLabelId);
1053    (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
1054    break;
1055  }
1056  }
1057
1058  MachineInstr *NewMI = prior(I);
1059  NewMI->getOperand(0).setSubReg(SubIdx);
1060}
1061
1062MachineInstr *
1063ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
1064  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
1065  switch(Orig->getOpcode()) {
1066  case ARM::tLDRpci_pic:
1067  case ARM::t2LDRpci_pic: {
1068    unsigned CPI = Orig->getOperand(1).getIndex();
1069    unsigned PCLabelId = duplicateCPV(MF, CPI);
1070    Orig->getOperand(1).setIndex(CPI);
1071    Orig->getOperand(2).setImm(PCLabelId);
1072    break;
1073  }
1074  }
1075  return MI;
1076}
1077
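/// produceSameValue - Two PC-relative constant-pool loads are considered to
/// produce the same value when their ARMConstantPoolValues compare equal,
/// even though they reference different constant-pool indices; all other
/// instructions fall back to an operand-by-operand identity check.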
1078bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
1079                                        const MachineInstr *MI1) const {
1080  int Opcode = MI0->getOpcode();
1081  if (Opcode == ARM::t2LDRpci ||
1082      Opcode == ARM::t2LDRpci_pic ||
1083      Opcode == ARM::tLDRpci ||
1084      Opcode == ARM::tLDRpci_pic) {
1085    if (MI1->getOpcode() != Opcode)
1086      return false;
1087    if (MI0->getNumOperands() != MI1->getNumOperands())
1088      return false;
1089
1090    const MachineOperand &MO0 = MI0->getOperand(1);
1091    const MachineOperand &MO1 = MI1->getOperand(1);
1092    if (MO0.getOffset() != MO1.getOffset())
1093      return false;
1094
1095    const MachineFunction *MF = MI0->getParent()->getParent();
1096    const MachineConstantPool *MCP = MF->getConstantPool();
1097    int CPI0 = MO0.getIndex();
1098    int CPI1 = MO1.getIndex();
1099    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1100    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1101    ARMConstantPoolValue *ACPV0 =
1102      static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1103    ARMConstantPoolValue *ACPV1 =
1104      static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
1105    return ACPV0->hasSameValue(ACPV1);
1106  }
1107
1108  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
1109}
1110
1111/// getInstrPredicate - If instruction is predicated, returns its predicate
1112/// condition, otherwise returns AL. It also returns the condition code
1113/// register by reference.
1114ARMCC::CondCodes
1115llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
1116  int PIdx = MI->findFirstPredOperandIdx();
1117  if (PIdx == -1) {
1118    PredReg = 0;
1119    return ARMCC::AL;
1120  }
1121
1122  PredReg = MI->getOperand(PIdx+1).getReg();
1123  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
1124}
1125
1126
1127int llvm::getMatchingCondBranchOpcode(int Opc) {
1128  if (Opc == ARM::B)
1129    return ARM::Bcc;
1130  else if (Opc == ARM::tB)
1131    return ARM::tBcc;
1132  else if (Opc == ARM::t2B)
1133    return ARM::t2Bcc;
1134
1135  llvm_unreachable("Unknown unconditional branch opcode!");
1136  return 0;
1137}
1138
1139
1140void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
1141                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
1142                               unsigned DestReg, unsigned BaseReg, int NumBytes,
1143                               ARMCC::CondCodes Pred, unsigned PredReg,
1144                               const ARMBaseInstrInfo &TII) {
1145  bool isSub = NumBytes < 0;
1146  if (isSub) NumBytes = -NumBytes;
1147
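  // Peel off one so_imm-encodable chunk of NumBytes per iteration, emitting a
  // single ADDri / SUBri for each chunk and chaining the partial result
  // through DestReg.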
1148  while (NumBytes) {
1149    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
1150    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
1151    assert(ThisVal && "Didn't extract field correctly");
1152
1153    // We will handle these bits from offset, clear them.
1154    NumBytes &= ~ThisVal;
1155
1156    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
1157
1158    // Build the new ADD / SUB.
1159    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
1160    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
1161      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
1162      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
1163    BaseReg = DestReg;
1164  }
1165}
1166
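/// rewriteARMFrameIndex - Rewrite the frame-index operand of MI at FrameRegIdx
/// to use FrameReg, folding as much of Offset into the instruction's immediate
/// field as its addressing mode allows. Returns true if the offset was folded
/// completely; any unencoded remainder is passed back through Offset.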
1167bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
1168                                unsigned FrameReg, int &Offset,
1169                                const ARMBaseInstrInfo &TII) {
1170  unsigned Opcode = MI.getOpcode();
1171  const TargetInstrDesc &Desc = MI.getDesc();
1172  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1173  bool isSub = false;
1174
1175  // Memory operands in inline assembly always use AddrMode2.
1176  if (Opcode == ARM::INLINEASM)
1177    AddrMode = ARMII::AddrMode2;
1178
1179  if (Opcode == ARM::ADDri) {
1180    Offset += MI.getOperand(FrameRegIdx+1).getImm();
1181    if (Offset == 0) {
1182      // Turn it into a move.
1183      MI.setDesc(TII.get(ARM::MOVr));
1184      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1185      MI.RemoveOperand(FrameRegIdx+1);
1186      Offset = 0;
1187      return true;
1188    } else if (Offset < 0) {
1189      Offset = -Offset;
1190      isSub = true;
1191      MI.setDesc(TII.get(ARM::SUBri));
1192    }
1193
1194    // Common case: small offset, fits into instruction.
1195    if (ARM_AM::getSOImmVal(Offset) != -1) {
1196      // Replace the FrameIndex with sp / fp
1197      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1198      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
1199      Offset = 0;
1200      return true;
1201    }
1202
1203    // Otherwise, pull as much of the immediate into this ADDri/SUBri
1204    // as possible.
1205    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
1206    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
1207
1208    // We will handle these bits from offset, clear them.
1209    Offset &= ~ThisImmVal;
1210
1211    // Get the properly encoded SOImmVal field.
1212    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
1213           "Bit extraction didn't work?");
1214    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
1215  } else {
1216    unsigned ImmIdx = 0;
1217    int InstrOffs = 0;
1218    unsigned NumBits = 0;
1219    unsigned Scale = 1;
1220    switch (AddrMode) {
1221    case ARMII::AddrMode2: {
1222      ImmIdx = FrameRegIdx+2;
1223      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
1224      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1225        InstrOffs *= -1;
1226      NumBits = 12;
1227      break;
1228    }
1229    case ARMII::AddrMode3: {
1230      ImmIdx = FrameRegIdx+2;
1231      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
1232      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1233        InstrOffs *= -1;
1234      NumBits = 8;
1235      break;
1236    }
1237    case ARMII::AddrMode4:
1238    case ARMII::AddrMode6:
1239      // Can't fold any offset even if it's zero.
1240      return false;
1241    case ARMII::AddrMode5: {
1242      ImmIdx = FrameRegIdx+1;
1243      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
1244      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1245        InstrOffs *= -1;
1246      NumBits = 8;
1247      Scale = 4;
1248      break;
1249    }
1250    default:
1251      llvm_unreachable("Unsupported addressing mode!");
1252      break;
1253    }
1254
1255    Offset += InstrOffs * Scale;
1256    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
1257    if (Offset < 0) {
1258      Offset = -Offset;
1259      isSub = true;
1260    }
1261
1262    // Attempt to fold address comp. if opcode has offset bits
1263    if (NumBits > 0) {
1264      // Common case: small offset, fits into instruction.
1265      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
1266      int ImmedOffset = Offset / Scale;
1267      unsigned Mask = (1 << NumBits) - 1;
1268      if ((unsigned)Offset <= Mask * Scale) {
1269        // Replace the FrameIndex with sp
1270        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1271        if (isSub)
1272          ImmedOffset |= 1 << NumBits;
1273        ImmOp.ChangeToImmediate(ImmedOffset);
1274        Offset = 0;
1275        return true;
1276      }
1277
1278      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
1279      ImmedOffset = ImmedOffset & Mask;
1280      if (isSub)
1281        ImmedOffset |= 1 << NumBits;
1282      ImmOp.ChangeToImmediate(ImmedOffset);
1283      Offset &= ~(Mask*Scale);
1284    }
1285  }
1286
1287  Offset = (isSub) ? -Offset : Offset;
1288  return Offset == 0;
1289}
1290