1//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "Thumb2InstrInfo.h"
15#include "ARM.h"
16#include "ARMConstantPoolValue.h"
17#include "ARMAddressingModes.h"
18#include "ARMGenInstrInfo.inc"
19#include "ARMMachineFunctionInfo.h"
20#include "Thumb2HazardRecognizer.h"
21#include "Thumb2InstrInfo.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/MachineMemOperand.h"
25#include "llvm/CodeGen/PseudoSourceValue.h"
26#include "llvm/ADT/SmallVector.h"
25#include "Thumb2InstrInfo.h"
27#include "llvm/Support/CommandLine.h"
28
29using namespace llvm;
30
31static cl::opt<unsigned>
32IfCvtLimit("thumb2-ifcvt-limit", cl::Hidden,
33 cl::desc("Thumb2 if-conversion limit (default 3)"),
34 cl::init(3));
35
36static cl::opt<unsigned>
37IfCvtDiamondLimit("thumb2-ifcvt-diamond-limit", cl::Hidden,
38 cl::desc("Thumb2 diamond if-conversion limit (default 3)"),
39 cl::init(3));
40
41Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
42 : ARMBaseInstrInfo(STI), RI(*this, STI) {
43}
44
45unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
46 // FIXME
47 return 0;
48}
49
50void
51Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
52 MachineBasicBlock *NewDest) const {
53 MachineBasicBlock *MBB = Tail->getParent();
54 ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
55 if (!AFI->hasITBlocks()) {
56 TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
57 return;
58 }
59
60 // If the first instruction of Tail is predicated, we may have to update
61 // the IT instruction.
62 unsigned PredReg = 0;
63 ARMCC::CondCodes CC = llvm::getInstrPredicate(Tail, PredReg);
64 MachineBasicBlock::iterator MBBI = Tail;
65 if (CC != ARMCC::AL)
66 // Expecting at least the t2IT instruction before it.
67 --MBBI;
68
69 // Actually replace the tail.
70 TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
71
72 // Fix up IT.
73 if (CC != ARMCC::AL) {
74 MachineBasicBlock::iterator E = MBB->begin();
75 unsigned Count = 4; // At most 4 instructions in an IT block.
76 while (Count && MBBI != E) {
77 if (MBBI->isDebugValue()) {
78 --MBBI;
79 continue;
80 }
81 if (MBBI->getOpcode() == ARM::t2IT) {
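 // Found the IT that heads this block. If Count is still 4, the new branch
 // replaced the entire IT block and the IT itself is now dead. Otherwise
 // shorten the mask: making bit 'Count' the new terminating bit (and
 // clearing the bits below it) leaves the IT covering only the instructions
 // that remain before the branch.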
82 unsigned Mask = MBBI->getOperand(1).getImm();
83 if (Count == 4)
84 MBBI->eraseFromParent();
85 else {
86 unsigned MaskOn = 1 << Count;
87 unsigned MaskOff = ~(MaskOn - 1);
88 MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
89 }
90 return;
91 }
92 --MBBI;
93 --Count;
94 }
95
96 // Control flow can reach here if branch folding is run before the
97 // IT block formation pass.
98 }
99}
100
101bool
102Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
103 MachineBasicBlock::iterator MBBI) const {
104 unsigned PredReg = 0;
105 return llvm::getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
106}
107
108bool Thumb2InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
109 unsigned NumInstrs) const {
110 return NumInstrs && NumInstrs <= IfCvtLimit;
111}
112
113bool Thumb2InstrInfo::
114isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
115 MachineBasicBlock &FMBB, unsigned NumF) const {
116 // FIXME: Catch optimization such as:
117 // r0 = movne
118 // r0 = moveq
119 return NumT && NumF &&
120 NumT <= (IfCvtDiamondLimit) && NumF <= (IfCvtDiamondLimit);
121}
122
123void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
124 MachineBasicBlock::iterator I, DebugLoc DL,
125 unsigned DestReg, unsigned SrcReg,
126 bool KillSrc) const {
127 // SPR, DPR, and QPR copies are handled by the base implementation.
128 if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
129 return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
130
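 // GPR-to-GPR copy: pick the MOV flavor that matches whether the source
 // and destination are low (tGPR) registers or arbitrary GPRs.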
131 bool tDest = ARM::tGPRRegClass.contains(DestReg);
132 bool tSrc = ARM::tGPRRegClass.contains(SrcReg);
133 unsigned Opc = ARM::tMOVgpr2gpr;
134 if (tDest && tSrc)
135 Opc = ARM::tMOVr;
136 else if (tSrc)
137 Opc = ARM::tMOVtgpr2gpr;
138 else if (tDest)
139 Opc = ARM::tMOVgpr2tgpr;
140
141 BuildMI(MBB, I, DL, get(Opc), DestReg)
142 .addReg(SrcReg, getKillRegState(KillSrc));
143}
144
145void Thumb2InstrInfo::
146storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
147 unsigned SrcReg, bool isKill, int FI,
148 const TargetRegisterClass *RC,
149 const TargetRegisterInfo *TRI) const {
150 if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
151 RC == ARM::tcGPRRegisterClass) {
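 // Any flavor of GPR can be spilled with a plain t2STRi12 off the frame
 // index; other register classes fall through to the base class below.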
152 DebugLoc DL;
153 if (I != MBB.end()) DL = I->getDebugLoc();
154
155 MachineFunction &MF = *MBB.getParent();
156 MachineFrameInfo &MFI = *MF.getFrameInfo();
157 MachineMemOperand *MMO =
158 MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
159 MachineMemOperand::MOStore, 0,
160 MFI.getObjectSize(FI),
161 MFI.getObjectAlignment(FI));
162 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12))
163 .addReg(SrcReg, getKillRegState(isKill))
164 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
165 return;
166 }
167
168 ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
169}
170
171void Thumb2InstrInfo::
172loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
173 unsigned DestReg, int FI,
174 const TargetRegisterClass *RC,
175 const TargetRegisterInfo *TRI) const {
176 if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
177 RC == ARM::tcGPRRegisterClass) {
178 DebugLoc DL;
179 if (I != MBB.end()) DL = I->getDebugLoc();
180
181 MachineFunction &MF = *MBB.getParent();
182 MachineFrameInfo &MFI = *MF.getFrameInfo();
183 MachineMemOperand *MMO =
184 MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
185 MachineMemOperand::MOLoad, 0,
186 MFI.getObjectSize(FI),
187 MFI.getObjectAlignment(FI));
188 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
189 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
190 return;
191 }
192
193 ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
194}
195
196ScheduleHazardRecognizer *Thumb2InstrInfo::
197CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
198 return (ScheduleHazardRecognizer *)new Thumb2HazardRecognizer(II);
199}
200
201void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
202 MachineBasicBlock::iterator &MBBI, DebugLoc dl,
203 unsigned DestReg, unsigned BaseReg, int NumBytes,
204 ARMCC::CondCodes Pred, unsigned PredReg,
205 const ARMBaseInstrInfo &TII) {
206 bool isSub = NumBytes < 0;
207 if (isSub) NumBytes = -NumBytes;
208
209 // If profitable, use a movw or movt to materialize the offset.
210 // FIXME: Use the scavenger to grab a scratch register.
211 if (DestReg != ARM::SP && DestReg != BaseReg &&
212 NumBytes >= 4096 &&
213 ARM_AM::getT2SOImmVal(NumBytes) == -1) {
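 // The offset needs more than 12 bits and is not a Thumb-2 modified
 // immediate, so no single add/sub can encode it. Since DestReg is usable
 // as a scratch register here (not SP and distinct from BaseReg),
 // materialize the offset in DestReg first and then add/sub the base.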
214 bool Fits = false;
215 if (NumBytes < 65536) {
216 // Use a movw to materialize the 16-bit constant.
217 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
218 .addImm(NumBytes)
219 .addImm((unsigned)Pred).addReg(PredReg);
220 Fits = true;
221 } else if ((NumBytes & 0xffff) == 0) {
222 // Use a movt to materialize the 32-bit constant.
223 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
224 .addReg(DestReg)
225 .addImm(NumBytes >> 16)
226 .addImm((unsigned)Pred).addReg(PredReg);
227 Fits = true;
228 }
229
230 if (Fits) {
231 if (isSub) {
232 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
233 .addReg(BaseReg, RegState::Kill)
234 .addReg(DestReg, RegState::Kill)
235 .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
236 } else {
237 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
238 .addReg(DestReg, RegState::Kill)
239 .addReg(BaseReg, RegState::Kill)
240 .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
241 }
242 return;
243 }
244 }
245
246 while (NumBytes) {
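 // Chip away at the offset, emitting one add/sub per iteration with as
 // large an immediate chunk as the selected encoding allows.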
247 unsigned ThisVal = NumBytes;
248 unsigned Opc = 0;
249 if (DestReg == ARM::SP && BaseReg != ARM::SP) {
250 // mov sp, rn. Note t2MOVr cannot be used.
251 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr),DestReg).addReg(BaseReg);
252 BaseReg = ARM::SP;
253 continue;
254 }
255
256 bool HasCCOut = true;
257 if (BaseReg == ARM::SP) {
258 // sub sp, sp, #imm7
259 if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
260 assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
261 Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
262 // FIXME: Fix Thumb1 immediate encoding.
263 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
264 .addReg(BaseReg).addImm(ThisVal/4);
265 NumBytes = 0;
266 continue;
267 }
268
269 // sub rd, sp, so_imm
270 Opc = isSub ? ARM::t2SUBrSPi : ARM::t2ADDrSPi;
271 if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
272 NumBytes = 0;
273 } else {
274 // FIXME: Move this to ARMAddressingModes.h?
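 // Peel off the byte-wide chunk starting at the most significant set bit;
 // any 8-bit run is representable as a Thumb-2 modified immediate (an
 // 8-bit value rotated into place), which the assert below double-checks.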
275 unsigned RotAmt = CountLeadingZeros_32(ThisVal);
276 ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
277 NumBytes &= ~ThisVal;
278 assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
279 "Bit extraction didn't work?");
280 }
281 } else {
282 assert(DestReg != ARM::SP && BaseReg != ARM::SP);
283 Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
284 if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
285 NumBytes = 0;
286 } else if (ThisVal < 4096) {
287 Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
288 HasCCOut = false;
289 NumBytes = 0;
290 } else {
291 // FIXME: Move this to ARMAddressingModes.h?
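 // Same byte-wide chunk extraction as the SP case above.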
292 unsigned RotAmt = CountLeadingZeros_32(ThisVal);
293 ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
294 NumBytes &= ~ThisVal;
295 assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
296 "Bit extraction didn't work?");
297 }
298 }
299
300 // Build the new ADD / SUB.
301 MachineInstrBuilder MIB =
302 AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
303 .addReg(BaseReg, RegState::Kill)
304 .addImm(ThisVal));
305 if (HasCCOut)
306 AddDefaultCC(MIB);
307
308 BaseReg = DestReg;
309 }
310}
311
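// Map a Thumb-2 load/store opcode to the form that takes a negative (i8)
// offset. i8 forms are returned unchanged; anything else yields 0.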
312static unsigned
313negativeOffsetOpcode(unsigned opcode)
314{
315 switch (opcode) {
316 case ARM::t2LDRi12: return ARM::t2LDRi8;
317 case ARM::t2LDRHi12: return ARM::t2LDRHi8;
318 case ARM::t2LDRBi12: return ARM::t2LDRBi8;
319 case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
320 case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
321 case ARM::t2STRi12: return ARM::t2STRi8;
322 case ARM::t2STRBi12: return ARM::t2STRBi8;
323 case ARM::t2STRHi12: return ARM::t2STRHi8;
324
325 case ARM::t2LDRi8:
326 case ARM::t2LDRHi8:
327 case ARM::t2LDRBi8:
328 case ARM::t2LDRSHi8:
329 case ARM::t2LDRSBi8:
330 case ARM::t2STRi8:
331 case ARM::t2STRBi8:
332 case ARM::t2STRHi8:
333 return opcode;
334
335 default:
336 break;
337 }
338
339 return 0;
340}
341
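// Map a Thumb-2 load/store opcode to the form that takes a positive (i12)
// offset. i12 forms are returned unchanged; anything else yields 0.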
342static unsigned
343positiveOffsetOpcode(unsigned opcode)
344{
345 switch (opcode) {
346 case ARM::t2LDRi8: return ARM::t2LDRi12;
347 case ARM::t2LDRHi8: return ARM::t2LDRHi12;
348 case ARM::t2LDRBi8: return ARM::t2LDRBi12;
349 case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
350 case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
351 case ARM::t2STRi8: return ARM::t2STRi12;
352 case ARM::t2STRBi8: return ARM::t2STRBi12;
353 case ARM::t2STRHi8: return ARM::t2STRHi12;
354
355 case ARM::t2LDRi12:
356 case ARM::t2LDRHi12:
357 case ARM::t2LDRBi12:
358 case ARM::t2LDRSHi12:
359 case ARM::t2LDRSBi12:
360 case ARM::t2STRi12:
361 case ARM::t2STRBi12:
362 case ARM::t2STRHi12:
363 return opcode;
364
365 default:
366 break;
367 }
368
369 return 0;
370}
371
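// Map a register-offset (so_reg) Thumb-2 load/store opcode to its i12
// immediate form. Existing immediate forms (i12 or i8) are returned
// unchanged; anything else yields 0.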
372static unsigned
373immediateOffsetOpcode(unsigned opcode)
374{
375 switch (opcode) {
376 case ARM::t2LDRs: return ARM::t2LDRi12;
377 case ARM::t2LDRHs: return ARM::t2LDRHi12;
378 case ARM::t2LDRBs: return ARM::t2LDRBi12;
379 case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
380 case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
381 case ARM::t2STRs: return ARM::t2STRi12;
382 case ARM::t2STRBs: return ARM::t2STRBi12;
383 case ARM::t2STRHs: return ARM::t2STRHi12;
384
385 case ARM::t2LDRi12:
386 case ARM::t2LDRHi12:
387 case ARM::t2LDRBi12:
388 case ARM::t2LDRSHi12:
389 case ARM::t2LDRSBi12:
390 case ARM::t2STRi12:
391 case ARM::t2STRBi12:
392 case ARM::t2STRHi12:
393 case ARM::t2LDRi8:
394 case ARM::t2LDRHi8:
395 case ARM::t2LDRBi8:
396 case ARM::t2LDRSHi8:
397 case ARM::t2LDRSBi8:
398 case ARM::t2STRi8:
399 case ARM::t2STRBi8:
400 case ARM::t2STRHi8:
401 return opcode;
402
403 default:
404 break;
405 }
406
407 return 0;
408}
409
410bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
411 unsigned FrameReg, int &Offset,
412 const ARMBaseInstrInfo &TII) {
413 unsigned Opcode = MI.getOpcode();
414 const TargetInstrDesc &Desc = MI.getDesc();
415 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
416 bool isSub = false;
417
418 // Memory operands in inline assembly always use AddrModeT2_i12.
419 if (Opcode == ARM::INLINEASM)
420 AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?
421
422 if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
423 Offset += MI.getOperand(FrameRegIdx+1).getImm();
424
425 unsigned PredReg;
426 if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) {
427 // Turn it into a move.
428 MI.setDesc(TII.get(ARM::tMOVgpr2gpr));
429 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
430 // Remove offset and remaining explicit predicate operands.
431 do MI.RemoveOperand(FrameRegIdx+1);
432 while (MI.getNumOperands() > FrameRegIdx+1 &&
433 (!MI.getOperand(FrameRegIdx+1).isReg() ||
434 !MI.getOperand(FrameRegIdx+1).isImm()));
435 return true;
436 }
437
438 bool isSP = FrameReg == ARM::SP;
439 bool HasCCOut = Opcode != ARM::t2ADDri12;
440
441 if (Offset < 0) {
442 Offset = -Offset;
443 isSub = true;
444 MI.setDesc(TII.get(isSP ? ARM::t2SUBrSPi : ARM::t2SUBri));
445 } else {
446 MI.setDesc(TII.get(isSP ? ARM::t2ADDrSPi : ARM::t2ADDri));
447 }
448
449 // Common case: small offset, fits into instruction.
450 if (ARM_AM::getT2SOImmVal(Offset) != -1) {
451 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
452 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
453 // Add cc_out operand if the original instruction did not have one.
454 if (!HasCCOut)
455 MI.addOperand(MachineOperand::CreateReg(0, false));
456 Offset = 0;
457 return true;
458 }
459 // Another common case: imm12.
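 // t2ADDri12 / t2SUBri12 have no cc_out operand, so this form can only be
 // used when the original instruction's optional cc_out is the zero
 // register, i.e. it does not define CPSR.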
460 if (Offset < 4096 &&
461 (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
462 unsigned NewOpc = isSP
463 ? (isSub ? ARM::t2SUBrSPi12 : ARM::t2ADDrSPi12)
464 : (isSub ? ARM::t2SUBri12 : ARM::t2ADDri12);
465 MI.setDesc(TII.get(NewOpc));
466 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
467 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
468 // Remove the cc_out operand.
469 if (HasCCOut)
470 MI.RemoveOperand(MI.getNumOperands()-1);
471 Offset = 0;
472 return true;
473 }
474
475 // Otherwise, extract 8 adjacent bits from the immediate into this
476 // t2ADDri/t2SUBri.
477 unsigned RotAmt = CountLeadingZeros_32(Offset);
478 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);
479
480 // We will handle these bits from offset, clear them.
481 Offset &= ~ThisImmVal;
482
483 assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
484 "Bit extraction didn't work?");
485 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
486 // Add cc_out operand if the original instruction did not have one.
487 if (!HasCCOut)
488 MI.addOperand(MachineOperand::CreateReg(0, false));
489
490 } else {
491
492 // AddrMode4 and AddrMode6 cannot handle any offset.
493 if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
494 return false;
495
496 // AddrModeT2_so cannot handle any offset. If there is no offset
497 // register then we change to an immediate version.
498 unsigned NewOpc = Opcode;
499 if (AddrMode == ARMII::AddrModeT2_so) {
500 unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg();
501 if (OffsetReg != 0) {
502 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
503 return Offset == 0;
504 }
505
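 // No offset register: remove it so the following operand slides down,
 // then reuse that slot as a zero immediate offset for the i12 opcode
 // selected below.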
506 MI.RemoveOperand(FrameRegIdx+1);
507 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
508 NewOpc = immediateOffsetOpcode(Opcode);
509 AddrMode = ARMII::AddrModeT2_i12;
510 }
511
512 unsigned NumBits = 0;
513 unsigned Scale = 1;
514 if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
515 // i8 supports only negative offsets and i12 only positive ones, so
516 // convert the opcode to the appropriate form based on the sign of
517 // the offset.
518 Offset += MI.getOperand(FrameRegIdx+1).getImm();
519 if (Offset < 0) {
520 NewOpc = negativeOffsetOpcode(Opcode);
521 NumBits = 8;
522 isSub = true;
523 Offset = -Offset;
524 } else {
525 NewOpc = positiveOffsetOpcode(Opcode);
526 NumBits = 12;
527 }
528 } else if (AddrMode == ARMII::AddrMode5) {
529 // VFP address mode.
530 const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
531 int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
532 if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
533 InstrOffs *= -1;
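 // AddrMode5 immediates count words, so the available range is the 8-bit
 // field scaled by 4; the add/sub flag is re-encoded further down via the
 // 1 << NumBits bit.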
534 NumBits = 8;
535 Scale = 4;
536 Offset += InstrOffs * 4;
537 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
538 if (Offset < 0) {
539 Offset = -Offset;
540 isSub = true;
541 }
542 } else {
543 llvm_unreachable("Unsupported addressing mode!");
544 }
545
546 if (NewOpc != Opcode)
547 MI.setDesc(TII.get(NewOpc));
548
549 MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);
550
551 // Attempt to fold address computation
552 // Common case: small offset, fits into instruction.
553 int ImmedOffset = Offset / Scale;
554 unsigned Mask = (1 << NumBits) - 1;
555 if ((unsigned)Offset <= Mask * Scale) {
556 // Replace the FrameIndex with fp/sp
557 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
558 if (isSub) {
559 if (AddrMode == ARMII::AddrMode5)
560 // FIXME: Not consistent.
561 ImmedOffset |= 1 << NumBits;
562 else
563 ImmedOffset = -ImmedOffset;
564 }
565 ImmOp.ChangeToImmediate(ImmedOffset);
566 Offset = 0;
567 return true;
568 }
569
570 // Otherwise, the offset doesn't fit. Pull in what we can to simplify the rest.
571 ImmedOffset = ImmedOffset & Mask;
572 if (isSub) {
573 if (AddrMode == ARMII::AddrMode5)
574 // FIXME: Not consistent.
575 ImmedOffset |= 1 << NumBits;
576 else {
577 ImmedOffset = -ImmedOffset;
578 if (ImmedOffset == 0)
579 // Change the opcode back if the encoded offset is zero.
580 MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
581 }
582 }
583 ImmOp.ChangeToImmediate(ImmedOffset);
584 Offset &= ~(Mask*Scale);
585 }
586
587 Offset = (isSub) ? -Offset : Offset;
588 return Offset == 0;
589}
590
591/// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
592/// two-address instruction inserted by the two-address pass.
593void
594Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
595 MachineInstr *UseMI,
596 const TargetRegisterInfo &TRI) const {
597 if (SrcMI->getOpcode() != ARM::tMOVgpr2gpr ||
598 SrcMI->getOperand(1).isKill())
599 return;
600
601 unsigned PredReg = 0;
602 ARMCC::CondCodes CC = llvm::getInstrPredicate(UseMI, PredReg);
603 if (CC == ARMCC::AL || PredReg != ARM::CPSR)
604 return;
605
606 // Schedule the copy so it doesn't come between previous instructions
607 // and UseMI which can form an IT block.
608 unsigned SrcReg = SrcMI->getOperand(1).getReg();
609 ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
610 MachineBasicBlock *MBB = UseMI->getParent();
611 MachineBasicBlock::iterator MBBI = SrcMI;
612 unsigned NumInsts = 0;
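 // Scan backwards over the run of instructions predicated on CC or its
 // opposite condition; together with UseMI they could form one IT block.
 // The copy currently sits between that run and UseMI, so after the scan
 // it is moved above the run to keep the block contiguous. Give up if
 // anything in the run clobbers the copied register or redefines CPSR, or
 // if the run is already four instructions deep (a full IT block).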
613 while (--MBBI != MBB->begin()) {
614 if (MBBI->isDebugValue())
615 continue;
616
617 MachineInstr *NMI = &*MBBI;
618 ARMCC::CondCodes NCC = llvm::getInstrPredicate(NMI, PredReg);
619 if (!(NCC == CC || NCC == OCC) ||
620 NMI->modifiesRegister(SrcReg, &TRI) ||
621 NMI->definesRegister(ARM::CPSR))
622 break;
623 if (++NumInsts == 4)
624 // Too many in a row!
625 return;
626 }
627
628 if (NumInsts) {
629 MBB->remove(SrcMI);
630 MBB->insert(++MBBI, SrcMI);
631 }
632}
633
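/// getITInstrPredicate - Identical to llvm::getInstrPredicate except that it
/// returns AL for conditional branches: they are predicated by their own
/// condition code rather than by an IT block.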
634ARMCC::CondCodes
635llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
636 unsigned Opc = MI->getOpcode();
637 if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
638 return ARMCC::AL;
639 return llvm::getInstrPredicate(MI, PredReg);
640}