ARMAsmParser.cpp revision 234353
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCRegisterInfo *MRI;

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // first instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }
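  // Note: the IT state above ties directly to forwardITPosition(). With
  // count = 4 - trailingzeroes(mask), a mask with three trailing zeros
  // describes a one-instruction block and a mask with no trailing zeros
  // describes a four-instruction block; once CurPosition has advanced past
  // the last slot, the sentinel ~0U marks the block as finished, which is
  // exactly what inITBlock() tests.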


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }
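  // Note: GET_ASSEMBLER_HEADER pulls in the declarations that TableGen emits
  // from the ARM .td files (the instruction-matching and custom
  // operand-parsing entry points used by MatchAndEmitInstruction below).
  // Their exact names and signatures come from ARMGenAsmMatcher.inc, not
  // from this file.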

  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = &getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };
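  // Note: this is a classic tagged union. Kind above selects which member of
  // the anonymous union is live, and every accessor below asserts the Kind it
  // expects. Registers is kept outside the union because SmallVector has a
  // non-trivial constructor and so cannot be a member of a C++98 union.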

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
  bool isFBits16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 16;
  }
  bool isFBits32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }
  bool isImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // Explicitly exclude zero; we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_4095() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4096;
  }
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }
  bool isImm0_1() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  bool isImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  bool isShrImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isARMSOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }
  bool isT2SOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isMemPCRelImm12() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
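  // Note on the INT32_MIN checks above and below: the parser uses INT32_MIN
  // as the in-memory encoding of a "#-0" offset (see the "Special case, #-0"
  // comment), so these predicates accept it alongside the normal ranges.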
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListDPairSpaced() const {
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }

  bool isNEONi64splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    // i64 value with each byte being either 0 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1370      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }
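  // Note: predicated instructions carry the condition as two operands, the
  // ARMCC value itself followed by CPSR (or register 0 when the condition is
  // AL and the flags are therefore not read). That is what the N == 2 assert
  // above is checking.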

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }
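  // Worked example for the rotate encoding: "ror #8" is stored as Imm == 8
  // and emitted as 8 >> 3 == 1; #16 becomes 2 and #24 becomes 3, matching the
  // display values mentioned in the comment above.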
1472
1473  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    // Munge the lsb/width into a bitfield mask.
1476    unsigned lsb = Bitfield.LSB;
1477    unsigned width = Bitfield.Width;
1478    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1479    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1480                      (32 - (lsb + width)));
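    // Worked example (illustrative, not from the original source): lsb == 8
    // and width == 8 give Mask == 0xffff00ff, i.e. bits [15:8] clear.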
1481    Inst.addOperand(MCOperand::CreateImm(Mask));
1482  }
1483
1484  void addImmOperands(MCInst &Inst, unsigned N) const {
1485    assert(N == 1 && "Invalid number of operands!");
1486    addExpr(Inst, getImm());
1487  }
1488
1489  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1490    assert(N == 1 && "Invalid number of operands!");
1491    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1492    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1493  }
1494
1495  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1496    assert(N == 1 && "Invalid number of operands!");
1497    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1498    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1499  }
1500
1501  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1502    assert(N == 1 && "Invalid number of operands!");
1503    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1504    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1505    Inst.addOperand(MCOperand::CreateImm(Val));
1506  }
1507
1508  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1509    assert(N == 1 && "Invalid number of operands!");
1510    // FIXME: We really want to scale the value here, but the LDRD/STRD
1511    // instructions don't encode operands that way yet.
1512    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1513    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1514  }
1515
1516  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1517    assert(N == 1 && "Invalid number of operands!");
1518    // The immediate is scaled by four in the encoding and is stored
1519    // in the MCInst as such. Lop off the low two bits here.
1520    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1521    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1522  }
1523
1524  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // The immediate is scaled by four in the encoding and is stored
1527    // in the MCInst as such. Lop off the low two bits here.
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1530  }
1531
1532  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1533    assert(N == 1 && "Invalid number of operands!");
1534    // The immediate is scaled by four in the encoding and is stored
1535    // in the MCInst as such. Lop off the low two bits here.
1536    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1537    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1538  }
1539
1540  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1541    assert(N == 1 && "Invalid number of operands!");
1542    // The constant encodes as the immediate-1, and we store in the instruction
1543    // the bits as encoded, so subtract off one here.
1544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1545    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1546  }
1547
1548  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    // The constant encodes as the immediate-1, and we store in the instruction
1551    // the bits as encoded, so subtract off one here.
1552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1553    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1554  }
1555
1556  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1557    assert(N == 1 && "Invalid number of operands!");
1558    // The constant encodes as the immediate, except for 32, which encodes as
1559    // zero.
1560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1561    unsigned Imm = CE->getValue();
1562    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1563  }
1564
1565  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1568    // the instruction as well.
1569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1570    int Val = CE->getValue();
1571    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1572  }
1573
1574  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1575    assert(N == 1 && "Invalid number of operands!");
1576    // The operand is actually a t2_so_imm, but we have its bitwise
1577    // negation in the assembly source, so twiddle it here.
1578    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1579    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1580  }
1581
1582  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1583    assert(N == 1 && "Invalid number of operands!");
1584    // The operand is actually a t2_so_imm, but we have its
1585    // negation in the assembly source, so twiddle it here.
1586    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1587    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1588  }
1589
1590  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1591    assert(N == 1 && "Invalid number of operands!");
1592    // The operand is actually an imm0_4095, but we have its
1593    // negation in the assembly source, so twiddle it here.
1594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1595    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1596  }
1597
1598  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1599    assert(N == 1 && "Invalid number of operands!");
1600    // The operand is actually a so_imm, but we have its bitwise
1601    // negation in the assembly source, so twiddle it here.
1602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1604  }
1605
1606  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1607    assert(N == 1 && "Invalid number of operands!");
1608    // The operand is actually a so_imm, but we have its
1609    // negation in the assembly source, so twiddle it here.
1610    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1611    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1612  }
1613
1614  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1615    assert(N == 1 && "Invalid number of operands!");
1616    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1617  }
1618
1619  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1620    assert(N == 1 && "Invalid number of operands!");
1621    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1622  }
1623
1624  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1625    assert(N == 1 && "Invalid number of operands!");
1626    int32_t Imm = Memory.OffsetImm->getValue();
1627    // FIXME: Handle #-0
1628    if (Imm == INT32_MIN) Imm = 0;
1629    Inst.addOperand(MCOperand::CreateImm(Imm));
1630  }
1631
1632  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1633    assert(N == 2 && "Invalid number of operands!");
1634    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1635    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1636  }
1637
1638  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1639    assert(N == 3 && "Invalid number of operands!");
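    // The three MCInst operands are <base reg, offset reg, packed opcode>;
    // ARM_AM::getAM2Opc() below folds the add/sub flag together with either
    // the immediate offset or the register-shift information into that
    // final packed immediate.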
1640    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1641    if (!Memory.OffsetRegNum) {
1642      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1643      // Special case for #-0
1644      if (Val == INT32_MIN) Val = 0;
1645      if (Val < 0) Val = -Val;
1646      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1647    } else {
1648      // For register offset, we encode the shift type and negation flag
1649      // here.
1650      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1651                              Memory.ShiftImm, Memory.ShiftType);
1652    }
1653    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1654    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1655    Inst.addOperand(MCOperand::CreateImm(Val));
1656  }
1657
1658  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1659    assert(N == 2 && "Invalid number of operands!");
1660    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1661    assert(CE && "non-constant AM2OffsetImm operand!");
1662    int32_t Val = CE->getValue();
1663    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1664    // Special case for #-0
1665    if (Val == INT32_MIN) Val = 0;
1666    if (Val < 0) Val = -Val;
1667    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1668    Inst.addOperand(MCOperand::CreateReg(0));
1669    Inst.addOperand(MCOperand::CreateImm(Val));
1670  }
1671
1672  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1673    assert(N == 3 && "Invalid number of operands!");
1674    // If we have an immediate that's not a constant, treat it as a label
1675    // reference needing a fixup. If it is a constant, it's something else
1676    // and we reject it.
1677    if (isImm()) {
1678      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1679      Inst.addOperand(MCOperand::CreateReg(0));
1680      Inst.addOperand(MCOperand::CreateImm(0));
1681      return;
1682    }
1683
1684    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1685    if (!Memory.OffsetRegNum) {
1686      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1687      // Special case for #-0
1688      if (Val == INT32_MIN) Val = 0;
1689      if (Val < 0) Val = -Val;
1690      Val = ARM_AM::getAM3Opc(AddSub, Val);
1691    } else {
1692      // For register offset, we encode the shift type and negation flag
1693      // here.
1694      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1695    }
1696    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1697    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1698    Inst.addOperand(MCOperand::CreateImm(Val));
1699  }
1700
1701  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1702    assert(N == 2 && "Invalid number of operands!");
1703    if (Kind == k_PostIndexRegister) {
1704      int32_t Val =
1705        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1706      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1707      Inst.addOperand(MCOperand::CreateImm(Val));
1708      return;
1709    }
1710
1711    // Constant offset.
1712    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1713    int32_t Val = CE->getValue();
1714    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1715    // Special case for #-0
1716    if (Val == INT32_MIN) Val = 0;
1717    if (Val < 0) Val = -Val;
1718    Val = ARM_AM::getAM3Opc(AddSub, Val);
1719    Inst.addOperand(MCOperand::CreateReg(0));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    // If we have an immediate that's not a constant, treat it as a label
1726    // reference needing a fixup. If it is a constant, it's something else
1727    // and we reject it.
1728    if (isImm()) {
1729      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1730      Inst.addOperand(MCOperand::CreateImm(0));
1731      return;
1732    }
1733
1734    // The lower two bits are always zero and as such are not encoded.
1735    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1736    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1737    // Special case for #-0
1738    if (Val == INT32_MIN) Val = 0;
1739    if (Val < 0) Val = -Val;
1740    Val = ARM_AM::getAM5Opc(AddSub, Val);
1741    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1742    Inst.addOperand(MCOperand::CreateImm(Val));
1743  }
1744
1745  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1746    assert(N == 2 && "Invalid number of operands!");
1747    // If we have an immediate that's not a constant, treat it as a label
1748    // reference needing a fixup. If it is a constant, it's something else
1749    // and we reject it.
1750    if (isImm()) {
1751      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1752      Inst.addOperand(MCOperand::CreateImm(0));
1753      return;
1754    }
1755
1756    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1757    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1758    Inst.addOperand(MCOperand::CreateImm(Val));
1759  }
1760
1761  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1762    assert(N == 2 && "Invalid number of operands!");
1763    // The lower two bits are always zero and as such are not encoded.
1764    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1765    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1766    Inst.addOperand(MCOperand::CreateImm(Val));
1767  }
1768
1769  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1770    assert(N == 2 && "Invalid number of operands!");
1771    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateImm(Val));
1774  }
1775
1776  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1777    addMemImm8OffsetOperands(Inst, N);
1778  }
1779
1780  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1781    addMemImm8OffsetOperands(Inst, N);
1782  }
1783
1784  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1785    assert(N == 2 && "Invalid number of operands!");
1786    // If this is an immediate, it's a label reference.
1787    if (isImm()) {
1788      addExpr(Inst, getImm());
1789      Inst.addOperand(MCOperand::CreateImm(0));
1790      return;
1791    }
1792
1793    // Otherwise, it's a normal memory reg+offset.
1794    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1795    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1796    Inst.addOperand(MCOperand::CreateImm(Val));
1797  }
1798
1799  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 2 && "Invalid number of operands!");
1801    // If this is an immediate, it's a label reference.
1802    if (isImm()) {
1803      addExpr(Inst, getImm());
1804      Inst.addOperand(MCOperand::CreateImm(0));
1805      return;
1806    }
1807
1808    // Otherwise, it's a normal memory reg+offset.
1809    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 2 && "Invalid number of operands!");
1816    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1817    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1818  }
1819
1820  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1821    assert(N == 2 && "Invalid number of operands!");
1822    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1823    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1824  }
1825
1826  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1827    assert(N == 3 && "Invalid number of operands!");
1828    unsigned Val =
1829      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1830                        Memory.ShiftImm, Memory.ShiftType);
1831    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1832    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1833    Inst.addOperand(MCOperand::CreateImm(Val));
1834  }
1835
1836  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1837    assert(N == 3 && "Invalid number of operands!");
1838    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1839    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1840    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1841  }
1842
1843  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1844    assert(N == 2 && "Invalid number of operands!");
1845    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1846    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1847  }
1848
1849  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1850    assert(N == 2 && "Invalid number of operands!");
1851    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1852    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1853    Inst.addOperand(MCOperand::CreateImm(Val));
1854  }
1855
1856  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1857    assert(N == 2 && "Invalid number of operands!");
1858    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1859    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1860    Inst.addOperand(MCOperand::CreateImm(Val));
1861  }
1862
1863  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1864    assert(N == 2 && "Invalid number of operands!");
1865    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1866    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1867    Inst.addOperand(MCOperand::CreateImm(Val));
1868  }
1869
1870  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1871    assert(N == 2 && "Invalid number of operands!");
1872    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1873    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1874    Inst.addOperand(MCOperand::CreateImm(Val));
1875  }
1876
1877  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1880    assert(CE && "non-constant post-idx-imm8 operand!");
1881    int Imm = CE->getValue();
1882    bool isAdd = Imm >= 0;
1883    if (Imm == INT32_MIN) Imm = 0;
1884    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
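    // E.g. (illustrative): #4 becomes 0x104 (add bit set) and #-4 becomes
    // 0x004.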
1885    Inst.addOperand(MCOperand::CreateImm(Imm));
1886  }
1887
1888  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1889    assert(N == 1 && "Invalid number of operands!");
1890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1891    assert(CE && "non-constant post-idx-imm8s4 operand!");
1892    int Imm = CE->getValue();
1893    bool isAdd = Imm >= 0;
1894    if (Imm == INT32_MIN) Imm = 0;
1895    // Immediate is scaled by 4.
1896    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1897    Inst.addOperand(MCOperand::CreateImm(Imm));
1898  }
1899
1900  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1901    assert(N == 2 && "Invalid number of operands!");
1902    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1903    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1904  }
1905
1906  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1907    assert(N == 2 && "Invalid number of operands!");
1908    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1909    // The sign, shift type, and shift amount are encoded in a single operand
1910    // using the AM2 encoding helpers.
1911    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1912    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1913                                     PostIdxReg.ShiftTy);
1914    Inst.addOperand(MCOperand::CreateImm(Imm));
1915  }
1916
1917  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1918    assert(N == 1 && "Invalid number of operands!");
1919    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1920  }
1921
1922  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1923    assert(N == 1 && "Invalid number of operands!");
1924    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1925  }
1926
1927  void addVecListOperands(MCInst &Inst, unsigned N) const {
1928    assert(N == 1 && "Invalid number of operands!");
1929    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1930  }
1931
1932  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1933    assert(N == 2 && "Invalid number of operands!");
1934    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1935    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1936  }
1937
1938  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1939    assert(N == 1 && "Invalid number of operands!");
1940    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1941  }
1942
1943  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1944    assert(N == 1 && "Invalid number of operands!");
1945    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1946  }
1947
1948  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1949    assert(N == 1 && "Invalid number of operands!");
1950    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1951  }
1952
1953  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1954    assert(N == 1 && "Invalid number of operands!");
1955    // The immediate encodes the type of constant as well as the value.
1956    // Mask in that this is an i8 splat.
1957    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1959  }
1960
1961  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1962    assert(N == 1 && "Invalid number of operands!");
1963    // The immediate encodes the type of constant as well as the value.
1964    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1965    unsigned Value = CE->getValue();
1966    if (Value >= 256)
1967      Value = (Value >> 8) | 0xa00;
1968    else
1969      Value |= 0x800;
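    // Illustrative: a splat of 0x1200 becomes (0x1200 >> 8) | 0xa00 == 0xa12,
    // while 0x0012 becomes 0x12 | 0x800 == 0x812.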
1970    Inst.addOperand(MCOperand::CreateImm(Value));
1971  }
1972
1973  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1974    assert(N == 1 && "Invalid number of operands!");
1975    // The immediate encodes the type of constant as well as the value.
1976    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1977    unsigned Value = CE->getValue();
1978    if (Value >= 256 && Value <= 0xff00)
1979      Value = (Value >> 8) | 0x200;
1980    else if (Value > 0xffff && Value <= 0xff0000)
1981      Value = (Value >> 16) | 0x400;
1982    else if (Value > 0xffffff)
1983      Value = (Value >> 24) | 0x600;
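    // Illustrative: 0x4700 becomes (0x4700 >> 8) | 0x200 == 0x247, and
    // 0x220000 becomes (0x220000 >> 16) | 0x400 == 0x422.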
1984    Inst.addOperand(MCOperand::CreateImm(Value));
1985  }
1986
1987  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1988    assert(N == 1 && "Invalid number of operands!");
1989    // The immediate encodes the type of constant as well as the value.
1990    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1991    unsigned Value = CE->getValue();
1992    if (Value >= 256 && Value <= 0xffff)
1993      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1994    else if (Value > 0xffff && Value <= 0xffffff)
1995      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1996    else if (Value > 0xffffff)
1997      Value = (Value >> 24) | 0x600;
1998    Inst.addOperand(MCOperand::CreateImm(Value));
1999  }
2000
2001  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2002    assert(N == 1 && "Invalid number of operands!");
2003    // The immediate encodes the type of constant as well as the value.
2004    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2005    unsigned Value = ~CE->getValue();
2006    if (Value >= 256 && Value <= 0xffff)
2007      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2008    else if (Value > 0xffff && Value <= 0xffffff)
2009      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2010    else if (Value > 0xffffff)
2011      Value = (Value >> 24) | 0x600;
2012    Inst.addOperand(MCOperand::CreateImm(Value));
2013  }
2014
2015  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2016    assert(N == 1 && "Invalid number of operands!");
2017    // The immediate encodes the type of constant as well as the value.
2018    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2019    uint64_t Value = CE->getValue();
2020    unsigned Imm = 0;
2021    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2022      Imm |= (Value & 1) << i;
2023    }
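    // Illustrative: 0x00ff00ff00ff00ffULL collapses to Imm == 0x55 and is
    // emitted as 0x1e55.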
2024    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2025  }
2026
2027  virtual void print(raw_ostream &OS) const;
2028
2029  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2030    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2031    Op->ITMask.Mask = Mask;
2032    Op->StartLoc = S;
2033    Op->EndLoc = S;
2034    return Op;
2035  }
2036
2037  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2038    ARMOperand *Op = new ARMOperand(k_CondCode);
2039    Op->CC.Val = CC;
2040    Op->StartLoc = S;
2041    Op->EndLoc = S;
2042    return Op;
2043  }
2044
2045  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2046    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2047    Op->Cop.Val = CopVal;
2048    Op->StartLoc = S;
2049    Op->EndLoc = S;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2054    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2055    Op->Cop.Val = CopVal;
2056    Op->StartLoc = S;
2057    Op->EndLoc = S;
2058    return Op;
2059  }
2060
2061  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2062    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2063    Op->Cop.Val = Val;
2064    Op->StartLoc = S;
2065    Op->EndLoc = E;
2066    return Op;
2067  }
2068
2069  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2070    ARMOperand *Op = new ARMOperand(k_CCOut);
2071    Op->Reg.RegNum = RegNum;
2072    Op->StartLoc = S;
2073    Op->EndLoc = S;
2074    return Op;
2075  }
2076
2077  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2078    ARMOperand *Op = new ARMOperand(k_Token);
2079    Op->Tok.Data = Str.data();
2080    Op->Tok.Length = Str.size();
2081    Op->StartLoc = S;
2082    Op->EndLoc = S;
2083    return Op;
2084  }
2085
2086  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2087    ARMOperand *Op = new ARMOperand(k_Register);
2088    Op->Reg.RegNum = RegNum;
2089    Op->StartLoc = S;
2090    Op->EndLoc = E;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2095                                           unsigned SrcReg,
2096                                           unsigned ShiftReg,
2097                                           unsigned ShiftImm,
2098                                           SMLoc S, SMLoc E) {
2099    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2100    Op->RegShiftedReg.ShiftTy = ShTy;
2101    Op->RegShiftedReg.SrcReg = SrcReg;
2102    Op->RegShiftedReg.ShiftReg = ShiftReg;
2103    Op->RegShiftedReg.ShiftImm = ShiftImm;
2104    Op->StartLoc = S;
2105    Op->EndLoc = E;
2106    return Op;
2107  }
2108
2109  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2110                                            unsigned SrcReg,
2111                                            unsigned ShiftImm,
2112                                            SMLoc S, SMLoc E) {
2113    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2114    Op->RegShiftedImm.ShiftTy = ShTy;
2115    Op->RegShiftedImm.SrcReg = SrcReg;
2116    Op->RegShiftedImm.ShiftImm = ShiftImm;
2117    Op->StartLoc = S;
2118    Op->EndLoc = E;
2119    return Op;
2120  }
2121
2122  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2123                                   SMLoc S, SMLoc E) {
2124    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2125    Op->ShifterImm.isASR = isASR;
2126    Op->ShifterImm.Imm = Imm;
2127    Op->StartLoc = S;
2128    Op->EndLoc = E;
2129    return Op;
2130  }
2131
2132  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2133    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2134    Op->RotImm.Imm = Imm;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2141                                    SMLoc S, SMLoc E) {
2142    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2143    Op->Bitfield.LSB = LSB;
2144    Op->Bitfield.Width = Width;
2145    Op->StartLoc = S;
2146    Op->EndLoc = E;
2147    return Op;
2148  }
2149
2150  static ARMOperand *
2151  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2152                SMLoc StartLoc, SMLoc EndLoc) {
2153    KindTy Kind = k_RegisterList;
2154
2155    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2156      Kind = k_DPRRegisterList;
2157    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2158             contains(Regs.front().first))
2159      Kind = k_SPRRegisterList;
2160
2161    ARMOperand *Op = new ARMOperand(Kind);
2162    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2163           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2164      Op->Registers.push_back(I->first);
2165    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2166    Op->StartLoc = StartLoc;
2167    Op->EndLoc = EndLoc;
2168    return Op;
2169  }
2170
2171  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2172                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2173    ARMOperand *Op = new ARMOperand(k_VectorList);
2174    Op->VectorList.RegNum = RegNum;
2175    Op->VectorList.Count = Count;
2176    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2177    Op->StartLoc = S;
2178    Op->EndLoc = E;
2179    return Op;
2180  }
2181
2182  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2183                                              bool isDoubleSpaced,
2184                                              SMLoc S, SMLoc E) {
2185    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2186    Op->VectorList.RegNum = RegNum;
2187    Op->VectorList.Count = Count;
2188    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2189    Op->StartLoc = S;
2190    Op->EndLoc = E;
2191    return Op;
2192  }
2193
2194  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2195                                             unsigned Index,
2196                                             bool isDoubleSpaced,
2197                                             SMLoc S, SMLoc E) {
2198    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2199    Op->VectorList.RegNum = RegNum;
2200    Op->VectorList.Count = Count;
2201    Op->VectorList.LaneIndex = Index;
2202    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2203    Op->StartLoc = S;
2204    Op->EndLoc = E;
2205    return Op;
2206  }
2207
2208  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2209                                       MCContext &Ctx) {
2210    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2211    Op->VectorIndex.Val = Idx;
2212    Op->StartLoc = S;
2213    Op->EndLoc = E;
2214    return Op;
2215  }
2216
2217  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2218    ARMOperand *Op = new ARMOperand(k_Immediate);
2219    Op->Imm.Val = Val;
2220    Op->StartLoc = S;
2221    Op->EndLoc = E;
2222    return Op;
2223  }
2224
2225  static ARMOperand *CreateMem(unsigned BaseRegNum,
2226                               const MCConstantExpr *OffsetImm,
2227                               unsigned OffsetRegNum,
2228                               ARM_AM::ShiftOpc ShiftType,
2229                               unsigned ShiftImm,
2230                               unsigned Alignment,
2231                               bool isNegative,
2232                               SMLoc S, SMLoc E) {
2233    ARMOperand *Op = new ARMOperand(k_Memory);
2234    Op->Memory.BaseRegNum = BaseRegNum;
2235    Op->Memory.OffsetImm = OffsetImm;
2236    Op->Memory.OffsetRegNum = OffsetRegNum;
2237    Op->Memory.ShiftType = ShiftType;
2238    Op->Memory.ShiftImm = ShiftImm;
2239    Op->Memory.Alignment = Alignment;
2240    Op->Memory.isNegative = isNegative;
2241    Op->StartLoc = S;
2242    Op->EndLoc = E;
2243    return Op;
2244  }
2245
2246  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2247                                      ARM_AM::ShiftOpc ShiftTy,
2248                                      unsigned ShiftImm,
2249                                      SMLoc S, SMLoc E) {
2250    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2251    Op->PostIdxReg.RegNum = RegNum;
2252    Op->PostIdxReg.isAdd = isAdd;
2253    Op->PostIdxReg.ShiftTy = ShiftTy;
2254    Op->PostIdxReg.ShiftImm = ShiftImm;
2255    Op->StartLoc = S;
2256    Op->EndLoc = E;
2257    return Op;
2258  }
2259
2260  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2261    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2262    Op->MBOpt.Val = Opt;
2263    Op->StartLoc = S;
2264    Op->EndLoc = S;
2265    return Op;
2266  }
2267
2268  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2269    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2270    Op->IFlags.Val = IFlags;
2271    Op->StartLoc = S;
2272    Op->EndLoc = S;
2273    return Op;
2274  }
2275
2276  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2277    ARMOperand *Op = new ARMOperand(k_MSRMask);
2278    Op->MMask.Val = MMask;
2279    Op->StartLoc = S;
2280    Op->EndLoc = S;
2281    return Op;
2282  }
2283};
2284
2285} // end anonymous namespace.
2286
2287void ARMOperand::print(raw_ostream &OS) const {
2288  switch (Kind) {
2289  case k_CondCode:
2290    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2291    break;
2292  case k_CCOut:
2293    OS << "<ccout " << getReg() << ">";
2294    break;
2295  case k_ITCondMask: {
2296    static const char *MaskStr[] = {
2297      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2298      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2299    };
2300    assert((ITMask.Mask & 0xf) == ITMask.Mask && "Invalid IT mask!");
2301    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2302    break;
2303  }
2304  case k_CoprocNum:
2305    OS << "<coprocessor number: " << getCoproc() << ">";
2306    break;
2307  case k_CoprocReg:
2308    OS << "<coprocessor register: " << getCoproc() << ">";
2309    break;
2310  case k_CoprocOption:
2311    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2312    break;
2313  case k_MSRMask:
2314    OS << "<mask: " << getMSRMask() << ">";
2315    break;
2316  case k_Immediate:
2317    getImm()->print(OS);
2318    break;
2319  case k_MemBarrierOpt:
2320    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2321    break;
2322  case k_Memory:
2323    OS << "<memory "
2324       << " base:" << Memory.BaseRegNum;
2325    OS << ">";
2326    break;
2327  case k_PostIndexRegister:
2328    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2329       << PostIdxReg.RegNum;
2330    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2331      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2332         << PostIdxReg.ShiftImm;
2333    OS << ">";
2334    break;
2335  case k_ProcIFlags: {
2336    OS << "<ARM_PROC::";
2337    unsigned IFlags = getProcIFlags();
2338    for (int i=2; i >= 0; --i)
2339      if (IFlags & (1 << i))
2340        OS << ARM_PROC::IFlagsToString(1 << i);
2341    OS << ">";
2342    break;
2343  }
2344  case k_Register:
2345    OS << "<register " << getReg() << ">";
2346    break;
2347  case k_ShifterImmediate:
2348    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2349       << " #" << ShifterImm.Imm << ">";
2350    break;
2351  case k_ShiftedRegister:
2352    OS << "<so_reg_reg "
2353       << RegShiftedReg.SrcReg << " "
2354       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2355       << " " << RegShiftedReg.ShiftReg << ">";
2356    break;
2357  case k_ShiftedImmediate:
2358    OS << "<so_reg_imm "
2359       << RegShiftedImm.SrcReg << " "
2360       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2361       << " #" << RegShiftedImm.ShiftImm << ">";
2362    break;
2363  case k_RotateImmediate:
2364    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2365    break;
2366  case k_BitfieldDescriptor:
2367    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2368       << ", width: " << Bitfield.Width << ">";
2369    break;
2370  case k_RegisterList:
2371  case k_DPRRegisterList:
2372  case k_SPRRegisterList: {
2373    OS << "<register_list ";
2374
2375    const SmallVectorImpl<unsigned> &RegList = getRegList();
2376    for (SmallVectorImpl<unsigned>::const_iterator
2377           I = RegList.begin(), E = RegList.end(); I != E; ) {
2378      OS << *I;
2379      if (++I < E) OS << ", ";
2380    }
2381
2382    OS << ">";
2383    break;
2384  }
2385  case k_VectorList:
2386    OS << "<vector_list " << VectorList.Count << " * "
2387       << VectorList.RegNum << ">";
2388    break;
2389  case k_VectorListAllLanes:
2390    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2391       << VectorList.RegNum << ">";
2392    break;
2393  case k_VectorListIndexed:
2394    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2395       << VectorList.Count << " * " << VectorList.RegNum << ">";
2396    break;
2397  case k_Token:
2398    OS << "'" << getToken() << "'";
2399    break;
2400  case k_VectorIndex:
2401    OS << "<vectorindex " << getVectorIndex() << ">";
2402    break;
2403  }
2404}
2405
2406/// @name Auto-generated Match Functions
2407/// {
2408
2409static unsigned MatchRegisterName(StringRef Name);
2410
2411/// }
2412
2413bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2414                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2415  StartLoc = Parser.getTok().getLoc();
2416  RegNo = tryParseRegister();
2417  EndLoc = Parser.getTok().getLoc();
2418
2419  return (RegNo == (unsigned)-1);
2420}
2421
2422/// Try to parse a register name.  The token must be an Identifier when called,
2423/// and if it is a register name the token is eaten and the register number is
2424/// returned.  Otherwise return -1.
2425///
2426int ARMAsmParser::tryParseRegister() {
2427  const AsmToken &Tok = Parser.getTok();
2428  if (Tok.isNot(AsmToken::Identifier)) return -1;
2429
2430  std::string lowerCase = Tok.getString().lower();
2431  unsigned RegNum = MatchRegisterName(lowerCase);
2432  if (!RegNum) {
2433    RegNum = StringSwitch<unsigned>(lowerCase)
2434      .Case("r13", ARM::SP)
2435      .Case("r14", ARM::LR)
2436      .Case("r15", ARM::PC)
2437      .Case("ip", ARM::R12)
2438      // Additional register name aliases for 'gas' compatibility.
2439      .Case("a1", ARM::R0)
2440      .Case("a2", ARM::R1)
2441      .Case("a3", ARM::R2)
2442      .Case("a4", ARM::R3)
2443      .Case("v1", ARM::R4)
2444      .Case("v2", ARM::R5)
2445      .Case("v3", ARM::R6)
2446      .Case("v4", ARM::R7)
2447      .Case("v5", ARM::R8)
2448      .Case("v6", ARM::R9)
2449      .Case("v7", ARM::R10)
2450      .Case("v8", ARM::R11)
2451      .Case("sb", ARM::R9)
2452      .Case("sl", ARM::R10)
2453      .Case("fp", ARM::R11)
2454      .Default(0);
2455  }
2456  if (!RegNum) {
2457    // Check for aliases registered via .req. Canonicalize to lower case.
2458    // That's more consistent since register names are case insensitive, and
2459    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2460    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2461    // If no match, return failure.
2462    if (Entry == RegisterReqs.end())
2463      return -1;
2464    Parser.Lex(); // Eat identifier token.
2465    return Entry->getValue();
2466  }
2467
2468  Parser.Lex(); // Eat identifier token.
2469
2470  return RegNum;
2471}
2472
2473// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2474// If a recoverable error occurs, return 1. If an irrecoverable error
2475// occurs, return -1. An irrecoverable error is one where tokens have been
2476// consumed in the process of trying to parse the shifter (i.e., when it is
2477// indeed a shifter operand, but malformed).
2478int ARMAsmParser::tryParseShiftRegister(
2479                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2480  SMLoc S = Parser.getTok().getLoc();
2481  const AsmToken &Tok = Parser.getTok();
2482  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2483
2484  std::string lowerCase = Tok.getString().lower();
2485  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2486      .Case("asl", ARM_AM::lsl)
2487      .Case("lsl", ARM_AM::lsl)
2488      .Case("lsr", ARM_AM::lsr)
2489      .Case("asr", ARM_AM::asr)
2490      .Case("ror", ARM_AM::ror)
2491      .Case("rrx", ARM_AM::rrx)
2492      .Default(ARM_AM::no_shift);
2493
2494  if (ShiftTy == ARM_AM::no_shift)
2495    return 1;
2496
2497  Parser.Lex(); // Eat the operator.
2498
2499  // The source register for the shift has already been added to the
2500  // operand list, so we need to pop it off and combine it into the shifted
2501  // register operand instead.
2502  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2503  if (!PrevOp->isReg())
2504    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2505  int SrcReg = PrevOp->getReg();
2506  int64_t Imm = 0;
2507  int ShiftReg = 0;
2508  if (ShiftTy == ARM_AM::rrx) {
2509    // RRX doesn't have an explicit shift amount. The encoder expects
2510    // the shift register to be the same as the source register. Seems odd,
2511    // but OK.
2512    ShiftReg = SrcReg;
2513  } else {
2514    // Figure out if this is shifted by a constant or a register (for non-RRX).
2515    if (Parser.getTok().is(AsmToken::Hash) ||
2516        Parser.getTok().is(AsmToken::Dollar)) {
2517      Parser.Lex(); // Eat hash.
2518      SMLoc ImmLoc = Parser.getTok().getLoc();
2519      const MCExpr *ShiftExpr = 0;
2520      if (getParser().ParseExpression(ShiftExpr)) {
2521        Error(ImmLoc, "invalid immediate shift value");
2522        return -1;
2523      }
2524      // The expression must be evaluatable as an immediate.
2525      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2526      if (!CE) {
2527        Error(ImmLoc, "invalid immediate shift value");
2528        return -1;
2529      }
2530      // Range check the immediate.
2531      // lsl, ror: 0 <= imm <= 31
2532      // lsr, asr: 0 <= imm <= 32
2533      Imm = CE->getValue();
2534      if (Imm < 0 ||
2535          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2536          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2537        Error(ImmLoc, "immediate shift value out of range");
2538        return -1;
2539      }
2540      // Shift by zero is a nop. Always send it through as lsl.
2541      // ('as' compatibility)
2542      if (Imm == 0)
2543        ShiftTy = ARM_AM::lsl;
2544    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2545      ShiftReg = tryParseRegister();
2546      SMLoc L = Parser.getTok().getLoc();
2547      if (ShiftReg == -1) {
2548        Error(L, "expected immediate or register in shift operand");
2549        return -1;
2550      }
2551    } else {
2552      Error(Parser.getTok().getLoc(),
2553            "expected immediate or register in shift operand");
2554      return -1;
2555    }
2556  }
2557
2558  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2559    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2560                                                         ShiftReg, Imm,
2561                                               S, Parser.getTok().getLoc()));
2562  else
2563    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2564                                               S, Parser.getTok().getLoc()));
2565
2566  return 0;
2567}
2568
2569
2570/// Try to parse a register name.  The token must be an Identifier when called.
2571/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2572/// if there is a "writeback". Returns 'true' if it's not a register.
2573///
2574/// TODO this is likely to change to allow different register types and or to
2575/// parse for a specific register type.
2576bool ARMAsmParser::
2577tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2578  SMLoc S = Parser.getTok().getLoc();
2579  int RegNo = tryParseRegister();
2580  if (RegNo == -1)
2581    return true;
2582
2583  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2584
2585  const AsmToken &ExclaimTok = Parser.getTok();
2586  if (ExclaimTok.is(AsmToken::Exclaim)) {
2587    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2588                                               ExclaimTok.getLoc()));
2589    Parser.Lex(); // Eat exclaim token
2590    return false;
2591  }
2592
2593  // Also check for an index operand. This is only legal for vector registers,
2594  // but that'll get caught OK in operand matching, so we don't need to
2595  // explicitly filter everything else out here.
2596  if (Parser.getTok().is(AsmToken::LBrac)) {
2597    SMLoc SIdx = Parser.getTok().getLoc();
2598    Parser.Lex(); // Eat left bracket token.
2599
2600    const MCExpr *ImmVal;
2601    if (getParser().ParseExpression(ImmVal))
2602      return true;
2603    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2604    if (!MCE)
2605      return TokError("immediate value expected for vector index");
2606
2607    SMLoc E = Parser.getTok().getLoc();
2608    if (Parser.getTok().isNot(AsmToken::RBrac))
2609      return Error(E, "']' expected");
2610
2611    Parser.Lex(); // Eat right bracket token.
2612
2613    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2614                                                     SIdx, E,
2615                                                     getContext()));
2616  }
2617
2618  return false;
2619}
2620
2621/// MatchCoprocessorOperandName - Try to match a symbolic coprocessor operand
2622/// name for a coprocessor-related instruction. Example: "p1", "p7", "c3",
2623/// "c5", ...
2624static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2625  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2626  // but efficient.
2627  switch (Name.size()) {
2628  default: return -1;
2629  case 2:
2630    if (Name[0] != CoprocOp)
2631      return -1;
2632    switch (Name[1]) {
2633    default:  return -1;
2634    case '0': return 0;
2635    case '1': return 1;
2636    case '2': return 2;
2637    case '3': return 3;
2638    case '4': return 4;
2639    case '5': return 5;
2640    case '6': return 6;
2641    case '7': return 7;
2642    case '8': return 8;
2643    case '9': return 9;
2644    }
2645  case 3:
2646    if (Name[0] != CoprocOp || Name[1] != '1')
2647      return -1;
2648    switch (Name[2]) {
2649    default:  return -1;
2650    case '0': return 10;
2651    case '1': return 11;
2652    case '2': return 12;
2653    case '3': return 13;
2654    case '4': return 14;
2655    case '5': return 15;
2656    }
2657  }
2658}
2659
2660/// parseITCondCode - Try to parse a condition code for an IT instruction.
2661ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2662parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2663  SMLoc S = Parser.getTok().getLoc();
2664  const AsmToken &Tok = Parser.getTok();
2665  if (!Tok.is(AsmToken::Identifier))
2666    return MatchOperand_NoMatch;
2667  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2668    .Case("eq", ARMCC::EQ)
2669    .Case("ne", ARMCC::NE)
2670    .Case("hs", ARMCC::HS)
2671    .Case("cs", ARMCC::HS)
2672    .Case("lo", ARMCC::LO)
2673    .Case("cc", ARMCC::LO)
2674    .Case("mi", ARMCC::MI)
2675    .Case("pl", ARMCC::PL)
2676    .Case("vs", ARMCC::VS)
2677    .Case("vc", ARMCC::VC)
2678    .Case("hi", ARMCC::HI)
2679    .Case("ls", ARMCC::LS)
2680    .Case("ge", ARMCC::GE)
2681    .Case("lt", ARMCC::LT)
2682    .Case("gt", ARMCC::GT)
2683    .Case("le", ARMCC::LE)
2684    .Case("al", ARMCC::AL)
2685    .Default(~0U);
2686  if (CC == ~0U)
2687    return MatchOperand_NoMatch;
2688  Parser.Lex(); // Eat the token.
2689
2690  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2691
2692  return MatchOperand_Success;
2693}
2694
2695/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2696/// token must be an Identifier when called, and if it is a coprocessor
2697/// number, the token is eaten and the operand is added to the operand list.
2698ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2699parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2700  SMLoc S = Parser.getTok().getLoc();
2701  const AsmToken &Tok = Parser.getTok();
2702  if (Tok.isNot(AsmToken::Identifier))
2703    return MatchOperand_NoMatch;
2704
2705  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2706  if (Num == -1)
2707    return MatchOperand_NoMatch;
2708
2709  Parser.Lex(); // Eat identifier token.
2710  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2711  return MatchOperand_Success;
2712}
2713
2714/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2715/// token must be an Identifier when called, and if it is a coprocessor
2716/// register, the token is eaten and the operand is added to the operand list.
2717ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2718parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2719  SMLoc S = Parser.getTok().getLoc();
2720  const AsmToken &Tok = Parser.getTok();
2721  if (Tok.isNot(AsmToken::Identifier))
2722    return MatchOperand_NoMatch;
2723
2724  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2725  if (Reg == -1)
2726    return MatchOperand_NoMatch;
2727
2728  Parser.Lex(); // Eat identifier token.
2729  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2730  return MatchOperand_Success;
2731}
2732
2733/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2734/// coproc_option : '{' imm0_255 '}'
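/// (Illustrative: the trailing "{ 8 }" in "ldc p14, c5, [r1], { 8 }".)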
2735ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2736parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2737  SMLoc S = Parser.getTok().getLoc();
2738
2739  // If this isn't a '{', this isn't a coprocessor immediate operand.
2740  if (Parser.getTok().isNot(AsmToken::LCurly))
2741    return MatchOperand_NoMatch;
2742  Parser.Lex(); // Eat the '{'
2743
2744  const MCExpr *Expr;
2745  SMLoc Loc = Parser.getTok().getLoc();
2746  if (getParser().ParseExpression(Expr)) {
2747    Error(Loc, "illegal expression");
2748    return MatchOperand_ParseFail;
2749  }
2750  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2751  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2752    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2753    return MatchOperand_ParseFail;
2754  }
2755  int Val = CE->getValue();
2756
2757  // Check for and consume the closing '}'
2758  if (Parser.getTok().isNot(AsmToken::RCurly))
2759    return MatchOperand_ParseFail;
2760  SMLoc E = Parser.getTok().getLoc();
2761  Parser.Lex(); // Eat the '}'
2762
2763  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2764  return MatchOperand_Success;
2765}
2766
2767// For register list parsing, we need to map from raw GPR register numbering
2768// to the enumeration values. The enumeration values aren't sorted by
2769// register number due to our using "sp", "lr" and "pc" as canonical names.
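// For example (illustrative), getNextRegister(ARM::R12) is ARM::SP and
// getNextRegister(ARM::LR) is ARM::PC, matching the architectural numbering.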
2770static unsigned getNextRegister(unsigned Reg) {
2771  // If this is a GPR, we need to do it manually, otherwise we can rely
2772  // on the sort ordering of the enumeration since the other reg-classes
2773  // are sane.
2774  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2775    return Reg + 1;
2776  switch(Reg) {
2777  default: llvm_unreachable("Invalid GPR number!");
2778  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2779  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2780  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2781  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2782  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2783  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2784  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2785  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2786  }
2787}
2788
2789// Return the low-subreg of a given Q register.
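// Each Qn register overlaps the D-register pair D(2n)/D(2n+1), so the low
// subregister is always the even-numbered D register.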
2790static unsigned getDRegFromQReg(unsigned QReg) {
2791  switch (QReg) {
2792  default: llvm_unreachable("expected a Q register!");
2793  case ARM::Q0:  return ARM::D0;
2794  case ARM::Q1:  return ARM::D2;
2795  case ARM::Q2:  return ARM::D4;
2796  case ARM::Q3:  return ARM::D6;
2797  case ARM::Q4:  return ARM::D8;
2798  case ARM::Q5:  return ARM::D10;
2799  case ARM::Q6:  return ARM::D12;
2800  case ARM::Q7:  return ARM::D14;
2801  case ARM::Q8:  return ARM::D16;
2802  case ARM::Q9:  return ARM::D18;
2803  case ARM::Q10: return ARM::D20;
2804  case ARM::Q11: return ARM::D22;
2805  case ARM::Q12: return ARM::D24;
2806  case ARM::Q13: return ARM::D26;
2807  case ARM::Q14: return ARM::D28;
2808  case ARM::Q15: return ARM::D30;
2809  }
2810}
2811
2812/// Parse a register list.
2813bool ARMAsmParser::
2814parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2815  assert(Parser.getTok().is(AsmToken::LCurly) &&
2816         "Token is not a Left Curly Brace");
2817  SMLoc S = Parser.getTok().getLoc();
2818  Parser.Lex(); // Eat '{' token.
2819  SMLoc RegLoc = Parser.getTok().getLoc();
2820
2821  // Check the first register in the list to see what register class
2822  // this is a list of.
2823  int Reg = tryParseRegister();
2824  if (Reg == -1)
2825    return Error(RegLoc, "register expected");
2826
2827  // The reglist instructions have at most 16 registers, so reserve
2828  // space for that many.
2829  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2830
2831  // Allow Q regs and just interpret them as the two D sub-registers.
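  // (Illustrative: "q1" in a register list is handled as "d2, d3".)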
2832  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2833    Reg = getDRegFromQReg(Reg);
2834    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2835    ++Reg;
2836  }
2837  const MCRegisterClass *RC;
2838  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2839    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2840  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2841    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2842  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2843    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2844  else
2845    return Error(RegLoc, "invalid register in register list");
2846
2847  // Store the register.
2848  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2849
2850  // This starts immediately after the first register token in the list,
2851  // so we can see either a comma or a minus (range separator) as a legal
2852  // next token.
2853  while (Parser.getTok().is(AsmToken::Comma) ||
2854         Parser.getTok().is(AsmToken::Minus)) {
2855    if (Parser.getTok().is(AsmToken::Minus)) {
2856      Parser.Lex(); // Eat the minus.
2857      SMLoc EndLoc = Parser.getTok().getLoc();
2858      int EndReg = tryParseRegister();
2859      if (EndReg == -1)
2860        return Error(EndLoc, "register expected");
2861      // Allow Q regs and just interpret them as the two D sub-registers.
2862      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2863        EndReg = getDRegFromQReg(EndReg) + 1;
2864      // If the register is the same as the start reg, there's nothing
2865      // more to do.
2866      if (Reg == EndReg)
2867        continue;
2868      // The register must be in the same register class as the first.
2869      if (!RC->contains(EndReg))
2870        return Error(EndLoc, "invalid register in register list");
2871      // Ranges must go from low to high.
2872      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2873        return Error(EndLoc, "bad range in register list");
2874
2875      // Add all the registers in the range to the register list.
2876      while (Reg != EndReg) {
2877        Reg = getNextRegister(Reg);
2878        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2879      }
2880      continue;
2881    }
2882    Parser.Lex(); // Eat the comma.
2883    RegLoc = Parser.getTok().getLoc();
2884    int OldReg = Reg;
2885    const AsmToken RegTok = Parser.getTok();
2886    Reg = tryParseRegister();
2887    if (Reg == -1)
2888      return Error(RegLoc, "register expected");
2889    // Allow Q regs and just interpret them as the two D sub-registers.
2890    bool isQReg = false;
2891    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2892      Reg = getDRegFromQReg(Reg);
2893      isQReg = true;
2894    }
2895    // The register must be in the same register class as the first.
2896    if (!RC->contains(Reg))
2897      return Error(RegLoc, "invalid register in register list");
2898    // List must be monotonically increasing.
2899    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2900      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2901        Warning(RegLoc, "register list not in ascending order");
2902      else
2903        return Error(RegLoc, "register list not in ascending order");
2904    }
2905    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2906      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2907              ") in register list");
2908      continue;
2909    }
2910    // VFP register lists must also be contiguous.
2911    // It's OK to use the enumeration values directly here, as the
2912    // VFP register classes have the enum sorted properly.
2913    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2914        Reg != OldReg + 1)
2915      return Error(RegLoc, "non-contiguous register range");
2916    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2917    if (isQReg)
2918      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2919  }
2920
2921  SMLoc E = Parser.getTok().getLoc();
2922  if (Parser.getTok().isNot(AsmToken::RCurly))
2923    return Error(E, "'}' expected");
2924  Parser.Lex(); // Eat '}' token.
2925
2926  // Push the register list operand.
2927  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2928
2929  // The ARM system instruction variants for LDM/STM have a '^' token here.
2930  if (Parser.getTok().is(AsmToken::Caret)) {
2931    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2932    Parser.Lex(); // Eat '^' token.
2933  }
2934
2935  return false;
2936}
2937
2938// Helper function to parse the lane index for vector lists.
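// Illustrative forms (as handled below): "[2]" selects an indexed lane,
// "[]" is the all-lanes syntax, and the absence of a bracket means no lane
// qualifier at all.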
2939ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2940parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2941  Index = 0; // Always return a defined index value.
2942  if (Parser.getTok().is(AsmToken::LBrac)) {
2943    Parser.Lex(); // Eat the '['.
2944    if (Parser.getTok().is(AsmToken::RBrac)) {
2945      // "Dn[]" is the 'all lanes' syntax.
2946      LaneKind = AllLanes;
2947      Parser.Lex(); // Eat the ']'.
2948      return MatchOperand_Success;
2949    }
2950
2951    // There's an optional '#' token here. Normally there wouldn't be, but
2952    // inline assembly puts one in, and it's friendly to accept that.
2953    if (Parser.getTok().is(AsmToken::Hash))
2954      Parser.Lex(); // Eat the '#'
2955
2956    const MCExpr *LaneIndex;
2957    SMLoc Loc = Parser.getTok().getLoc();
2958    if (getParser().ParseExpression(LaneIndex)) {
2959      Error(Loc, "illegal expression");
2960      return MatchOperand_ParseFail;
2961    }
2962    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2963    if (!CE) {
2964      Error(Loc, "lane index must be empty or an integer");
2965      return MatchOperand_ParseFail;
2966    }
2967    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2968      Error(Parser.getTok().getLoc(), "']' expected");
2969      return MatchOperand_ParseFail;
2970    }
2971    Parser.Lex(); // Eat the ']'.
2972    int64_t Val = CE->getValue();
2973
2974    // FIXME: Make this range check context sensitive for .8, .16, .32.
2975    if (Val < 0 || Val > 7) {
2976      Error(Parser.getTok().getLoc(), "lane index out of range");
2977      return MatchOperand_ParseFail;
2978    }
2979    Index = Val;
2980    LaneKind = IndexedLane;
2981    return MatchOperand_Success;
2982  }
2983  LaneKind = NoLanes;
2984  return MatchOperand_Success;
2985}
2986
2987// Parse a vector register list.
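// Illustrative examples (not exhaustive): "{d0, d1, d2}", "{d0-d3}", "{q0, q1}"
// (Q registers are expanded to their D sub-registers), "{d0[], d1[]}" for the
// all-lanes form, and "{d0[1], d2[1]}" for a double-spaced indexed list.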
2988ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2989parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2990  VectorLaneTy LaneKind;
2991  unsigned LaneIndex;
2992  SMLoc S = Parser.getTok().getLoc();
2993  // As an extension (to match gas), support a plain D register or Q register
2994  // (without enclosing curly braces) as a single- or double-entry list,
2995  // respectively.
2996  if (Parser.getTok().is(AsmToken::Identifier)) {
2997    int Reg = tryParseRegister();
2998    if (Reg == -1)
2999      return MatchOperand_NoMatch;
3000    SMLoc E = Parser.getTok().getLoc();
3001    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3002      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3003      if (Res != MatchOperand_Success)
3004        return Res;
3005      switch (LaneKind) {
3006      case NoLanes:
3007        E = Parser.getTok().getLoc();
3008        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3009        break;
3010      case AllLanes:
3011        E = Parser.getTok().getLoc();
3012        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3013                                                                S, E));
3014        break;
3015      case IndexedLane:
3016        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3017                                                               LaneIndex,
3018                                                               false, S, E));
3019        break;
3020      }
3021      return MatchOperand_Success;
3022    }
3023    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3024      Reg = getDRegFromQReg(Reg);
3025      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3026      if (Res != MatchOperand_Success)
3027        return Res;
3028      switch (LaneKind) {
3029      case NoLanes:
3030        E = Parser.getTok().getLoc();
3031        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3032                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3033        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3034        break;
3035      case AllLanes:
3036        E = Parser.getTok().getLoc();
3037        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3038                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3039        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3040                                                                S, E));
3041        break;
3042      case IndexedLane:
3043        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3044                                                               LaneIndex,
3045                                                               false, S, E));
3046        break;
3047      }
3048      return MatchOperand_Success;
3049    }
3050    Error(S, "vector register expected");
3051    return MatchOperand_ParseFail;
3052  }
3053
3054  if (Parser.getTok().isNot(AsmToken::LCurly))
3055    return MatchOperand_NoMatch;
3056
3057  Parser.Lex(); // Eat '{' token.
3058  SMLoc RegLoc = Parser.getTok().getLoc();
3059
3060  int Reg = tryParseRegister();
3061  if (Reg == -1) {
3062    Error(RegLoc, "register expected");
3063    return MatchOperand_ParseFail;
3064  }
3065  unsigned Count = 1;
3066  int Spacing = 0;
3067  unsigned FirstReg = Reg;
3068  // The list is of D registers, but we also allow Q regs and just interpret
3069  // them as the two D sub-registers.
3070  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3071    FirstReg = Reg = getDRegFromQReg(Reg);
3072    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3073                 // it's ambiguous with four-register single spaced.
3074    ++Reg;
3075    ++Count;
3076  }
3077  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3078    return MatchOperand_ParseFail;
3079
3080  while (Parser.getTok().is(AsmToken::Comma) ||
3081         Parser.getTok().is(AsmToken::Minus)) {
3082    if (Parser.getTok().is(AsmToken::Minus)) {
3083      if (!Spacing)
3084        Spacing = 1; // Register range implies a single spaced list.
3085      else if (Spacing == 2) {
3086        Error(Parser.getTok().getLoc(),
3087              "sequential registers in double spaced list");
3088        return MatchOperand_ParseFail;
3089      }
3090      Parser.Lex(); // Eat the minus.
3091      SMLoc EndLoc = Parser.getTok().getLoc();
3092      int EndReg = tryParseRegister();
3093      if (EndReg == -1) {
3094        Error(EndLoc, "register expected");
3095        return MatchOperand_ParseFail;
3096      }
3097      // Allow Q regs and just interpret them as the two D sub-registers.
3098      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3099        EndReg = getDRegFromQReg(EndReg) + 1;
3100      // If the register is the same as the start reg, there's nothing
3101      // more to do.
3102      if (Reg == EndReg)
3103        continue;
3104      // The register must be in the same register class as the first.
3105      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3106        Error(EndLoc, "invalid register in register list");
3107        return MatchOperand_ParseFail;
3108      }
3109      // Ranges must go from low to high.
3110      if (Reg > EndReg) {
3111        Error(EndLoc, "bad range in register list");
3112        return MatchOperand_ParseFail;
3113      }
3114      // Parse the lane specifier if present.
3115      VectorLaneTy NextLaneKind;
3116      unsigned NextLaneIndex;
3117      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3118        return MatchOperand_ParseFail;
3119      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3120        Error(EndLoc, "mismatched lane index in register list");
3121        return MatchOperand_ParseFail;
3122      }
3123      EndLoc = Parser.getTok().getLoc();
3124
3125      // Add all the registers in the range to the register list.
3126      Count += EndReg - Reg;
3127      Reg = EndReg;
3128      continue;
3129    }
3130    Parser.Lex(); // Eat the comma.
3131    RegLoc = Parser.getTok().getLoc();
3132    int OldReg = Reg;
3133    Reg = tryParseRegister();
3134    if (Reg == -1) {
3135      Error(RegLoc, "register expected");
3136      return MatchOperand_ParseFail;
3137    }
3138    // Vector register lists must be contiguous.
3139    // It's OK to use the enumeration values directly here, as the
3140    // VFP register classes have the enum sorted properly.
3141    //
3142    // The list is of D registers, but we also allow Q regs and just interpret
3143    // them as the two D sub-registers.
3144    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3145      if (!Spacing)
3146        Spacing = 1; // Register range implies a single spaced list.
3147      else if (Spacing == 2) {
3148        Error(RegLoc,
3149              "invalid register in double-spaced list (must be 'D' register')");
3150        return MatchOperand_ParseFail;
3151      }
3152      Reg = getDRegFromQReg(Reg);
3153      if (Reg != OldReg + 1) {
3154        Error(RegLoc, "non-contiguous register range");
3155        return MatchOperand_ParseFail;
3156      }
3157      ++Reg;
3158      Count += 2;
3159      // Parse the lane specifier if present.
3160      VectorLaneTy NextLaneKind;
3161      unsigned NextLaneIndex;
3162      SMLoc EndLoc = Parser.getTok().getLoc();
3163      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3164        return MatchOperand_ParseFail;
3165      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3166        Error(EndLoc, "mismatched lane index in register list");
3167        return MatchOperand_ParseFail;
3168      }
3169      continue;
3170    }
3171    // Normal D register.
3172    // Figure out the register spacing (single or double) of the list if
3173    // we don't know it already.
3174    if (!Spacing)
3175      Spacing = 1 + (Reg == OldReg + 2);
3176
3177    // Just check that it's contiguous and keep going.
3178    if (Reg != OldReg + Spacing) {
3179      Error(RegLoc, "non-contiguous register range");
3180      return MatchOperand_ParseFail;
3181    }
3182    ++Count;
3183    // Parse the lane specifier if present.
3184    VectorLaneTy NextLaneKind;
3185    unsigned NextLaneIndex;
3186    SMLoc EndLoc = Parser.getTok().getLoc();
3187    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3188      return MatchOperand_ParseFail;
3189    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3190      Error(EndLoc, "mismatched lane index in register list");
3191      return MatchOperand_ParseFail;
3192    }
3193  }
3194
3195  SMLoc E = Parser.getTok().getLoc();
3196  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3197    Error(E, "'}' expected");
3198    return MatchOperand_ParseFail;
3199  }
3200  Parser.Lex(); // Eat '}' token.
3201
3202  switch (LaneKind) {
3203  case NoLanes:
3204    // Two-register operands have been converted to the
3205    // composite register classes.
3206    if (Count == 2) {
3207      const MCRegisterClass *RC = (Spacing == 1) ?
3208        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3209        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3210      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3211    }
3212
3213    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3214                                                    (Spacing == 2), S, E));
3215    break;
3216  case AllLanes:
3217    // Two-register operands have been converted to the
3218    // composite register classes.
3219    if (Count == 2) {
3220      const MCRegisterClass *RC = (Spacing == 1) ?
3221        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3222        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3223      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3224    }
3225    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3226                                                            (Spacing == 2),
3227                                                            S, E));
3228    break;
3229  case IndexedLane:
3230    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3231                                                           LaneIndex,
3232                                                           (Spacing == 2),
3233                                                           S, E));
3234    break;
3235  }
3236  return MatchOperand_Success;
3237}
3238
3239/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
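/// Illustrative examples: "dmb ish", "dsb sy", "dmb oshst".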
3240ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3241parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3242  SMLoc S = Parser.getTok().getLoc();
3243  const AsmToken &Tok = Parser.getTok();
3244  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3245  StringRef OptStr = Tok.getString();
3246
3247  unsigned Opt = StringSwitch<unsigned>(OptStr)
3248    .Case("sy",    ARM_MB::SY)
3249    .Case("st",    ARM_MB::ST)
3250    .Case("sh",    ARM_MB::ISH)
3251    .Case("ish",   ARM_MB::ISH)
3252    .Case("shst",  ARM_MB::ISHST)
3253    .Case("ishst", ARM_MB::ISHST)
3254    .Case("nsh",   ARM_MB::NSH)
3255    .Case("un",    ARM_MB::NSH)
3256    .Case("nshst", ARM_MB::NSHST)
3257    .Case("unst",  ARM_MB::NSHST)
3258    .Case("osh",   ARM_MB::OSH)
3259    .Case("oshst", ARM_MB::OSHST)
3260    .Default(~0U);
3261
3262  if (Opt == ~0U)
3263    return MatchOperand_NoMatch;
3264
3265  Parser.Lex(); // Eat identifier token.
3266  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3267  return MatchOperand_Success;
3268}
3269
3270/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
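/// Illustrative examples: "cpsie if", "cpsid aif" (the flags are any subset of
/// 'a', 'i', 'f', or the literal "none").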
3271ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3272parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3273  SMLoc S = Parser.getTok().getLoc();
3274  const AsmToken &Tok = Parser.getTok();
3275  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3276  StringRef IFlagsStr = Tok.getString();
3277
3278  // An iflags string of "none" is interpreted to mean that none of the AIF
3279  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3280  unsigned IFlags = 0;
3281  if (IFlagsStr != "none") {
3282    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3283      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3284        .Case("a", ARM_PROC::A)
3285        .Case("i", ARM_PROC::I)
3286        .Case("f", ARM_PROC::F)
3287        .Default(~0U);
3288
3289      // If some specific iflag is already set, it means that some letter is
3290      // present more than once, this is not acceptable.
3291      if (Flag == ~0U || (IFlags & Flag))
3292        return MatchOperand_NoMatch;
3293
3294      IFlags |= Flag;
3295    }
3296  }
3297
3298  Parser.Lex(); // Eat identifier token.
3299  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3300  return MatchOperand_Success;
3301}
3302
3303/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
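/// Illustrative examples: "msr apsr_nzcvq, r0", "msr cpsr_fc, r1",
/// "msr spsr_fsxc, r2"; on M-class cores, e.g. "msr primask, r0".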
3304ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3305parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3306  SMLoc S = Parser.getTok().getLoc();
3307  const AsmToken &Tok = Parser.getTok();
3308  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3309  StringRef Mask = Tok.getString();
3310
3311  if (isMClass()) {
3312    // See ARMv6-M 10.1.1
3313    std::string Name = Mask.lower();
3314    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3315      .Case("apsr", 0)
3316      .Case("iapsr", 1)
3317      .Case("eapsr", 2)
3318      .Case("xpsr", 3)
3319      .Case("ipsr", 5)
3320      .Case("epsr", 6)
3321      .Case("iepsr", 7)
3322      .Case("msp", 8)
3323      .Case("psp", 9)
3324      .Case("primask", 16)
3325      .Case("basepri", 17)
3326      .Case("basepri_max", 18)
3327      .Case("faultmask", 19)
3328      .Case("control", 20)
3329      .Default(~0U);
3330
3331    if (FlagsVal == ~0U)
3332      return MatchOperand_NoMatch;
3333
3334    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3335      // basepri, basepri_max and faultmask only valid for V7m.
3336      return MatchOperand_NoMatch;
3337
3338    Parser.Lex(); // Eat identifier token.
3339    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3340    return MatchOperand_Success;
3341  }
3342
3343  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3344  size_t Start = 0, Next = Mask.find('_');
3345  StringRef Flags = "";
3346  std::string SpecReg = Mask.slice(Start, Next).lower();
3347  if (Next != StringRef::npos)
3348    Flags = Mask.slice(Next+1, Mask.size());
3349
3350  // FlagsVal contains the complete mask:
3351  // 3-0: Mask
3352  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3353  unsigned FlagsVal = 0;
3354
3355  if (SpecReg == "apsr") {
3356    FlagsVal = StringSwitch<unsigned>(Flags)
3357    .Case("nzcvq",  0x8) // same as CPSR_f
3358    .Case("g",      0x4) // same as CPSR_s
3359    .Case("nzcvqg", 0xc) // same as CPSR_fs
3360    .Default(~0U);
3361
3362    if (FlagsVal == ~0U) {
3363      if (!Flags.empty())
3364        return MatchOperand_NoMatch;
3365      else
3366        FlagsVal = 8; // No flag
3367    }
3368  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3369    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3370    if (Flags == "all" || Flags == "")
3371      Flags = "fc";
3372    for (int i = 0, e = Flags.size(); i != e; ++i) {
3373      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3374      .Case("c", 1)
3375      .Case("x", 2)
3376      .Case("s", 4)
3377      .Case("f", 8)
3378      .Default(~0U);
3379
3380      // If some specific flag is already set, it means that some letter is
3381      // present more than once, this is not acceptable.
3382      if (Flag == ~0U || (FlagsVal & Flag))
3383        return MatchOperand_NoMatch;
3384      FlagsVal |= Flag;
3385    }
3386  } else // No match for special register.
3387    return MatchOperand_NoMatch;
3388
3389  // Special register without flags is NOT equivalent to "fc" flags.
3390  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3391  // two lines would enable gas compatibility at the expense of breaking
3392  // round-tripping.
3393  //
3394  // if (!FlagsVal)
3395  //  FlagsVal = 0x9;
3396
3397  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3398  if (SpecReg == "spsr")
3399    FlagsVal |= 16;
3400
3401  Parser.Lex(); // Eat identifier token.
3402  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3403  return MatchOperand_Success;
3404}
3405
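// parsePKHImm - Parse the shift operand of a PKH-style instruction, e.g. the
// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" (illustrative example; the caller
// supplies the expected operator name and the legal range).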
3406ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3407parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3408            int Low, int High) {
3409  const AsmToken &Tok = Parser.getTok();
3410  if (Tok.isNot(AsmToken::Identifier)) {
3411    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3412    return MatchOperand_ParseFail;
3413  }
3414  StringRef ShiftName = Tok.getString();
3415  std::string LowerOp = Op.lower();
3416  std::string UpperOp = Op.upper();
3417  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3418    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3419    return MatchOperand_ParseFail;
3420  }
3421  Parser.Lex(); // Eat shift type token.
3422
3423  // There must be a '#' and a shift amount.
3424  if (Parser.getTok().isNot(AsmToken::Hash) &&
3425      Parser.getTok().isNot(AsmToken::Dollar)) {
3426    Error(Parser.getTok().getLoc(), "'#' expected");
3427    return MatchOperand_ParseFail;
3428  }
3429  Parser.Lex(); // Eat hash token.
3430
3431  const MCExpr *ShiftAmount;
3432  SMLoc Loc = Parser.getTok().getLoc();
3433  if (getParser().ParseExpression(ShiftAmount)) {
3434    Error(Loc, "illegal expression");
3435    return MatchOperand_ParseFail;
3436  }
3437  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3438  if (!CE) {
3439    Error(Loc, "constant expression expected");
3440    return MatchOperand_ParseFail;
3441  }
3442  int Val = CE->getValue();
3443  if (Val < Low || Val > High) {
3444    Error(Loc, "immediate value out of range");
3445    return MatchOperand_ParseFail;
3446  }
3447
3448  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3449
3450  return MatchOperand_Success;
3451}
3452
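// parseSetEndImm - Parse the "be"/"le" operand of SETEND, e.g. "setend be"
// (illustrative example).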
3453ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3454parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3455  const AsmToken &Tok = Parser.getTok();
3456  SMLoc S = Tok.getLoc();
3457  if (Tok.isNot(AsmToken::Identifier)) {
3458    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3459    return MatchOperand_ParseFail;
3460  }
3461  int Val = StringSwitch<int>(Tok.getString())
3462    .Case("be", 1)
3463    .Case("le", 0)
3464    .Default(-1);
3465  Parser.Lex(); // Eat the token.
3466
3467  if (Val == -1) {
3468    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3469    return MatchOperand_ParseFail;
3470  }
3471  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3472                                                                  getContext()),
3473                                           S, Parser.getTok().getLoc()));
3474  return MatchOperand_Success;
3475}
3476
3477/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3478/// instructions. Legal values are:
3479///     lsl #n  'n' in [0,31]
3480///     asr #n  'n' in [1,32]
3481///             n == 32 encoded as n == 0.
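/// Illustrative example: the "lsl #4" in "ssat r0, #8, r1, lsl #4".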
3482ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3483parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3484  const AsmToken &Tok = Parser.getTok();
3485  SMLoc S = Tok.getLoc();
3486  if (Tok.isNot(AsmToken::Identifier)) {
3487    Error(S, "shift operator 'asr' or 'lsl' expected");
3488    return MatchOperand_ParseFail;
3489  }
3490  StringRef ShiftName = Tok.getString();
3491  bool isASR;
3492  if (ShiftName == "lsl" || ShiftName == "LSL")
3493    isASR = false;
3494  else if (ShiftName == "asr" || ShiftName == "ASR")
3495    isASR = true;
3496  else {
3497    Error(S, "shift operator 'asr' or 'lsl' expected");
3498    return MatchOperand_ParseFail;
3499  }
3500  Parser.Lex(); // Eat the operator.
3501
3502  // A '#' and a shift amount.
3503  if (Parser.getTok().isNot(AsmToken::Hash) &&
3504      Parser.getTok().isNot(AsmToken::Dollar)) {
3505    Error(Parser.getTok().getLoc(), "'#' expected");
3506    return MatchOperand_ParseFail;
3507  }
3508  Parser.Lex(); // Eat hash token.
3509
3510  const MCExpr *ShiftAmount;
3511  SMLoc E = Parser.getTok().getLoc();
3512  if (getParser().ParseExpression(ShiftAmount)) {
3513    Error(E, "malformed shift expression");
3514    return MatchOperand_ParseFail;
3515  }
3516  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3517  if (!CE) {
3518    Error(E, "shift amount must be an immediate");
3519    return MatchOperand_ParseFail;
3520  }
3521
3522  int64_t Val = CE->getValue();
3523  if (isASR) {
3524    // Shift amount must be in [1,32]
3525    if (Val < 1 || Val > 32) {
3526      Error(E, "'asr' shift amount must be in range [1,32]");
3527      return MatchOperand_ParseFail;
3528    }
3529    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3530    if (isThumb() && Val == 32) {
3531      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3532      return MatchOperand_ParseFail;
3533    }
3534    if (Val == 32) Val = 0;
3535  } else {
3536    // Shift amount must be in [0,31]
3537    if (Val < 0 || Val > 31) {
3538      Error(E, "'lsr' shift amount must be in range [0,31]");
3539      return MatchOperand_ParseFail;
3540    }
3541  }
3542
3543  E = Parser.getTok().getLoc();
3544  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3545
3546  return MatchOperand_Success;
3547}
3548
3549/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3550/// of instructions. Legal values are:
3551///     ror #n  'n' in {0, 8, 16, 24}
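/// Illustrative example: the "ror #16" in "sxtb r0, r1, ror #16".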
3552ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3553parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3554  const AsmToken &Tok = Parser.getTok();
3555  SMLoc S = Tok.getLoc();
3556  if (Tok.isNot(AsmToken::Identifier))
3557    return MatchOperand_NoMatch;
3558  StringRef ShiftName = Tok.getString();
3559  if (ShiftName != "ror" && ShiftName != "ROR")
3560    return MatchOperand_NoMatch;
3561  Parser.Lex(); // Eat the operator.
3562
3563  // A '#' and a rotate amount.
3564  if (Parser.getTok().isNot(AsmToken::Hash) &&
3565      Parser.getTok().isNot(AsmToken::Dollar)) {
3566    Error(Parser.getTok().getLoc(), "'#' expected");
3567    return MatchOperand_ParseFail;
3568  }
3569  Parser.Lex(); // Eat hash token.
3570
3571  const MCExpr *ShiftAmount;
3572  SMLoc E = Parser.getTok().getLoc();
3573  if (getParser().ParseExpression(ShiftAmount)) {
3574    Error(E, "malformed rotate expression");
3575    return MatchOperand_ParseFail;
3576  }
3577  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3578  if (!CE) {
3579    Error(E, "rotate amount must be an immediate");
3580    return MatchOperand_ParseFail;
3581  }
3582
3583  int64_t Val = CE->getValue();
3584  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3585  // normally, zero is represented in asm by omitting the rotate operand
3586  // entirely.
3587  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3588    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3589    return MatchOperand_ParseFail;
3590  }
3591
3592  E = Parser.getTok().getLoc();
3593  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3594
3595  return MatchOperand_Success;
3596}
3597
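// parseBitfield - Parse the "#lsb, #width" operand pair of the bitfield
// instructions, e.g. the "#8, #4" in "bfi r0, r1, #8, #4" (illustrative
// example).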
3598ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3599parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3600  SMLoc S = Parser.getTok().getLoc();
3601  // The bitfield descriptor is really two operands, the LSB and the width.
3602  if (Parser.getTok().isNot(AsmToken::Hash) &&
3603      Parser.getTok().isNot(AsmToken::Dollar)) {
3604    Error(Parser.getTok().getLoc(), "'#' expected");
3605    return MatchOperand_ParseFail;
3606  }
3607  Parser.Lex(); // Eat hash token.
3608
3609  const MCExpr *LSBExpr;
3610  SMLoc E = Parser.getTok().getLoc();
3611  if (getParser().ParseExpression(LSBExpr)) {
3612    Error(E, "malformed immediate expression");
3613    return MatchOperand_ParseFail;
3614  }
3615  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3616  if (!CE) {
3617    Error(E, "'lsb' operand must be an immediate");
3618    return MatchOperand_ParseFail;
3619  }
3620
3621  int64_t LSB = CE->getValue();
3622  // The LSB must be in the range [0,31]
3623  if (LSB < 0 || LSB > 31) {
3624    Error(E, "'lsb' operand must be in the range [0,31]");
3625    return MatchOperand_ParseFail;
3626  }
3627  E = Parser.getTok().getLoc();
3628
3629  // Expect another immediate operand.
3630  if (Parser.getTok().isNot(AsmToken::Comma)) {
3631    Error(Parser.getTok().getLoc(), "too few operands");
3632    return MatchOperand_ParseFail;
3633  }
3634  Parser.Lex(); // Eat comma token.
3635  if (Parser.getTok().isNot(AsmToken::Hash) &&
3636      Parser.getTok().isNot(AsmToken::Dollar)) {
3637    Error(Parser.getTok().getLoc(), "'#' expected");
3638    return MatchOperand_ParseFail;
3639  }
3640  Parser.Lex(); // Eat hash token.
3641
3642  const MCExpr *WidthExpr;
3643  if (getParser().ParseExpression(WidthExpr)) {
3644    Error(E, "malformed immediate expression");
3645    return MatchOperand_ParseFail;
3646  }
3647  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3648  if (!CE) {
3649    Error(E, "'width' operand must be an immediate");
3650    return MatchOperand_ParseFail;
3651  }
3652
3653  int64_t Width = CE->getValue();
3654  // The width must be in the range [1,32-lsb]
3655  if (Width < 1 || Width > 32 - LSB) {
3656    Error(E, "'width' operand must be in the range [1,32-lsb]");
3657    return MatchOperand_ParseFail;
3658  }
3659  E = Parser.getTok().getLoc();
3660
3661  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3662
3663  return MatchOperand_Success;
3664}
3665
3666ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3667parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3668  // Check for a post-index addressing register operand. Specifically:
3669  // postidx_reg := '+' register {, shift}
3670  //              | '-' register {, shift}
3671  //              | register {, shift}
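  //
  // Illustrative example: the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2".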
3672
3673  // This method must return MatchOperand_NoMatch without consuming any tokens
3674  // in the case where there is no match, as other alternatives will be tried by
3675  // other parse methods.
3676  AsmToken Tok = Parser.getTok();
3677  SMLoc S = Tok.getLoc();
3678  bool haveEaten = false;
3679  bool isAdd = true;
3680  int Reg = -1;
3681  if (Tok.is(AsmToken::Plus)) {
3682    Parser.Lex(); // Eat the '+' token.
3683    haveEaten = true;
3684  } else if (Tok.is(AsmToken::Minus)) {
3685    Parser.Lex(); // Eat the '-' token.
3686    isAdd = false;
3687    haveEaten = true;
3688  }
3689  if (Parser.getTok().is(AsmToken::Identifier))
3690    Reg = tryParseRegister();
3691  if (Reg == -1) {
3692    if (!haveEaten)
3693      return MatchOperand_NoMatch;
3694    Error(Parser.getTok().getLoc(), "register expected");
3695    return MatchOperand_ParseFail;
3696  }
3697  SMLoc E = Parser.getTok().getLoc();
3698
3699  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3700  unsigned ShiftImm = 0;
3701  if (Parser.getTok().is(AsmToken::Comma)) {
3702    Parser.Lex(); // Eat the ','.
3703    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3704      return MatchOperand_ParseFail;
3705  }
3706
3707  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3708                                                  ShiftImm, S, E));
3709
3710  return MatchOperand_Success;
3711}
3712
3713ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3714parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3715  // Check for a post-index addressing operand. Specifically:
3716  // am3offset := '+' register
3717  //              | '-' register
3718  //              | register
3719  //              | # imm
3720  //              | # + imm
3721  //              | # - imm
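  //
  // Illustrative example: the "#4" in "ldrd r0, r1, [r2], #4", or the "-r3"
  // in "ldrh r0, [r1], -r3".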
3722
3723  // This method must return MatchOperand_NoMatch without consuming any tokens
3724  // in the case where there is no match, as other alternatives will be tried by
3725  // other parse methods.
3726  AsmToken Tok = Parser.getTok();
3727  SMLoc S = Tok.getLoc();
3728
3729  // Do immediates first, as we always parse those if we have a '#'.
3730  if (Parser.getTok().is(AsmToken::Hash) ||
3731      Parser.getTok().is(AsmToken::Dollar)) {
3732    Parser.Lex(); // Eat the '#'.
3733    // Explicitly look for a '-', as we need to encode negative zero
3734    // differently.
3735    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3736    const MCExpr *Offset;
3737    if (getParser().ParseExpression(Offset))
3738      return MatchOperand_ParseFail;
3739    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3740    if (!CE) {
3741      Error(S, "constant expression expected");
3742      return MatchOperand_ParseFail;
3743    }
3744    SMLoc E = Tok.getLoc();
3745    // Negative zero is encoded as the flag value INT32_MIN.
3746    int32_t Val = CE->getValue();
3747    if (isNegative && Val == 0)
3748      Val = INT32_MIN;
3749
3750    Operands.push_back(
3751      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3752
3753    return MatchOperand_Success;
3754  }
3755
3756
3757  bool haveEaten = false;
3758  bool isAdd = true;
3759  int Reg = -1;
3760  if (Tok.is(AsmToken::Plus)) {
3761    Parser.Lex(); // Eat the '+' token.
3762    haveEaten = true;
3763  } else if (Tok.is(AsmToken::Minus)) {
3764    Parser.Lex(); // Eat the '-' token.
3765    isAdd = false;
3766    haveEaten = true;
3767  }
3768  if (Parser.getTok().is(AsmToken::Identifier))
3769    Reg = tryParseRegister();
3770  if (Reg == -1) {
3771    if (!haveEaten)
3772      return MatchOperand_NoMatch;
3773    Error(Parser.getTok().getLoc(), "register expected");
3774    return MatchOperand_ParseFail;
3775  }
3776  SMLoc E = Parser.getTok().getLoc();
3777
3778  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3779                                                  0, S, E));
3780
3781  return MatchOperand_Success;
3782}
3783
3784/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3785/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3786/// when they refer to multiple MIOperands inside a single one.
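/// Illustrative example: "ldrd r0, r1, [r2, #8]!" - the writeback destination
/// is tied to the base register inside the single memory operand, which is why
/// a dummy placeholder operand is created below.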
3787bool ARMAsmParser::
3788cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3789             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3790  // Rt, Rt2
3791  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3792  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3793  // Create a writeback register dummy placeholder.
3794  Inst.addOperand(MCOperand::CreateReg(0));
3795  // addr
3796  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3797  // pred
3798  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3799  return true;
3800}
3801
3802/// cvtT2StrdPre - Convert parsed operands to MCInst.
3803/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3804/// when they refer to multiple MIOperands inside a single one.
3805bool ARMAsmParser::
3806cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3807             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3808  // Create a writeback register dummy placeholder.
3809  Inst.addOperand(MCOperand::CreateReg(0));
3810  // Rt, Rt2
3811  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3812  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3813  // addr
3814  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3815  // pred
3816  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3817  return true;
3818}
3819
3820/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3821/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3822/// when they refer to multiple MIOperands inside a single one.
3823bool ARMAsmParser::
3824cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3825                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3826  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3827
3828  // Create a writeback register dummy placeholder.
3829  Inst.addOperand(MCOperand::CreateImm(0));
3830
3831  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3832  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3833  return true;
3834}
3835
3836/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3837/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3838/// when they refer to multiple MIOperands inside a single one.
3839bool ARMAsmParser::
3840cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3841                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3842  // Create a writeback register dummy placeholder.
3843  Inst.addOperand(MCOperand::CreateImm(0));
3844  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3845  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3846  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3847  return true;
3848}
3849
3850/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3851/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3852/// when they refer to multiple MIOperands inside a single one.
3853bool ARMAsmParser::
3854cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3855                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3856  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3857
3858  // Create a writeback register dummy placeholder.
3859  Inst.addOperand(MCOperand::CreateImm(0));
3860
3861  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3862  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3863  return true;
3864}
3865
3866/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3867/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3868/// when they refer to multiple MIOperands inside a single one.
3869bool ARMAsmParser::
3870cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3871                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3872  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3873
3874  // Create a writeback register dummy placeholder.
3875  Inst.addOperand(MCOperand::CreateImm(0));
3876
3877  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882
3883/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3884/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3885/// when they refer to multiple MIOperands inside a single one.
3886bool ARMAsmParser::
3887cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3888                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3889  // Create a writeback register dummy placeholder.
3890  Inst.addOperand(MCOperand::CreateImm(0));
3891  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3892  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3893  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3894  return true;
3895}
3896
3897/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3898/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3899/// when they refer to multiple MIOperands inside a single one.
3900bool ARMAsmParser::
3901cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3902                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3903  // Create a writeback register dummy placeholder.
3904  Inst.addOperand(MCOperand::CreateImm(0));
3905  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3906  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3907  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3908  return true;
3909}
3910
3911/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3912/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3913/// when they refer to multiple MIOperands inside a single one.
3914bool ARMAsmParser::
3915cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3916                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3917  // Create a writeback register dummy placeholder.
3918  Inst.addOperand(MCOperand::CreateImm(0));
3919  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3920  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3921  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3922  return true;
3923}
3924
3925/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3926/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3927/// when they refer to multiple MIOperands inside a single one.
3928bool ARMAsmParser::
3929cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3930                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3931  // Rt
3932  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3933  // Create a writeback register dummy placeholder.
3934  Inst.addOperand(MCOperand::CreateImm(0));
3935  // addr
3936  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3937  // offset
3938  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3939  // pred
3940  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3941  return true;
3942}
3943
3944/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3945/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3946/// when they refer to multiple MIOperands inside a single one.
3947bool ARMAsmParser::
3948cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3949                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3950  // Rt
3951  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3952  // Create a writeback register dummy placeholder.
3953  Inst.addOperand(MCOperand::CreateImm(0));
3954  // addr
3955  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3956  // offset
3957  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3958  // pred
3959  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3960  return true;
3961}
3962
3963/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3964/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3965/// when they refer to multiple MIOperands inside a single one.
3966bool ARMAsmParser::
3967cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3968                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3969  // Create a writeback register dummy placeholder.
3970  Inst.addOperand(MCOperand::CreateImm(0));
3971  // Rt
3972  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3973  // addr
3974  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3975  // offset
3976  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3977  // pred
3978  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3979  return true;
3980}
3981
3982/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3983/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3984/// when they refer to multiple MIOperands inside a single one.
3985bool ARMAsmParser::
3986cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3987                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3988  // Create a writeback register dummy placeholder.
3989  Inst.addOperand(MCOperand::CreateImm(0));
3990  // Rt
3991  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3992  // addr
3993  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3994  // offset
3995  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3996  // pred
3997  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3998  return true;
3999}
4000
4001/// cvtLdrdPre - Convert parsed operands to MCInst.
4002/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4003/// when they refer to multiple MIOperands inside a single one.
4004bool ARMAsmParser::
4005cvtLdrdPre(MCInst &Inst, unsigned Opcode,
4006           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4007  // Rt, Rt2
4008  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4009  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4010  // Create a writeback register dummy placeholder.
4011  Inst.addOperand(MCOperand::CreateImm(0));
4012  // addr
4013  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4014  // pred
4015  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4016  return true;
4017}
4018
4019/// cvtStrdPre - Convert parsed operands to MCInst.
4020/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4021/// when they refer to multiple MIOperands inside a single one.
4022bool ARMAsmParser::
4023cvtStrdPre(MCInst &Inst, unsigned Opcode,
4024           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4025  // Create a writeback register dummy placeholder.
4026  Inst.addOperand(MCOperand::CreateImm(0));
4027  // Rt, Rt2
4028  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4029  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4030  // addr
4031  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4032  // pred
4033  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4034  return true;
4035}
4036
4037/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4038/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4039/// when they refer to multiple MIOperands inside a single one.
4040bool ARMAsmParser::
4041cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4042                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4043  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4044  // Create a writeback register dummy placeholder.
4045  Inst.addOperand(MCOperand::CreateImm(0));
4046  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4047  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4048  return true;
4049}
4050
4051/// cvtThumbMultiply - Convert parsed operands to MCInst.
4052/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4053/// when they refer to multiple MIOperands inside a single one.
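/// Illustrative example: "muls r0, r1, r0", where the destination register
/// must match one of the source registers.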
4054bool ARMAsmParser::
4055cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4056           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4057  // The second source operand must be the same register as the destination
4058  // operand.
4059  if (Operands.size() == 6 &&
4060      (((ARMOperand*)Operands[3])->getReg() !=
4061       ((ARMOperand*)Operands[5])->getReg()) &&
4062      (((ARMOperand*)Operands[3])->getReg() !=
4063       ((ARMOperand*)Operands[4])->getReg())) {
4064    Error(Operands[3]->getStartLoc(),
4065          "destination register must match source register");
4066    return false;
4067  }
4068  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4069  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4070  // If we have a three-operand form, make sure to set Rn to be the operand
4071  // that isn't the same as Rd.
4072  unsigned RegOp = 4;
4073  if (Operands.size() == 6 &&
4074      ((ARMOperand*)Operands[4])->getReg() ==
4075        ((ARMOperand*)Operands[3])->getReg())
4076    RegOp = 5;
4077  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4078  Inst.addOperand(Inst.getOperand(0));
4079  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4080
4081  return true;
4082}
4083
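// cvtVLDwbFixed - Convert parsed operands to MCInst for a vector load with
// fixed (post-increment) writeback, e.g. "vld1.32 {d0, d1}, [r0]!"
// (illustrative example).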
4084bool ARMAsmParser::
4085cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4086              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4087  // Vd
4088  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4089  // Create a writeback register dummy placeholder.
4090  Inst.addOperand(MCOperand::CreateImm(0));
4091  // Vn
4092  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4093  // pred
4094  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4095  return true;
4096}
4097
4098bool ARMAsmParser::
4099cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4100                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4101  // Vd
4102  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4103  // Create a writeback register dummy placeholder.
4104  Inst.addOperand(MCOperand::CreateImm(0));
4105  // Vn
4106  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4107  // Vm
4108  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4109  // pred
4110  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4111  return true;
4112}
4113
4114bool ARMAsmParser::
4115cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4116              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4117  // Create a writeback register dummy placeholder.
4118  Inst.addOperand(MCOperand::CreateImm(0));
4119  // Vn
4120  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4121  // Vt
4122  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4123  // pred
4124  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4125  return true;
4126}
4127
4128bool ARMAsmParser::
4129cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4130                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4131  // Create a writeback register dummy placeholder.
4132  Inst.addOperand(MCOperand::CreateImm(0));
4133  // Vn
4134  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4135  // Vm
4136  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4137  // Vt
4138  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4139  // pred
4140  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4141  return true;
4142}
4143
4144/// Parse an ARM memory expression. Return false if successful; otherwise emit
4145/// an error and return true. The first token must be a '[' when called.
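/// Illustrative examples: "[r0]", "[r0, #4]!", "[r0, r1, lsl #2]", and the
/// NEON alignment form handled below, e.g. "[r0, :64]".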
4146bool ARMAsmParser::
4147parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4148  SMLoc S, E;
4149  assert(Parser.getTok().is(AsmToken::LBrac) &&
4150         "Token is not a Left Bracket");
4151  S = Parser.getTok().getLoc();
4152  Parser.Lex(); // Eat left bracket token.
4153
4154  const AsmToken &BaseRegTok = Parser.getTok();
4155  int BaseRegNum = tryParseRegister();
4156  if (BaseRegNum == -1)
4157    return Error(BaseRegTok.getLoc(), "register expected");
4158
4159  // The next token must either be a comma or a closing bracket.
4160  const AsmToken &Tok = Parser.getTok();
4161  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4162    return Error(Tok.getLoc(), "malformed memory operand");
4163
4164  if (Tok.is(AsmToken::RBrac)) {
4165    E = Tok.getLoc();
4166    Parser.Lex(); // Eat right bracket token.
4167
4168    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4169                                             0, 0, false, S, E));
4170
4171    // If there's a pre-indexing writeback marker, '!', just add it as a token
4172    // operand. It's rather odd, but syntactically valid.
4173    if (Parser.getTok().is(AsmToken::Exclaim)) {
4174      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4175      Parser.Lex(); // Eat the '!'.
4176    }
4177
4178    return false;
4179  }
4180
4181  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4182  Parser.Lex(); // Eat the comma.
4183
4184  // If we have a ':', it's an alignment specifier.
4185  if (Parser.getTok().is(AsmToken::Colon)) {
4186    Parser.Lex(); // Eat the ':'.
4187    E = Parser.getTok().getLoc();
4188
4189    const MCExpr *Expr;
4190    if (getParser().ParseExpression(Expr))
4191     return true;
4192
4193    // The expression has to be a constant. Memory references with relocations
4194    // don't come through here, as they use the <label> forms of the relevant
4195    // instructions.
4196    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4197    if (!CE)
4198      return Error (E, "constant expression expected");
4199
4200    unsigned Align = 0;
4201    switch (CE->getValue()) {
4202    default:
4203      return Error(E,
4204                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4205    case 16:  Align = 2; break;
4206    case 32:  Align = 4; break;
4207    case 64:  Align = 8; break;
4208    case 128: Align = 16; break;
4209    case 256: Align = 32; break;
4210    }
4211
4212    // Now we should have the closing ']'
4213    E = Parser.getTok().getLoc();
4214    if (Parser.getTok().isNot(AsmToken::RBrac))
4215      return Error(E, "']' expected");
4216    Parser.Lex(); // Eat right bracket token.
4217
4218    // Don't worry about range checking the value here. That's handled by
4219    // the is*() predicates.
4220    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4221                                             ARM_AM::no_shift, 0, Align,
4222                                             false, S, E));
4223
4224    // If there's a pre-indexing writeback marker, '!', just add it as a token
4225    // operand.
4226    if (Parser.getTok().is(AsmToken::Exclaim)) {
4227      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4228      Parser.Lex(); // Eat the '!'.
4229    }
4230
4231    return false;
4232  }
4233
4234  // If we have a '#', it's an immediate offset, else assume it's a register
4235  // offset. Be friendly and also accept a plain integer (without a leading
4236  // hash) for gas compatibility.
4237  if (Parser.getTok().is(AsmToken::Hash) ||
4238      Parser.getTok().is(AsmToken::Dollar) ||
4239      Parser.getTok().is(AsmToken::Integer)) {
4240    if (Parser.getTok().isNot(AsmToken::Integer))
4241      Parser.Lex(); // Eat the '#'.
4242    E = Parser.getTok().getLoc();
4243
4244    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4245    const MCExpr *Offset;
4246    if (getParser().ParseExpression(Offset))
4247     return true;
4248
4249    // The expression has to be a constant. Memory references with relocations
4250    // don't come through here, as they use the <label> forms of the relevant
4251    // instructions.
4252    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4253    if (!CE)
4254      return Error (E, "constant expression expected");
4255
4256    // If the constant was #-0, represent it as INT32_MIN.
4257    int32_t Val = CE->getValue();
4258    if (isNegative && Val == 0)
4259      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4260
4261    // Now we should have the closing ']'
4262    E = Parser.getTok().getLoc();
4263    if (Parser.getTok().isNot(AsmToken::RBrac))
4264      return Error(E, "']' expected");
4265    Parser.Lex(); // Eat right bracket token.
4266
4267    // Don't worry about range checking the value here. That's handled by
4268    // the is*() predicates.
4269    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4270                                             ARM_AM::no_shift, 0, 0,
4271                                             false, S, E));
4272
4273    // If there's a pre-indexing writeback marker, '!', just add it as a token
4274    // operand.
4275    if (Parser.getTok().is(AsmToken::Exclaim)) {
4276      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4277      Parser.Lex(); // Eat the '!'.
4278    }
4279
4280    return false;
4281  }
4282
4283  // The register offset is optionally preceded by a '+' or '-'
4284  bool isNegative = false;
4285  if (Parser.getTok().is(AsmToken::Minus)) {
4286    isNegative = true;
4287    Parser.Lex(); // Eat the '-'.
4288  } else if (Parser.getTok().is(AsmToken::Plus)) {
4289    // Nothing to do.
4290    Parser.Lex(); // Eat the '+'.
4291  }
4292
4293  E = Parser.getTok().getLoc();
4294  int OffsetRegNum = tryParseRegister();
4295  if (OffsetRegNum == -1)
4296    return Error(E, "register expected");
4297
4298  // If there's a shift operator, handle it.
4299  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4300  unsigned ShiftImm = 0;
4301  if (Parser.getTok().is(AsmToken::Comma)) {
4302    Parser.Lex(); // Eat the ','.
4303    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4304      return true;
4305  }
4306
4307  // Now we should have the closing ']'
4308  E = Parser.getTok().getLoc();
4309  if (Parser.getTok().isNot(AsmToken::RBrac))
4310    return Error(E, "']' expected");
4311  Parser.Lex(); // Eat right bracket token.
4312
4313  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4314                                           ShiftType, ShiftImm, 0, isNegative,
4315                                           S, E));
4316
4317  // If there's a pre-indexing writeback marker, '!', just add it as a token
4318  // operand.
4319  if (Parser.getTok().is(AsmToken::Exclaim)) {
4320    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4321    Parser.Lex(); // Eat the '!'.
4322  }
4323
4324  return false;
4325}
4326
4327/// parseMemRegOffsetShift - one of these two:
4328///   ( lsl | lsr | asr | ror ) , # shift_amount
4329///   rrx
4330/// return true on error, false if the shift was successfully parsed.
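/// For example (illustrative): in "ldr r0, [r1, r2, lsl #2]" this routine
/// parses the "lsl #2" portion, and in "ldr r0, [r1, r2, rrx]" it parses the
/// trailing "rrx".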
4331bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4332                                          unsigned &Amount) {
4333  SMLoc Loc = Parser.getTok().getLoc();
4334  const AsmToken &Tok = Parser.getTok();
4335  if (Tok.isNot(AsmToken::Identifier))
4336    return true;
4337  StringRef ShiftName = Tok.getString();
4338  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4339      ShiftName == "asl" || ShiftName == "ASL")
4340    St = ARM_AM::lsl;
4341  else if (ShiftName == "lsr" || ShiftName == "LSR")
4342    St = ARM_AM::lsr;
4343  else if (ShiftName == "asr" || ShiftName == "ASR")
4344    St = ARM_AM::asr;
4345  else if (ShiftName == "ror" || ShiftName == "ROR")
4346    St = ARM_AM::ror;
4347  else if (ShiftName == "rrx" || ShiftName == "RRX")
4348    St = ARM_AM::rrx;
4349  else
4350    return Error(Loc, "illegal shift operator");
4351  Parser.Lex(); // Eat shift type token.
4352
4353  // rrx stands alone.
4354  Amount = 0;
4355  if (St != ARM_AM::rrx) {
4356    Loc = Parser.getTok().getLoc();
4357    // A '#' (or '$') and a shift amount.
4358    const AsmToken &HashTok = Parser.getTok();
4359    if (HashTok.isNot(AsmToken::Hash) &&
4360        HashTok.isNot(AsmToken::Dollar))
4361      return Error(HashTok.getLoc(), "'#' expected");
4362    Parser.Lex(); // Eat hash token.
4363
4364    const MCExpr *Expr;
4365    if (getParser().ParseExpression(Expr))
4366      return true;
4367    // Range check the immediate.
4368    // lsl, ror: 0 <= imm <= 31
4369    // lsr, asr: 0 <= imm <= 32
4370    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4371    if (!CE)
4372      return Error(Loc, "shift amount must be an immediate");
4373    int64_t Imm = CE->getValue();
4374    if (Imm < 0 ||
4375        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4376        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4377      return Error(Loc, "immediate shift value out of range");
4378    Amount = Imm;
4379  }
4380
4381  return false;
4382}
4383
4384/// parseFPImm - A floating point immediate expression operand.
4385ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4386parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4387  // Anything that can accept a floating point constant as an operand
4388  // needs to go through here, as the regular ParseExpression is
4389  // integer only.
4390  //
4391  // This routine still creates a generic Immediate operand, containing
4392  // a bitcast of the 64-bit floating point value. The various operands
4393  // that accept floats can check whether the value is valid for them
4394  // via the standard is*() predicates.
4395
4396  SMLoc S = Parser.getTok().getLoc();
4397
4398  if (Parser.getTok().isNot(AsmToken::Hash) &&
4399      Parser.getTok().isNot(AsmToken::Dollar))
4400    return MatchOperand_NoMatch;
4401
4402  // Disambiguate the VMOV forms that can accept an FP immediate.
4403  // vmov.f32 <sreg>, #imm
4404  // vmov.f64 <dreg>, #imm
4405  // vmov.f32 <dreg>, #imm  @ vector f32x2
4406  // vmov.f32 <qreg>, #imm  @ vector f32x4
4407  //
4408  // There are also the NEON VMOV instructions which expect an
4409  // integer constant. Make sure we don't try to parse an FPImm
4410  // for these:
4411  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4412  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4413  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4414                           TyOp->getToken() != ".f64"))
4415    return MatchOperand_NoMatch;
4416
4417  Parser.Lex(); // Eat the '#'.
4418
4419  // Handle negation, as that still comes through as a separate token.
4420  bool isNegative = false;
4421  if (Parser.getTok().is(AsmToken::Minus)) {
4422    isNegative = true;
4423    Parser.Lex();
4424  }
4425  const AsmToken &Tok = Parser.getTok();
4426  SMLoc Loc = Tok.getLoc();
4427  if (Tok.is(AsmToken::Real)) {
4428    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4429    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4430    // If we had a '-' in front, toggle the sign bit.
4431    IntVal ^= (uint64_t)isNegative << 31;
4432    Parser.Lex(); // Eat the token.
4433    Operands.push_back(ARMOperand::CreateImm(
4434          MCConstantExpr::Create(IntVal, getContext()),
4435          S, Parser.getTok().getLoc()));
4436    return MatchOperand_Success;
4437  }
4438  // Also handle plain integers. Instructions which allow floating point
4439  // immediates also allow a raw encoded 8-bit value.
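  // Illustrative examples: "vmov.f32 s0, #1.0" is handled by the Real case
  // above, while "vmov.f32 s0, #112" supplies the raw 8-bit encoding directly
  // (0x70, which happens to encode 1.0).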
4440  if (Tok.is(AsmToken::Integer)) {
4441    int64_t Val = Tok.getIntVal();
4442    Parser.Lex(); // Eat the token.
4443    if (Val > 255 || Val < 0) {
4444      Error(Loc, "encoded floating point value out of range");
4445      return MatchOperand_ParseFail;
4446    }
4447    double RealVal = ARM_AM::getFPImmFloat(Val);
4448    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4449    Operands.push_back(ARMOperand::CreateImm(
4450        MCConstantExpr::Create(Val, getContext()), S,
4451        Parser.getTok().getLoc()));
4452    return MatchOperand_Success;
4453  }
4454
4455  Error(Loc, "invalid floating point immediate");
4456  return MatchOperand_ParseFail;
4457}
4458
4459/// Parse an ARM instruction operand.  For now this parses the operand regardless
4460/// of the mnemonic.
4461bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4462                                StringRef Mnemonic) {
4463  SMLoc S, E;
4464
4465  // Check if the current operand has a custom associated parser, if so, try to
4466  // custom parse the operand, or fallback to the general approach.
4467  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4468  if (ResTy == MatchOperand_Success)
4469    return false;
4470  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4471  // there was a match, but an error occurred, in which case, just return that
4472  // the operand parsing failed.
4473  if (ResTy == MatchOperand_ParseFail)
4474    return true;
4475
4476  switch (getLexer().getKind()) {
4477  default:
4478    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4479    return true;
4480  case AsmToken::Identifier: {
4481    if (!tryParseRegisterWithWriteBack(Operands))
4482      return false;
4483    int Res = tryParseShiftRegister(Operands);
4484    if (Res == 0) // success
4485      return false;
4486    else if (Res == -1) // irrecoverable error
4487      return true;
4488    // If this is VMRS, check for the apsr_nzcv operand.
4489    if (Mnemonic == "vmrs" &&
4490        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4491      S = Parser.getTok().getLoc();
4492      Parser.Lex();
4493      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4494      return false;
4495    }
4496
4497    // Fall through for the Identifier case that is not a register or a
4498    // special name.
4499  }
4500  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4501  case AsmToken::Integer: // things like 1f and 2b as branch targets
4502  case AsmToken::String:  // quoted label names.
4503  case AsmToken::Dot: {   // . as a branch target
4504    // This was not a register so parse other operands that start with an
4505    // identifier (like labels) as expressions and create them as immediates.
4506    const MCExpr *IdVal;
4507    S = Parser.getTok().getLoc();
4508    if (getParser().ParseExpression(IdVal))
4509      return true;
4510    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4511    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4512    return false;
4513  }
4514  case AsmToken::LBrac:
4515    return parseMemory(Operands);
4516  case AsmToken::LCurly:
4517    return parseRegisterList(Operands);
4518  case AsmToken::Dollar:
4519  case AsmToken::Hash: {
4520    // #42 -> immediate.
4521    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4522    S = Parser.getTok().getLoc();
4523    Parser.Lex();
4524    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4525    const MCExpr *ImmVal;
4526    if (getParser().ParseExpression(ImmVal))
4527      return true;
4528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4529    if (CE) {
4530      int32_t Val = CE->getValue();
4531      if (isNegative && Val == 0)
4532        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4533    }
4534    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4535    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4536    return false;
4537  }
4538  case AsmToken::Colon: {
4539    // ":lower16:" and ":upper16:" expression prefixes
4540    // FIXME: Check it's an expression prefix,
4541    // e.g. (FOO - :lower16:BAR) isn't legal.
4542    ARMMCExpr::VariantKind RefKind;
4543    if (parsePrefix(RefKind))
4544      return true;
4545
4546    const MCExpr *SubExprVal;
4547    if (getParser().ParseExpression(SubExprVal))
4548      return true;
4549
4550    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4551                                                   getContext());
4552    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4553    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4554    return false;
4555  }
4556  }
4557}
4558
4559// parsePrefix - Parse the ARM 16-bit relocation expression prefixes, i.e.
4560//  :lower16: and :upper16:.
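// Illustrative example: ":lower16:_foo" in "movw r0, :lower16:_foo" is parsed
// here; the corresponding ":upper16:" form is used with "movt".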
4561bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4562  RefKind = ARMMCExpr::VK_ARM_None;
4563
4564  // :lower16: and :upper16: modifiers
4565  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4566  Parser.Lex(); // Eat ':'
4567
4568  if (getLexer().isNot(AsmToken::Identifier)) {
4569    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4570    return true;
4571  }
4572
4573  StringRef IDVal = Parser.getTok().getIdentifier();
4574  if (IDVal == "lower16") {
4575    RefKind = ARMMCExpr::VK_ARM_LO16;
4576  } else if (IDVal == "upper16") {
4577    RefKind = ARMMCExpr::VK_ARM_HI16;
4578  } else {
4579    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4580    return true;
4581  }
4582  Parser.Lex();
4583
4584  if (getLexer().isNot(AsmToken::Colon)) {
4585    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4586    return true;
4587  }
4588  Parser.Lex(); // Eat the last ':'
4589  return false;
4590}
4591
4592/// \brief Given a mnemonic, split out possible predication code and carry
4593/// setting letters to form a canonical mnemonic and flags.
4594//
4595// FIXME: Would be nice to autogen this.
4596// FIXME: This is a bit of a maze of special cases.
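// Illustrative examples: "addseq" splits into a base mnemonic of "add" with
// CarrySetting == true and PredicationCode == ARMCC::EQ; "cpsie" yields "cps"
// with ProcessorIMod == ARM_PROC::IE; "ittet" yields "it" with an ITMask of
// "tet".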
4597StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4598                                      unsigned &PredicationCode,
4599                                      bool &CarrySetting,
4600                                      unsigned &ProcessorIMod,
4601                                      StringRef &ITMask) {
4602  PredicationCode = ARMCC::AL;
4603  CarrySetting = false;
4604  ProcessorIMod = 0;
4605
4606  // Ignore some mnemonics we know aren't predicated forms.
4607  //
4608  // FIXME: Would be nice to autogen this.
4609  if ((Mnemonic == "movs" && isThumb()) ||
4610      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4611      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4612      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4613      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4614      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4615      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4616      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4617      Mnemonic == "fmuls")
4618    return Mnemonic;
4619
4620  // First, split out any predication code. Ignore mnemonics we know aren't
4621  // predicated but do have a carry-set and so weren't caught above.
4622  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4623      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4624      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4625      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4626    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4627      .Case("eq", ARMCC::EQ)
4628      .Case("ne", ARMCC::NE)
4629      .Case("hs", ARMCC::HS)
4630      .Case("cs", ARMCC::HS)
4631      .Case("lo", ARMCC::LO)
4632      .Case("cc", ARMCC::LO)
4633      .Case("mi", ARMCC::MI)
4634      .Case("pl", ARMCC::PL)
4635      .Case("vs", ARMCC::VS)
4636      .Case("vc", ARMCC::VC)
4637      .Case("hi", ARMCC::HI)
4638      .Case("ls", ARMCC::LS)
4639      .Case("ge", ARMCC::GE)
4640      .Case("lt", ARMCC::LT)
4641      .Case("gt", ARMCC::GT)
4642      .Case("le", ARMCC::LE)
4643      .Case("al", ARMCC::AL)
4644      .Default(~0U);
4645    if (CC != ~0U) {
4646      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4647      PredicationCode = CC;
4648    }
4649  }
4650
4651  // Next, determine if we have a carry setting bit. We explicitly ignore all
4652  // the instructions we know end in 's'.
4653  if (Mnemonic.endswith("s") &&
4654      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4655        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4656        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4657        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4658        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4659        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4660        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4661        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4662        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4663        (Mnemonic == "movs" && isThumb()))) {
4664    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4665    CarrySetting = true;
4666  }
4667
4668  // The "cps" instruction can have an interrupt mode operand glued into the
4669  // mnemonic. Check if this is the case, split it and parse the imod operand.
4670  if (Mnemonic.startswith("cps")) {
4671    // Split out any imod code.
4672    unsigned IMod =
4673      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4674      .Case("ie", ARM_PROC::IE)
4675      .Case("id", ARM_PROC::ID)
4676      .Default(~0U);
4677    if (IMod != ~0U) {
4678      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4679      ProcessorIMod = IMod;
4680    }
4681  }
4682
4683  // The "it" instruction has the condition mask on the end of the mnemonic.
4684  if (Mnemonic.startswith("it")) {
4685    ITMask = Mnemonic.slice(2, Mnemonic.size());
4686    Mnemonic = Mnemonic.slice(0, 2);
4687  }
4688
4689  return Mnemonic;
4690}
4691
4692/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4693/// inclusion of carry set or predication code operands.
4694//
4695// FIXME: It would be nice to autogen this.
4696void ARMAsmParser::
4697getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4698                      bool &CanAcceptPredicationCode) {
4699  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4700      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4701      Mnemonic == "add" || Mnemonic == "adc" ||
4702      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4703      Mnemonic == "orr" || Mnemonic == "mvn" ||
4704      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4705      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4706      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4707      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4708                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4709                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4710    CanAcceptCarrySet = true;
4711  } else
4712    CanAcceptCarrySet = false;
4713
4714  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4715      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4716      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4717      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4718      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4719      (Mnemonic == "clrex" && !isThumb()) ||
4720      (Mnemonic == "nop" && isThumbOne()) ||
4721      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4722        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4723        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4724      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4725       !isThumb()) ||
4726      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4727    CanAcceptPredicationCode = false;
4728  } else
4729    CanAcceptPredicationCode = true;
4730
4731  if (isThumb()) {
4732    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4733        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4734      CanAcceptPredicationCode = false;
4735  }
4736}
4737
4738bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4739                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4740  // FIXME: This is all horribly hacky. We really need a better way to deal
4741  // with optional operands like this in the matcher table.
4742
4743  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4744  // another does not. Specifically, the MOVW instruction does not. So we
4745  // special case it here and remove the defaulted (non-setting) cc_out
4746  // operand if that's the instruction we're trying to match.
4747  //
4748  // We do this as post-processing of the explicit operands rather than just
4749  // conditionally adding the cc_out in the first place because we need
4750  // to check the type of the parsed immediate operand.
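  // Illustrative example: in ARM mode, "mov r0, #0xabcd" can only match MOVW
  // (the value is not a valid modified immediate), so the defaulted cc_out
  // operand is removed here.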
4751  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4752      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4753      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4754      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4755    return true;
4756
4757  // Register-register 'add' for thumb does not have a cc_out operand
4758  // when there are only two register operands.
4759  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4760      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4761      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4762      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4763    return true;
4764  // Register-register 'add' for thumb does not have a cc_out operand
4765  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4766  // have to check the immediate range here since Thumb2 has a variant
4767  // that can handle a different range and has a cc_out operand.
4768  if (((isThumb() && Mnemonic == "add") ||
4769       (isThumbTwo() && Mnemonic == "sub")) &&
4770      Operands.size() == 6 &&
4771      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4772      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4773      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4774      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4775      ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4776       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4777    return true;
4778  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4779  // imm0_4095 variant. That's the least-preferred variant when
4780  // selecting via the generic "add" mnemonic, so to know that we
4781  // should remove the cc_out operand, we have to explicitly check that
4782  // it's not one of the other variants. Ugh.
4783  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4784      Operands.size() == 6 &&
4785      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4786      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4787      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4788    // Nest conditions rather than one big 'if' statement for readability.
4789    //
4790    // If either register is a high reg, it's either one of the SP
4791    // variants (handled above) or a 32-bit encoding, so we just
4792    // check against T3. If the second register is the PC, this is an
4793    // alternate form of ADR, which uses encoding T4, so check for that too.
4794    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4795         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4796        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4797        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4798      return false;
4799    // If both registers are low, we're in an IT block, and the immediate is
4800    // in range, we should use encoding T1 instead, which has a cc_out.
4801    if (inITBlock() &&
4802        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4803        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4804        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4805      return false;
4806
4807    // Otherwise, we use encoding T4, which does not have a cc_out
4808    // operand.
4809    return true;
4810  }
4811
4812  // The thumb2 multiply instruction doesn't have a CCOut register, so
4813  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4814  // use the 16-bit encoding or not.
4815  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4816      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4817      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4818      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4819      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4820      // If the registers aren't low regs, the destination reg isn't the
4821      // same as one of the source regs, or the cc_out operand is zero
4822      // outside of an IT block, we have to use the 32-bit encoding, so
4823      // remove the cc_out operand.
4824      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4825       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4826       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4827       !inITBlock() ||
4828       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4829        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4830        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4831        static_cast<ARMOperand*>(Operands[4])->getReg())))
4832    return true;
4833
4834  // Also check the 'mul' syntax variant that doesn't specify an explicit
4835  // destination register.
4836  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4837      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4838      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4839      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4840      // If the registers aren't low regs  or the cc_out operand is zero
4841      // outside of an IT block, we have to use the 32-bit encoding, so
4842      // remove the cc_out operand.
4843      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4844       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4845       !inITBlock()))
4846    return true;
4847
4848
4849
4850  // Register-register 'add/sub' for thumb does not have a cc_out operand
4851  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4852  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4853  // right, this will result in better diagnostics (which operand is off)
4854  // anyway.
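  // Illustrative examples: "add sp, #16" and "add sp, sp, #16" both land here
  // and never take a cc_out operand.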
4855  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4856      (Operands.size() == 5 || Operands.size() == 6) &&
4857      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4858      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4859      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4860      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4861       (Operands.size() == 6 &&
4862        static_cast<ARMOperand*>(Operands[5])->isImm())))
4863    return true;
4864
4865  return false;
4866}
4867
4868static bool isDataTypeToken(StringRef Tok) {
4869  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4870    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4871    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4872    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4873    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4874    Tok == ".f" || Tok == ".d";
4875}
4876
4877// FIXME: This bit should probably be handled via an explicit match class
4878// in the .td files that matches the suffix instead of having it be
4879// a literal string token the way it is now.
4880static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4881  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4882}
4883
4884static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4885/// Parse an ARM instruction mnemonic followed by its operands.
4886bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4887                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4888  // Apply mnemonic aliases before doing anything else, as the destination
4889  // mnemonic may include suffixes and we want to handle them normally.
4890  // The generic tblgen'erated code does this later, at the start of
4891  // MatchInstructionImpl(), but that's too late for aliases that include
4892  // any sort of suffix.
4893  unsigned AvailableFeatures = getAvailableFeatures();
4894  applyMnemonicAliases(Name, AvailableFeatures);
4895
4896  // First check for the ARM-specific .req directive.
4897  if (Parser.getTok().is(AsmToken::Identifier) &&
4898      Parser.getTok().getIdentifier() == ".req") {
4899    parseDirectiveReq(Name, NameLoc);
4900    // We always return 'error' for this, as we're done with this
4901    // statement and don't need to match the instruction.
4902    return true;
4903  }
4904
4905  // Create the leading tokens for the mnemonic, split by '.' characters.
4906  size_t Start = 0, Next = Name.find('.');
4907  StringRef Mnemonic = Name.slice(Start, Next);
4908
4909  // Split out the predication code and carry setting flag from the mnemonic.
4910  unsigned PredicationCode;
4911  unsigned ProcessorIMod;
4912  bool CarrySetting;
4913  StringRef ITMask;
4914  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4915                           ProcessorIMod, ITMask);
4916
4917  // In Thumb1, only the branch (B) instruction can be predicated.
4918  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4919    Parser.EatToEndOfStatement();
4920    return Error(NameLoc, "conditional execution not supported in Thumb1");
4921  }
4922
4923  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4924
4925  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4926  // is the mask as it will be for the IT encoding if the conditional
4927  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4928  // where the conditional bit0 is zero, the instruction post-processing
4929  // will adjust the mask accordingly.
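  // Illustrative example: for "itte eq" the ITMask string is "te" and the
  // loop below produces Mask == 0b1010 (0xa) before that post-processing.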
4930  if (Mnemonic == "it") {
4931    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4932    if (ITMask.size() > 3) {
4933      Parser.EatToEndOfStatement();
4934      return Error(Loc, "too many conditions on IT instruction");
4935    }
4936    unsigned Mask = 8;
4937    for (unsigned i = ITMask.size(); i != 0; --i) {
4938      char pos = ITMask[i - 1];
4939      if (pos != 't' && pos != 'e') {
4940        Parser.EatToEndOfStatement();
4941        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4942      }
4943      Mask >>= 1;
4944      if (ITMask[i - 1] == 't')
4945        Mask |= 8;
4946    }
4947    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4948  }
4949
4950  // FIXME: This is all a pretty gross hack. We should automatically handle
4951  // optional operands like this via tblgen.
4952
4953  // Next, add the CCOut and ConditionCode operands, if needed.
4954  //
4955  // For mnemonics which can ever incorporate a carry setting bit or predication
4956  // code, our matching model involves us always generating CCOut and
4957  // ConditionCode operands to match the mnemonic "as written" and then we let
4958  // the matcher deal with finding the right instruction or generating an
4959  // appropriate error.
4960  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4961  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4962
4963  // If we had a carry-set on an instruction that can't do that, issue an
4964  // error.
4965  if (!CanAcceptCarrySet && CarrySetting) {
4966    Parser.EatToEndOfStatement();
4967    return Error(NameLoc, "instruction '" + Mnemonic +
4968                 "' can not set flags, but 's' suffix specified");
4969  }
4970  // If we had a predication code on an instruction that can't do that, issue an
4971  // error.
4972  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4973    Parser.EatToEndOfStatement();
4974    return Error(NameLoc, "instruction '" + Mnemonic +
4975                 "' is not predicable, but condition code specified");
4976  }
4977
4978  // Add the carry setting operand, if necessary.
4979  if (CanAcceptCarrySet) {
4980    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4981    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4982                                               Loc));
4983  }
4984
4985  // Add the predication code operand, if necessary.
4986  if (CanAcceptPredicationCode) {
4987    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4988                                      CarrySetting);
4989    Operands.push_back(ARMOperand::CreateCondCode(
4990                         ARMCC::CondCodes(PredicationCode), Loc));
4991  }
4992
4993  // Add the processor imod operand, if necessary.
4994  if (ProcessorIMod) {
4995    Operands.push_back(ARMOperand::CreateImm(
4996          MCConstantExpr::Create(ProcessorIMod, getContext()),
4997                                 NameLoc, NameLoc));
4998  }
4999
5000  // Add the remaining tokens in the mnemonic.
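  // Illustrative example: for "vadd.i32" the ".i32" suffix is pushed as a
  // separate token operand, while a Thumb ".n" width suffix is simply dropped.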
5001  while (Next != StringRef::npos) {
5002    Start = Next;
5003    Next = Name.find('.', Start + 1);
5004    StringRef ExtraToken = Name.slice(Start, Next);
5005
5006    // Some NEON instructions have an optional datatype suffix that is
5007    // completely ignored. Check for that.
5008    if (isDataTypeToken(ExtraToken) &&
5009        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5010      continue;
5011
5012    if (ExtraToken != ".n") {
5013      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5014      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5015    }
5016  }
5017
5018  // Read the remaining operands.
5019  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5020    // Read the first operand.
5021    if (parseOperand(Operands, Mnemonic)) {
5022      Parser.EatToEndOfStatement();
5023      return true;
5024    }
5025
5026    while (getLexer().is(AsmToken::Comma)) {
5027      Parser.Lex();  // Eat the comma.
5028
5029      // Parse and remember the operand.
5030      if (parseOperand(Operands, Mnemonic)) {
5031        Parser.EatToEndOfStatement();
5032        return true;
5033      }
5034    }
5035  }
5036
5037  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5038    SMLoc Loc = getLexer().getLoc();
5039    Parser.EatToEndOfStatement();
5040    return Error(Loc, "unexpected token in argument list");
5041  }
5042
5043  Parser.Lex(); // Consume the EndOfStatement
5044
5045  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5046  // do and don't have a cc_out optional-def operand. With some spot-checks
5047  // of the operand list, we can figure out which variant we're trying to
5048  // parse and adjust accordingly before actually matching. We shouldn't ever
5049  // try to remove a cc_out operand that was explicitly set on the
5050  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5051  // table-driven matcher doesn't fit well with the ARM instruction set.
5052  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5053    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5054    Operands.erase(Operands.begin() + 1);
5055    delete Op;
5056  }
5057
5058  // ARM mode 'blx' needs special handling, as the register operand version
5059  // is predicable, but the label operand version is not. So, we can't rely
5060  // on the Mnemonic based checking to correctly figure out when to put
5061  // a k_CondCode operand in the list. If we're trying to match the label
5062  // version, remove the k_CondCode operand here.
5063  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5064      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5065    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5066    Operands.erase(Operands.begin() + 1);
5067    delete Op;
5068  }
5069
5070  // The vector-compare-to-zero instructions have a literal token "#0" at
5071  // the end that comes to here as an immediate operand. Convert it to a
5072  // token to play nicely with the matcher.
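  // Illustrative example: "vceq.i32 d0, d1, #0" arrives with "#0" as an
  // immediate operand and leaves it converted to the literal token "#0".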
5073  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5074      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5075      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5076    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5077    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5078    if (CE && CE->getValue() == 0) {
5079      Operands.erase(Operands.begin() + 5);
5080      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5081      delete Op;
5082    }
5083  }
5084  // VCMP{E} does the same thing, but with a different operand count.
5085  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5086      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5087    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5088    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5089    if (CE && CE->getValue() == 0) {
5090      Operands.erase(Operands.begin() + 4);
5091      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5092      delete Op;
5093    }
5094  }
5095  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5096  // end. Convert it to a token here. Take care not to convert those
5097  // that should hit the Thumb2 encoding.
5098  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5099      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5100      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5101      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5102    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5103    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5104    if (CE && CE->getValue() == 0 &&
5105        (isThumbOne() ||
5106         // The cc_out operand matches the IT block.
5107         ((inITBlock() != CarrySetting) &&
5108         // Neither register operand is a high register.
5109         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5110          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5111      Operands.erase(Operands.begin() + 5);
5112      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5113      delete Op;
5114    }
5115  }
5116
5117  return false;
5118}
5119
5120// Validate context-sensitive operand constraints.
5121
5122// Return 'true' if the register list contains registers other than low GPRs
5123// (and the optional HiReg), 'false' otherwise. Set 'containsReg' to true if
5124// Reg appears in the register list.
5125static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5126                                 unsigned HiReg, bool &containsReg) {
5127  containsReg = false;
5128  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5129    unsigned OpReg = Inst.getOperand(i).getReg();
5130    if (OpReg == Reg)
5131      containsReg = true;
5132    // Anything other than a low register isn't legal here.
5133    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5134      return true;
5135  }
5136  return false;
5137}
5138
5139// Check if the specified register is in the register list of the inst,
5140// starting at the indicated operand number.
5141static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5142  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5143    unsigned OpReg = Inst.getOperand(i).getReg();
5144    if (OpReg == Reg)
5145      return true;
5146  }
5147  return false;
5148}
5149
5150// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5151// the ARMInsts array) instead. Getting that here requires awkward
5152// API changes, though. Better way?
5153namespace llvm {
5154extern const MCInstrDesc ARMInsts[];
5155}
5156static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5157  return ARMInsts[Opcode];
5158}
5159
5160// FIXME: We would really like to be able to tablegen'erate this.
5161bool ARMAsmParser::
5162validateInstruction(MCInst &Inst,
5163                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5164  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5165  SMLoc Loc = Operands[0]->getStartLoc();
5166  // Check the IT block state first.
5167  // NOTE: BKPT instruction has the interesting property of being
5168  // allowed in IT blocks, but not being predicable.  It just always
5169  // executes.
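  // Illustrative example: within "itte eq", the first two instructions must
  // carry the 'eq' condition and the third must carry 'ne'; anything else is
  // diagnosed below.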
5170  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5171      Inst.getOpcode() != ARM::BKPT) {
5172    unsigned bit = 1;
5173    if (ITState.FirstCond)
5174      ITState.FirstCond = false;
5175    else
5176      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5177    // The instruction must be predicable.
5178    if (!MCID.isPredicable())
5179      return Error(Loc, "instructions in IT block must be predicable");
5180    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5181    unsigned ITCond = bit ? ITState.Cond :
5182      ARMCC::getOppositeCondition(ITState.Cond);
5183    if (Cond != ITCond) {
5184      // Find the condition code Operand to get its SMLoc information.
5185      SMLoc CondLoc;
5186      for (unsigned i = 1; i < Operands.size(); ++i)
5187        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5188          CondLoc = Operands[i]->getStartLoc();
5189      return Error(CondLoc, "incorrect condition in IT block; got '" +
5190                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5191                   "', but expected '" +
5192                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5193    }
5194  // Check for non-'al' condition codes outside of the IT block.
5195  } else if (isThumbTwo() && MCID.isPredicable() &&
5196             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5197             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5198             Inst.getOpcode() != ARM::t2B)
5199    return Error(Loc, "predicated instructions must be in IT block");
5200
5201  switch (Inst.getOpcode()) {
5202  case ARM::LDRD:
5203  case ARM::LDRD_PRE:
5204  case ARM::LDRD_POST:
5205  case ARM::LDREXD: {
5206    // Rt2 must be Rt + 1.
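    // Illustrative example: "ldrd r0, r1, [r4]" is accepted, while
    // "ldrd r0, r2, [r4]" triggers the diagnostic below.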
5207    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5208    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5209    if (Rt2 != Rt + 1)
5210      return Error(Operands[3]->getStartLoc(),
5211                   "destination operands must be sequential");
5212    return false;
5213  }
5214  case ARM::STRD: {
5215    // Rt2 must be Rt + 1.
5216    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5217    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5218    if (Rt2 != Rt + 1)
5219      return Error(Operands[3]->getStartLoc(),
5220                   "source operands must be sequential");
5221    return false;
5222  }
5223  case ARM::STRD_PRE:
5224  case ARM::STRD_POST:
5225  case ARM::STREXD: {
5226    // Rt2 must be Rt + 1.
5227    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5228    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5229    if (Rt2 != Rt + 1)
5230      return Error(Operands[3]->getStartLoc(),
5231                   "source operands must be sequential");
5232    return false;
5233  }
5234  case ARM::SBFX:
5235  case ARM::UBFX: {
5236    // width must be in range [1, 32-lsb]
5237    unsigned lsb = Inst.getOperand(2).getImm();
5238    unsigned widthm1 = Inst.getOperand(3).getImm();
5239    if (widthm1 >= 32 - lsb)
5240      return Error(Operands[5]->getStartLoc(),
5241                   "bitfield width must be in range [1,32-lsb]");
5242    return false;
5243  }
5244  case ARM::tLDMIA: {
5245    // If we're parsing Thumb2, the .w variant is available and handles
5246    // most cases that are normally illegal for a Thumb1 LDM
5247    // instruction. We'll make the transformation in processInstruction()
5248    // if necessary.
5249    //
5250    // Thumb LDM instructions are writeback iff the base register is not
5251    // in the register list.
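    // Illustrative example: "ldm r0!, {r1, r2}" is the writeback form (r0 is
    // not in the list), while "ldm r0, {r0, r1}" must not use '!'.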
5252    unsigned Rn = Inst.getOperand(0).getReg();
5253    bool hasWritebackToken =
5254      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5255       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5256    bool listContainsBase;
5257    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5258      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5259                   "registers must be in range r0-r7");
5260    // If we should have writeback, then there should be a '!' token.
5261    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5262      return Error(Operands[2]->getStartLoc(),
5263                   "writeback operator '!' expected");
5264    // If we should not have writeback, there must not be a '!'. This is
5265    // true even for the 32-bit wide encodings.
5266    if (listContainsBase && hasWritebackToken)
5267      return Error(Operands[3]->getStartLoc(),
5268                   "writeback operator '!' not allowed when base register "
5269                   "in register list");
5270
5271    break;
5272  }
5273  case ARM::t2LDMIA_UPD: {
5274    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5275      return Error(Operands[4]->getStartLoc(),
5276                   "writeback operator '!' not allowed when base register "
5277                   "in register list");
5278    break;
5279  }
5280  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5281  // so only issue a diagnostic for Thumb1. The instructions will be
5282  // switched to the t2 encodings in processInstruction() if necessary.
5283  case ARM::tPOP: {
5284    bool listContainsBase;
5285    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5286        !isThumbTwo())
5287      return Error(Operands[2]->getStartLoc(),
5288                   "registers must be in range r0-r7 or pc");
5289    break;
5290  }
5291  case ARM::tPUSH: {
5292    bool listContainsBase;
5293    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5294        !isThumbTwo())
5295      return Error(Operands[2]->getStartLoc(),
5296                   "registers must be in range r0-r7 or lr");
5297    break;
5298  }
5299  case ARM::tSTMIA_UPD: {
5300    bool listContainsBase;
5301    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5302      return Error(Operands[4]->getStartLoc(),
5303                   "registers must be in range r0-r7");
5304    break;
5305  }
5306  }
5307
5308  return false;
5309}
5310
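// Map a VST "..._Asm_..." pseudo-opcode, used by the asm matcher for the NEON
// store syntax, to the real MCInst opcode and report the register spacing of
// its list operands (1 for the d-register forms, 2 for the q-register forms,
// i.e. every other D register). See the table below for the full mapping.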
5311static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5312  switch(Opc) {
5313  default: llvm_unreachable("unexpected opcode!");
5314  // VST1LN
5315  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5316  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5317  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5318  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5319  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5320  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5321  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5322  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5323  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5324
5325  // VST2LN
5326  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5327  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5328  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5329  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5330  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5331
5332  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5333  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5334  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5335  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5336  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5337
5338  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5339  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5340  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5341  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5342  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5343
5344  // VST3LN
5345  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5346  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5347  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5348  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5349  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5350  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5351  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5352  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5353  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5354  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5355  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5356  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5357  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5358  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5359  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5360
5361  // VST3
5362  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5363  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5364  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5365  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5366  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5367  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5368  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5369  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5370  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5371  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5372  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5373  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5374  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5375  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5376  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5377  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5378  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5379  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5380
5381  // VST4LN
5382  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5383  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5384  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5385  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5386  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5387  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5388  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5389  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5390  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5391  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5392  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5393  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5394  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5395  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5396  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5397
5398  // VST4
5399  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5400  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5401  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5402  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5403  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5404  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5405  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5406  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5407  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5408  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5409  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5410  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5411  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5412  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5413  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5414  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5415  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5416  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5417  }
5418}
5419
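// As above, but mapping the VLD "..._Asm_..." pseudo-opcodes.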
5420static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5421  switch(Opc) {
5422  default: llvm_unreachable("unexpected opcode!");
5423  // VLD1LN
5424  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5425  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5426  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5427  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5428  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5429  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5430  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5431  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5432  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5433
5434  // VLD2LN
5435  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5436  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5437  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5438  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5439  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5440  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5441  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5442  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5443  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5444  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5445  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5446  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5447  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5448  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5449  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5450
5451  // VLD3DUP
5452  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5453  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5454  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5455  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5456  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5457  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5458  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5459  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5460  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5461  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5462  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5463  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5464  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5465  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5466  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5467  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5468  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5469  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5470
5471  // VLD3LN
5472  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5473  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5474  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5475  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5476  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5477  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5478  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5479  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5480  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5481  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5482  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5483  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5484  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5485  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5486  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5487
5488  // VLD3
5489  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5490  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5491  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5492  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5493  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5494  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5495  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5496  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5497  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5498  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5499  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5500  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5501  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5502  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5503  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5504  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5505  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5506  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5507
5508  // VLD4LN
5509  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5510  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5511  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5512  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5513  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5514  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5515  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5516  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5517  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5518  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5519  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5520  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5521  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5522  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5523  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5524
5525  // VLD4DUP
5526  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5527  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5528  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5529  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5530  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5531  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5532  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5533  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5534  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5535  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5536  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5537  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5538  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5539  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5540  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5541  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5542  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5543  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5544
5545  // VLD4
5546  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5547  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5548  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5549  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5550  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5551  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5552  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5553  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5554  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5555  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5556  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5557  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5558  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5559  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5560  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5561  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5562  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5563  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5564  }
5565}
5566
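    // Post-process a matched instruction: expand assembly-only alias opcodes
    // (PC-relative LDR, the NEON VLD/VST complex aliases, MOV-with-shift) into
    // the real MCInst forms and pick narrow Thumb encodings where possible.
    // Returns true when Inst was rewritten.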
5567bool ARMAsmParser::
5568processInstruction(MCInst &Inst,
5569                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5570  switch (Inst.getOpcode()) {
5571  // Aliases for alternate PC+imm syntax of LDR instructions.
5572  case ARM::t2LDRpcrel:
5573    Inst.setOpcode(ARM::t2LDRpci);
5574    return true;
5575  case ARM::t2LDRBpcrel:
5576    Inst.setOpcode(ARM::t2LDRBpci);
5577    return true;
5578  case ARM::t2LDRHpcrel:
5579    Inst.setOpcode(ARM::t2LDRHpci);
5580    return true;
5581  case ARM::t2LDRSBpcrel:
5582    Inst.setOpcode(ARM::t2LDRSBpci);
5583    return true;
5584  case ARM::t2LDRSHpcrel:
5585    Inst.setOpcode(ARM::t2LDRSHpci);
5586    return true;
5587  // Handle NEON VST complex aliases.
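      // The *_Asm_* pseudo opcodes carry their operands in source order; each
      // case below rebuilds the MCInst in the order the real instruction
      // expects: the writeback result (if any), the base register and
      // alignment, Rm, the register list expanded from Vd using Spacing, the
      // lane index (for the per-lane forms), and the predicate operands.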
5588  case ARM::VST1LNdWB_register_Asm_8:
5589  case ARM::VST1LNdWB_register_Asm_16:
5590  case ARM::VST1LNdWB_register_Asm_32: {
5591    MCInst TmpInst;
5592    // Shuffle the operands around so the lane index operand is in the
5593    // right place.
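        // e.g. "vst1.8 {d4[2]}, [r1], r2" is matched as
        // VST1LNdWB_register_Asm_8 and becomes VST1LNd8_UPD here.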
5594    unsigned Spacing;
5595    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5596    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5597    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5598    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5599    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5600    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5601    TmpInst.addOperand(Inst.getOperand(1)); // lane
5602    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5603    TmpInst.addOperand(Inst.getOperand(6));
5604    Inst = TmpInst;
5605    return true;
5606  }
5607
5608  case ARM::VST2LNdWB_register_Asm_8:
5609  case ARM::VST2LNdWB_register_Asm_16:
5610  case ARM::VST2LNdWB_register_Asm_32:
5611  case ARM::VST2LNqWB_register_Asm_16:
5612  case ARM::VST2LNqWB_register_Asm_32: {
5613    MCInst TmpInst;
5614    // Shuffle the operands around so the lane index operand is in the
5615    // right place.
5616    unsigned Spacing;
5617    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5618    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5619    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5620    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5621    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5622    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5623    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5624                                            Spacing));
5625    TmpInst.addOperand(Inst.getOperand(1)); // lane
5626    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5627    TmpInst.addOperand(Inst.getOperand(6));
5628    Inst = TmpInst;
5629    return true;
5630  }
5631
5632  case ARM::VST3LNdWB_register_Asm_8:
5633  case ARM::VST3LNdWB_register_Asm_16:
5634  case ARM::VST3LNdWB_register_Asm_32:
5635  case ARM::VST3LNqWB_register_Asm_16:
5636  case ARM::VST3LNqWB_register_Asm_32: {
5637    MCInst TmpInst;
5638    // Shuffle the operands around so the lane index operand is in the
5639    // right place.
5640    unsigned Spacing;
5641    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5642    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5643    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5644    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5645    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5646    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5647    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5648                                            Spacing));
5649    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5650                                            Spacing * 2));
5651    TmpInst.addOperand(Inst.getOperand(1)); // lane
5652    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5653    TmpInst.addOperand(Inst.getOperand(6));
5654    Inst = TmpInst;
5655    return true;
5656  }
5657
5658  case ARM::VST4LNdWB_register_Asm_8:
5659  case ARM::VST4LNdWB_register_Asm_16:
5660  case ARM::VST4LNdWB_register_Asm_32:
5661  case ARM::VST4LNqWB_register_Asm_16:
5662  case ARM::VST4LNqWB_register_Asm_32: {
5663    MCInst TmpInst;
5664    // Shuffle the operands around so the lane index operand is in the
5665    // right place.
5666    unsigned Spacing;
5667    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5668    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5669    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5670    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5671    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5672    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5673    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5674                                            Spacing));
5675    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5676                                            Spacing * 2));
5677    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5678                                            Spacing * 3));
5679    TmpInst.addOperand(Inst.getOperand(1)); // lane
5680    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5681    TmpInst.addOperand(Inst.getOperand(6));
5682    Inst = TmpInst;
5683    return true;
5684  }
5685
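      // The fixed-increment writeback forms ("[Rn]!") have no Rm, so a zero
      // (no-register) operand is added in its place.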
5686  case ARM::VST1LNdWB_fixed_Asm_8:
5687  case ARM::VST1LNdWB_fixed_Asm_16:
5688  case ARM::VST1LNdWB_fixed_Asm_32: {
5689    MCInst TmpInst;
5690    // Shuffle the operands around so the lane index operand is in the
5691    // right place.
5692    unsigned Spacing;
5693    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5694    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5695    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5696    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5697    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5698    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5699    TmpInst.addOperand(Inst.getOperand(1)); // lane
5700    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5701    TmpInst.addOperand(Inst.getOperand(5));
5702    Inst = TmpInst;
5703    return true;
5704  }
5705
5706  case ARM::VST2LNdWB_fixed_Asm_8:
5707  case ARM::VST2LNdWB_fixed_Asm_16:
5708  case ARM::VST2LNdWB_fixed_Asm_32:
5709  case ARM::VST2LNqWB_fixed_Asm_16:
5710  case ARM::VST2LNqWB_fixed_Asm_32: {
5711    MCInst TmpInst;
5712    // Shuffle the operands around so the lane index operand is in the
5713    // right place.
5714    unsigned Spacing;
5715    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5716    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5717    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5718    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5719    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5720    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5721    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5722                                            Spacing));
5723    TmpInst.addOperand(Inst.getOperand(1)); // lane
5724    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5725    TmpInst.addOperand(Inst.getOperand(5));
5726    Inst = TmpInst;
5727    return true;
5728  }
5729
5730  case ARM::VST3LNdWB_fixed_Asm_8:
5731  case ARM::VST3LNdWB_fixed_Asm_16:
5732  case ARM::VST3LNdWB_fixed_Asm_32:
5733  case ARM::VST3LNqWB_fixed_Asm_16:
5734  case ARM::VST3LNqWB_fixed_Asm_32: {
5735    MCInst TmpInst;
5736    // Shuffle the operands around so the lane index operand is in the
5737    // right place.
5738    unsigned Spacing;
5739    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5740    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5741    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5742    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5743    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5744    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5745    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5746                                            Spacing));
5747    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5748                                            Spacing * 2));
5749    TmpInst.addOperand(Inst.getOperand(1)); // lane
5750    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5751    TmpInst.addOperand(Inst.getOperand(5));
5752    Inst = TmpInst;
5753    return true;
5754  }
5755
5756  case ARM::VST4LNdWB_fixed_Asm_8:
5757  case ARM::VST4LNdWB_fixed_Asm_16:
5758  case ARM::VST4LNdWB_fixed_Asm_32:
5759  case ARM::VST4LNqWB_fixed_Asm_16:
5760  case ARM::VST4LNqWB_fixed_Asm_32: {
5761    MCInst TmpInst;
5762    // Shuffle the operands around so the lane index operand is in the
5763    // right place.
5764    unsigned Spacing;
5765    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5766    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5767    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5768    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5769    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5770    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5771    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5772                                            Spacing));
5773    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5774                                            Spacing * 2));
5775    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5776                                            Spacing * 3));
5777    TmpInst.addOperand(Inst.getOperand(1)); // lane
5778    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5779    TmpInst.addOperand(Inst.getOperand(5));
5780    Inst = TmpInst;
5781    return true;
5782  }
5783
5784  case ARM::VST1LNdAsm_8:
5785  case ARM::VST1LNdAsm_16:
5786  case ARM::VST1LNdAsm_32: {
5787    MCInst TmpInst;
5788    // Shuffle the operands around so the lane index operand is in the
5789    // right place.
5790    unsigned Spacing;
5791    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5792    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5793    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5794    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5795    TmpInst.addOperand(Inst.getOperand(1)); // lane
5796    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5797    TmpInst.addOperand(Inst.getOperand(5));
5798    Inst = TmpInst;
5799    return true;
5800  }
5801
5802  case ARM::VST2LNdAsm_8:
5803  case ARM::VST2LNdAsm_16:
5804  case ARM::VST2LNdAsm_32:
5805  case ARM::VST2LNqAsm_16:
5806  case ARM::VST2LNqAsm_32: {
5807    MCInst TmpInst;
5808    // Shuffle the operands around so the lane index operand is in the
5809    // right place.
5810    unsigned Spacing;
5811    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5812    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5813    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5814    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5815    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5816                                            Spacing));
5817    TmpInst.addOperand(Inst.getOperand(1)); // lane
5818    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5819    TmpInst.addOperand(Inst.getOperand(5));
5820    Inst = TmpInst;
5821    return true;
5822  }
5823
5824  case ARM::VST3LNdAsm_8:
5825  case ARM::VST3LNdAsm_16:
5826  case ARM::VST3LNdAsm_32:
5827  case ARM::VST3LNqAsm_16:
5828  case ARM::VST3LNqAsm_32: {
5829    MCInst TmpInst;
5830    // Shuffle the operands around so the lane index operand is in the
5831    // right place.
5832    unsigned Spacing;
5833    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5834    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5835    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5836    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5837    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5838                                            Spacing));
5839    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5840                                            Spacing * 2));
5841    TmpInst.addOperand(Inst.getOperand(1)); // lane
5842    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5843    TmpInst.addOperand(Inst.getOperand(5));
5844    Inst = TmpInst;
5845    return true;
5846  }
5847
5848  case ARM::VST4LNdAsm_8:
5849  case ARM::VST4LNdAsm_16:
5850  case ARM::VST4LNdAsm_32:
5851  case ARM::VST4LNqAsm_16:
5852  case ARM::VST4LNqAsm_32: {
5853    MCInst TmpInst;
5854    // Shuffle the operands around so the lane index operand is in the
5855    // right place.
5856    unsigned Spacing;
5857    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5858    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5859    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5860    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5861    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5862                                            Spacing));
5863    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5864                                            Spacing * 2));
5865    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5866                                            Spacing * 3));
5867    TmpInst.addOperand(Inst.getOperand(1)); // lane
5868    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5869    TmpInst.addOperand(Inst.getOperand(5));
5870    Inst = TmpInst;
5871    return true;
5872  }
5873
5874  // Handle NEON VLD complex aliases.
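      // The loads are handled the same way, except that the destination
      // register list is emitted first and, for the per-lane forms, the list
      // is repeated as a tied source so the untouched lanes are preserved.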
5875  case ARM::VLD1LNdWB_register_Asm_8:
5876  case ARM::VLD1LNdWB_register_Asm_16:
5877  case ARM::VLD1LNdWB_register_Asm_32: {
5878    MCInst TmpInst;
5879    // Shuffle the operands around so the lane index operand is in the
5880    // right place.
5881    unsigned Spacing;
5882    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5883    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5884    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5885    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5886    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5887    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5888    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5889    TmpInst.addOperand(Inst.getOperand(1)); // lane
5890    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5891    TmpInst.addOperand(Inst.getOperand(6));
5892    Inst = TmpInst;
5893    return true;
5894  }
5895
5896  case ARM::VLD2LNdWB_register_Asm_8:
5897  case ARM::VLD2LNdWB_register_Asm_16:
5898  case ARM::VLD2LNdWB_register_Asm_32:
5899  case ARM::VLD2LNqWB_register_Asm_16:
5900  case ARM::VLD2LNqWB_register_Asm_32: {
5901    MCInst TmpInst;
5902    // Shuffle the operands around so the lane index operand is in the
5903    // right place.
5904    unsigned Spacing;
5905    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5906    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5907    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5908                                            Spacing));
5909    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5910    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5911    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5912    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5913    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5914    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5915                                            Spacing));
5916    TmpInst.addOperand(Inst.getOperand(1)); // lane
5917    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5918    TmpInst.addOperand(Inst.getOperand(6));
5919    Inst = TmpInst;
5920    return true;
5921  }
5922
5923  case ARM::VLD3LNdWB_register_Asm_8:
5924  case ARM::VLD3LNdWB_register_Asm_16:
5925  case ARM::VLD3LNdWB_register_Asm_32:
5926  case ARM::VLD3LNqWB_register_Asm_16:
5927  case ARM::VLD3LNqWB_register_Asm_32: {
5928    MCInst TmpInst;
5929    // Shuffle the operands around so the lane index operand is in the
5930    // right place.
5931    unsigned Spacing;
5932    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5933    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5934    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5935                                            Spacing));
5936    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5937                                            Spacing * 2));
5938    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5939    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5940    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5941    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5942    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5943    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5944                                            Spacing));
5945    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5946                                            Spacing * 2));
5947    TmpInst.addOperand(Inst.getOperand(1)); // lane
5948    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5949    TmpInst.addOperand(Inst.getOperand(6));
5950    Inst = TmpInst;
5951    return true;
5952  }
5953
5954  case ARM::VLD4LNdWB_register_Asm_8:
5955  case ARM::VLD4LNdWB_register_Asm_16:
5956  case ARM::VLD4LNdWB_register_Asm_32:
5957  case ARM::VLD4LNqWB_register_Asm_16:
5958  case ARM::VLD4LNqWB_register_Asm_32: {
5959    MCInst TmpInst;
5960    // Shuffle the operands around so the lane index operand is in the
5961    // right place.
5962    unsigned Spacing;
5963    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5964    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5965    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5966                                            Spacing));
5967    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5968                                            Spacing * 2));
5969    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5970                                            Spacing * 3));
5971    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5972    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5973    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5974    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5975    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5976    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5977                                            Spacing));
5978    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5979                                            Spacing * 2));
5980    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5981                                            Spacing * 3));
5982    TmpInst.addOperand(Inst.getOperand(1)); // lane
5983    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5984    TmpInst.addOperand(Inst.getOperand(6));
5985    Inst = TmpInst;
5986    return true;
5987  }
5988
5989  case ARM::VLD1LNdWB_fixed_Asm_8:
5990  case ARM::VLD1LNdWB_fixed_Asm_16:
5991  case ARM::VLD1LNdWB_fixed_Asm_32: {
5992    MCInst TmpInst;
5993    // Shuffle the operands around so the lane index operand is in the
5994    // right place.
5995    unsigned Spacing;
5996    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5997    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5998    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5999    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6000    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6001    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6002    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6003    TmpInst.addOperand(Inst.getOperand(1)); // lane
6004    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6005    TmpInst.addOperand(Inst.getOperand(5));
6006    Inst = TmpInst;
6007    return true;
6008  }
6009
6010  case ARM::VLD2LNdWB_fixed_Asm_8:
6011  case ARM::VLD2LNdWB_fixed_Asm_16:
6012  case ARM::VLD2LNdWB_fixed_Asm_32:
6013  case ARM::VLD2LNqWB_fixed_Asm_16:
6014  case ARM::VLD2LNqWB_fixed_Asm_32: {
6015    MCInst TmpInst;
6016    // Shuffle the operands around so the lane index operand is in the
6017    // right place.
6018    unsigned Spacing;
6019    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6020    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6021    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6022                                            Spacing));
6023    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6024    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6025    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6026    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6027    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6028    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6029                                            Spacing));
6030    TmpInst.addOperand(Inst.getOperand(1)); // lane
6031    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6032    TmpInst.addOperand(Inst.getOperand(5));
6033    Inst = TmpInst;
6034    return true;
6035  }
6036
6037  case ARM::VLD3LNdWB_fixed_Asm_8:
6038  case ARM::VLD3LNdWB_fixed_Asm_16:
6039  case ARM::VLD3LNdWB_fixed_Asm_32:
6040  case ARM::VLD3LNqWB_fixed_Asm_16:
6041  case ARM::VLD3LNqWB_fixed_Asm_32: {
6042    MCInst TmpInst;
6043    // Shuffle the operands around so the lane index operand is in the
6044    // right place.
6045    unsigned Spacing;
6046    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6047    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049                                            Spacing));
6050    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6051                                            Spacing * 2));
6052    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6053    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6054    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6055    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6056    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6057    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6058                                            Spacing));
6059    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6060                                            Spacing * 2));
6061    TmpInst.addOperand(Inst.getOperand(1)); // lane
6062    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6063    TmpInst.addOperand(Inst.getOperand(5));
6064    Inst = TmpInst;
6065    return true;
6066  }
6067
6068  case ARM::VLD4LNdWB_fixed_Asm_8:
6069  case ARM::VLD4LNdWB_fixed_Asm_16:
6070  case ARM::VLD4LNdWB_fixed_Asm_32:
6071  case ARM::VLD4LNqWB_fixed_Asm_16:
6072  case ARM::VLD4LNqWB_fixed_Asm_32: {
6073    MCInst TmpInst;
6074    // Shuffle the operands around so the lane index operand is in the
6075    // right place.
6076    unsigned Spacing;
6077    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6078    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6079    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6080                                            Spacing));
6081    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6082                                            Spacing * 2));
6083    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6084                                            Spacing * 3));
6085    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6086    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6087    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6088    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6089    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6090    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6091                                            Spacing));
6092    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6093                                            Spacing * 2));
6094    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6095                                            Spacing * 3));
6096    TmpInst.addOperand(Inst.getOperand(1)); // lane
6097    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6098    TmpInst.addOperand(Inst.getOperand(5));
6099    Inst = TmpInst;
6100    return true;
6101  }
6102
6103  case ARM::VLD1LNdAsm_8:
6104  case ARM::VLD1LNdAsm_16:
6105  case ARM::VLD1LNdAsm_32: {
6106    MCInst TmpInst;
6107    // Shuffle the operands around so the lane index operand is in the
6108    // right place.
6109    unsigned Spacing;
6110    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6111    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6112    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6113    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6114    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6115    TmpInst.addOperand(Inst.getOperand(1)); // lane
6116    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6117    TmpInst.addOperand(Inst.getOperand(5));
6118    Inst = TmpInst;
6119    return true;
6120  }
6121
6122  case ARM::VLD2LNdAsm_8:
6123  case ARM::VLD2LNdAsm_16:
6124  case ARM::VLD2LNdAsm_32:
6125  case ARM::VLD2LNqAsm_16:
6126  case ARM::VLD2LNqAsm_32: {
6127    MCInst TmpInst;
6128    // Shuffle the operands around so the lane index operand is in the
6129    // right place.
6130    unsigned Spacing;
6131    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6132    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6133    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6134                                            Spacing));
6135    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6136    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6137    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6138    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6139                                            Spacing));
6140    TmpInst.addOperand(Inst.getOperand(1)); // lane
6141    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6142    TmpInst.addOperand(Inst.getOperand(5));
6143    Inst = TmpInst;
6144    return true;
6145  }
6146
6147  case ARM::VLD3LNdAsm_8:
6148  case ARM::VLD3LNdAsm_16:
6149  case ARM::VLD3LNdAsm_32:
6150  case ARM::VLD3LNqAsm_16:
6151  case ARM::VLD3LNqAsm_32: {
6152    MCInst TmpInst;
6153    // Shuffle the operands around so the lane index operand is in the
6154    // right place.
6155    unsigned Spacing;
6156    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6157    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6158    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6159                                            Spacing));
6160    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6161                                            Spacing * 2));
6162    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6163    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6164    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6165    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6166                                            Spacing));
6167    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6168                                            Spacing * 2));
6169    TmpInst.addOperand(Inst.getOperand(1)); // lane
6170    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6171    TmpInst.addOperand(Inst.getOperand(5));
6172    Inst = TmpInst;
6173    return true;
6174  }
6175
6176  case ARM::VLD4LNdAsm_8:
6177  case ARM::VLD4LNdAsm_16:
6178  case ARM::VLD4LNdAsm_32:
6179  case ARM::VLD4LNqAsm_16:
6180  case ARM::VLD4LNqAsm_32: {
6181    MCInst TmpInst;
6182    // Shuffle the operands around so the lane index operand is in the
6183    // right place.
6184    unsigned Spacing;
6185    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6186    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6187    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6188                                            Spacing));
6189    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6190                                            Spacing * 2));
6191    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6192                                            Spacing * 3));
6193    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6194    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6195    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6196    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6197                                            Spacing));
6198    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6199                                            Spacing * 2));
6200    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6201                                            Spacing * 3));
6202    TmpInst.addOperand(Inst.getOperand(1)); // lane
6203    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6204    TmpInst.addOperand(Inst.getOperand(5));
6205    Inst = TmpInst;
6206    return true;
6207  }
6208
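      // The all-lanes ("DUP") and multiple-structure forms below have no lane
      // index and no tied source; only the register list, the address
      // operands, and the predicate are rebuilt.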
6209  // VLD3DUP single 3-element structure to all lanes instructions.
6210  case ARM::VLD3DUPdAsm_8:
6211  case ARM::VLD3DUPdAsm_16:
6212  case ARM::VLD3DUPdAsm_32:
6213  case ARM::VLD3DUPqAsm_8:
6214  case ARM::VLD3DUPqAsm_16:
6215  case ARM::VLD3DUPqAsm_32: {
6216    MCInst TmpInst;
6217    unsigned Spacing;
6218    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6219    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6220    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6221                                            Spacing));
6222    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6223                                            Spacing * 2));
6224    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6225    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6226    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6227    TmpInst.addOperand(Inst.getOperand(4));
6228    Inst = TmpInst;
6229    return true;
6230  }
6231
6232  case ARM::VLD3DUPdWB_fixed_Asm_8:
6233  case ARM::VLD3DUPdWB_fixed_Asm_16:
6234  case ARM::VLD3DUPdWB_fixed_Asm_32:
6235  case ARM::VLD3DUPqWB_fixed_Asm_8:
6236  case ARM::VLD3DUPqWB_fixed_Asm_16:
6237  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6238    MCInst TmpInst;
6239    unsigned Spacing;
6240    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6241    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6242    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6243                                            Spacing));
6244    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6245                                            Spacing * 2));
6246    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6247    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6248    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6249    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6250    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6251    TmpInst.addOperand(Inst.getOperand(4));
6252    Inst = TmpInst;
6253    return true;
6254  }
6255
6256  case ARM::VLD3DUPdWB_register_Asm_8:
6257  case ARM::VLD3DUPdWB_register_Asm_16:
6258  case ARM::VLD3DUPdWB_register_Asm_32:
6259  case ARM::VLD3DUPqWB_register_Asm_8:
6260  case ARM::VLD3DUPqWB_register_Asm_16:
6261  case ARM::VLD3DUPqWB_register_Asm_32: {
6262    MCInst TmpInst;
6263    unsigned Spacing;
6264    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6265    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6266    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6267                                            Spacing));
6268    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6269                                            Spacing * 2));
6270    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6271    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6272    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6273    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6274    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6275    TmpInst.addOperand(Inst.getOperand(5));
6276    Inst = TmpInst;
6277    return true;
6278  }
6279
6280  // VLD3 multiple 3-element structure instructions.
6281  case ARM::VLD3dAsm_8:
6282  case ARM::VLD3dAsm_16:
6283  case ARM::VLD3dAsm_32:
6284  case ARM::VLD3qAsm_8:
6285  case ARM::VLD3qAsm_16:
6286  case ARM::VLD3qAsm_32: {
6287    MCInst TmpInst;
6288    unsigned Spacing;
6289    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6290    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6291    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6292                                            Spacing));
6293    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6294                                            Spacing * 2));
6295    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6296    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6297    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6298    TmpInst.addOperand(Inst.getOperand(4));
6299    Inst = TmpInst;
6300    return true;
6301  }
6302
6303  case ARM::VLD3dWB_fixed_Asm_8:
6304  case ARM::VLD3dWB_fixed_Asm_16:
6305  case ARM::VLD3dWB_fixed_Asm_32:
6306  case ARM::VLD3qWB_fixed_Asm_8:
6307  case ARM::VLD3qWB_fixed_Asm_16:
6308  case ARM::VLD3qWB_fixed_Asm_32: {
6309    MCInst TmpInst;
6310    unsigned Spacing;
6311    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6312    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6313    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6314                                            Spacing));
6315    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6316                                            Spacing * 2));
6317    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6318    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6319    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6320    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6321    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6322    TmpInst.addOperand(Inst.getOperand(4));
6323    Inst = TmpInst;
6324    return true;
6325  }
6326
6327  case ARM::VLD3dWB_register_Asm_8:
6328  case ARM::VLD3dWB_register_Asm_16:
6329  case ARM::VLD3dWB_register_Asm_32:
6330  case ARM::VLD3qWB_register_Asm_8:
6331  case ARM::VLD3qWB_register_Asm_16:
6332  case ARM::VLD3qWB_register_Asm_32: {
6333    MCInst TmpInst;
6334    unsigned Spacing;
6335    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6336    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6337    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6338                                            Spacing));
6339    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6340                                            Spacing * 2));
6341    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6342    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6343    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6344    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6345    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6346    TmpInst.addOperand(Inst.getOperand(5));
6347    Inst = TmpInst;
6348    return true;
6349  }
6350
6351  // VLD4DUP single 4-element structure to all lanes instructions.
6352  case ARM::VLD4DUPdAsm_8:
6353  case ARM::VLD4DUPdAsm_16:
6354  case ARM::VLD4DUPdAsm_32:
6355  case ARM::VLD4DUPqAsm_8:
6356  case ARM::VLD4DUPqAsm_16:
6357  case ARM::VLD4DUPqAsm_32: {
6358    MCInst TmpInst;
6359    unsigned Spacing;
6360    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6361    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6362    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6363                                            Spacing));
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing * 2));
6366    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6367                                            Spacing * 3));
6368    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6369    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6370    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6371    TmpInst.addOperand(Inst.getOperand(4));
6372    Inst = TmpInst;
6373    return true;
6374  }
6375
6376  case ARM::VLD4DUPdWB_fixed_Asm_8:
6377  case ARM::VLD4DUPdWB_fixed_Asm_16:
6378  case ARM::VLD4DUPdWB_fixed_Asm_32:
6379  case ARM::VLD4DUPqWB_fixed_Asm_8:
6380  case ARM::VLD4DUPqWB_fixed_Asm_16:
6381  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6382    MCInst TmpInst;
6383    unsigned Spacing;
6384    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6385    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6386    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6387                                            Spacing));
6388    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6389                                            Spacing * 2));
6390    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6391                                            Spacing * 3));
6392    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6393    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6394    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6395    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6396    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6397    TmpInst.addOperand(Inst.getOperand(4));
6398    Inst = TmpInst;
6399    return true;
6400  }
6401
6402  case ARM::VLD4DUPdWB_register_Asm_8:
6403  case ARM::VLD4DUPdWB_register_Asm_16:
6404  case ARM::VLD4DUPdWB_register_Asm_32:
6405  case ARM::VLD4DUPqWB_register_Asm_8:
6406  case ARM::VLD4DUPqWB_register_Asm_16:
6407  case ARM::VLD4DUPqWB_register_Asm_32: {
6408    MCInst TmpInst;
6409    unsigned Spacing;
6410    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6411    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6412    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6413                                            Spacing));
6414    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6415                                            Spacing * 2));
6416    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6417                                            Spacing * 3));
6418    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6419    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6420    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6421    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6422    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6423    TmpInst.addOperand(Inst.getOperand(5));
6424    Inst = TmpInst;
6425    return true;
6426  }
6427
6428  // VLD4 multiple 4-element structure instructions.
6429  case ARM::VLD4dAsm_8:
6430  case ARM::VLD4dAsm_16:
6431  case ARM::VLD4dAsm_32:
6432  case ARM::VLD4qAsm_8:
6433  case ARM::VLD4qAsm_16:
6434  case ARM::VLD4qAsm_32: {
6435    MCInst TmpInst;
6436    unsigned Spacing;
6437    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6438    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6439    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6440                                            Spacing));
6441    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6442                                            Spacing * 2));
6443    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6444                                            Spacing * 3));
6445    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6446    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6447    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6448    TmpInst.addOperand(Inst.getOperand(4));
6449    Inst = TmpInst;
6450    return true;
6451  }
6452
6453  case ARM::VLD4dWB_fixed_Asm_8:
6454  case ARM::VLD4dWB_fixed_Asm_16:
6455  case ARM::VLD4dWB_fixed_Asm_32:
6456  case ARM::VLD4qWB_fixed_Asm_8:
6457  case ARM::VLD4qWB_fixed_Asm_16:
6458  case ARM::VLD4qWB_fixed_Asm_32: {
6459    MCInst TmpInst;
6460    unsigned Spacing;
6461    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6462    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6463    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6464                                            Spacing));
6465    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6466                                            Spacing * 2));
6467    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6468                                            Spacing * 3));
6469    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6470    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6471    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6472    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6473    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6474    TmpInst.addOperand(Inst.getOperand(4));
6475    Inst = TmpInst;
6476    return true;
6477  }
6478
6479  case ARM::VLD4dWB_register_Asm_8:
6480  case ARM::VLD4dWB_register_Asm_16:
6481  case ARM::VLD4dWB_register_Asm_32:
6482  case ARM::VLD4qWB_register_Asm_8:
6483  case ARM::VLD4qWB_register_Asm_16:
6484  case ARM::VLD4qWB_register_Asm_32: {
6485    MCInst TmpInst;
6486    unsigned Spacing;
6487    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6488    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6489    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6490                                            Spacing));
6491    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6492                                            Spacing * 2));
6493    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6494                                            Spacing * 3));
6495    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6496    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6497    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6498    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6499    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6500    TmpInst.addOperand(Inst.getOperand(5));
6501    Inst = TmpInst;
6502    return true;
6503  }
6504
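      // Multiple-structure stores: the register list follows the address
      // operands, mirroring the real VST3/VST4 operand order.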
6505  // VST3 multiple 3-element structure instructions.
6506  case ARM::VST3dAsm_8:
6507  case ARM::VST3dAsm_16:
6508  case ARM::VST3dAsm_32:
6509  case ARM::VST3qAsm_8:
6510  case ARM::VST3qAsm_16:
6511  case ARM::VST3qAsm_32: {
6512    MCInst TmpInst;
6513    unsigned Spacing;
6514    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6515    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6516    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6517    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6518    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6519                                            Spacing));
6520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6521                                            Spacing * 2));
6522    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6523    TmpInst.addOperand(Inst.getOperand(4));
6524    Inst = TmpInst;
6525    return true;
6526  }
6527
6528  case ARM::VST3dWB_fixed_Asm_8:
6529  case ARM::VST3dWB_fixed_Asm_16:
6530  case ARM::VST3dWB_fixed_Asm_32:
6531  case ARM::VST3qWB_fixed_Asm_8:
6532  case ARM::VST3qWB_fixed_Asm_16:
6533  case ARM::VST3qWB_fixed_Asm_32: {
6534    MCInst TmpInst;
6535    unsigned Spacing;
6536    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6537    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6538    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6539    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6540    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6541    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6542    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6543                                            Spacing));
6544    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6545                                            Spacing * 2));
6546    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6547    TmpInst.addOperand(Inst.getOperand(4));
6548    Inst = TmpInst;
6549    return true;
6550  }
6551
6552  case ARM::VST3dWB_register_Asm_8:
6553  case ARM::VST3dWB_register_Asm_16:
6554  case ARM::VST3dWB_register_Asm_32:
6555  case ARM::VST3qWB_register_Asm_8:
6556  case ARM::VST3qWB_register_Asm_16:
6557  case ARM::VST3qWB_register_Asm_32: {
6558    MCInst TmpInst;
6559    unsigned Spacing;
6560    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6561    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6562    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6563    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6564    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6565    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6566    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6567                                            Spacing));
6568    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6569                                            Spacing * 2));
6570    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6571    TmpInst.addOperand(Inst.getOperand(5));
6572    Inst = TmpInst;
6573    return true;
6574  }
6575
6576  // VST4 multiple 4-element structure instructions.
6577  case ARM::VST4dAsm_8:
6578  case ARM::VST4dAsm_16:
6579  case ARM::VST4dAsm_32:
6580  case ARM::VST4qAsm_8:
6581  case ARM::VST4qAsm_16:
6582  case ARM::VST4qAsm_32: {
6583    MCInst TmpInst;
6584    unsigned Spacing;
6585    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6586    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6587    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6588    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6589    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6590                                            Spacing));
6591    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6592                                            Spacing * 2));
6593    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6594                                            Spacing * 3));
6595    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6596    TmpInst.addOperand(Inst.getOperand(4));
6597    Inst = TmpInst;
6598    return true;
6599  }
6600
6601  case ARM::VST4dWB_fixed_Asm_8:
6602  case ARM::VST4dWB_fixed_Asm_16:
6603  case ARM::VST4dWB_fixed_Asm_32:
6604  case ARM::VST4qWB_fixed_Asm_8:
6605  case ARM::VST4qWB_fixed_Asm_16:
6606  case ARM::VST4qWB_fixed_Asm_32: {
6607    MCInst TmpInst;
6608    unsigned Spacing;
6609    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6610    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6611    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6612    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6613    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6614    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6615    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6616                                            Spacing));
6617    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6618                                            Spacing * 2));
6619    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6620                                            Spacing * 3));
6621    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6622    TmpInst.addOperand(Inst.getOperand(4));
6623    Inst = TmpInst;
6624    return true;
6625  }
6626
6627  case ARM::VST4dWB_register_Asm_8:
6628  case ARM::VST4dWB_register_Asm_16:
6629  case ARM::VST4dWB_register_Asm_32:
6630  case ARM::VST4qWB_register_Asm_8:
6631  case ARM::VST4qWB_register_Asm_16:
6632  case ARM::VST4qWB_register_Asm_32: {
6633    MCInst TmpInst;
6634    unsigned Spacing;
6635    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6636    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6637    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6638    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6639    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6640    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6641    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6642                                            Spacing));
6643    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6644                                            Spacing * 2));
6645    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6646                                            Spacing * 3));
6647    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6648    TmpInst.addOperand(Inst.getOperand(5));
6649    Inst = TmpInst;
6650    return true;
6651  }
6652
6653  // Handle encoding choice for the shift-immediate instructions.
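      // Prefer the 16-bit Thumb encoding when the destination and source are
      // the same low register, the flag-setting behaviour matches the IT-block
      // context (flags set outside an IT block, not inside), and the user did
      // not force the wide encoding with a ".w" suffix.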
6654  case ARM::t2LSLri:
6655  case ARM::t2LSRri:
6656  case ARM::t2ASRri: {
6657    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6658        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6659        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6660        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6661         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6662      unsigned NewOpc;
6663      switch (Inst.getOpcode()) {
6664      default: llvm_unreachable("unexpected opcode");
6665      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6666      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6667      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6668      }
6669      // The Thumb1 operands aren't in the same order. Awesome, eh?
6670      MCInst TmpInst;
6671      TmpInst.setOpcode(NewOpc);
6672      TmpInst.addOperand(Inst.getOperand(0));
6673      TmpInst.addOperand(Inst.getOperand(5));
6674      TmpInst.addOperand(Inst.getOperand(1));
6675      TmpInst.addOperand(Inst.getOperand(2));
6676      TmpInst.addOperand(Inst.getOperand(3));
6677      TmpInst.addOperand(Inst.getOperand(4));
6678      Inst = TmpInst;
6679      return true;
6680    }
6681    return false;
6682  }
6683
6684  // Handle the Thumb2 mode MOV complex aliases.
6685  case ARM::t2MOVsr:
6686  case ARM::t2MOVSsr: {
6687    // Which instruction to expand to depends on the CCOut operand and,
6688    // if the register operands are low registers, on whether we're in
6689    // an IT block.
6690    bool isNarrow = false;
6691    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6692        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6693        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6694        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6695        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6696      isNarrow = true;
6697    MCInst TmpInst;
6698    unsigned newOpc;
6699    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6700    default: llvm_unreachable("unexpected opcode!");
6701    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6702    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6703    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6704    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6705    }
6706    TmpInst.setOpcode(newOpc);
6707    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6708    if (isNarrow)
6709      TmpInst.addOperand(MCOperand::CreateReg(
6710          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6711    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6712    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6713    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6714    TmpInst.addOperand(Inst.getOperand(5));
6715    if (!isNarrow)
6716      TmpInst.addOperand(MCOperand::CreateReg(
6717          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6718    Inst = TmpInst;
6719    return true;
6720  }
6721  case ARM::t2MOVsi:
6722  case ARM::t2MOVSsi: {
6723    // Which instruction to expand to depends on the CCOut operand and,
6724    // if the register operands are low registers, on whether we're in
6725    // an IT block.
6726    bool isNarrow = false;
6727    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6728        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6729        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6730      isNarrow = true;
6731    MCInst TmpInst;
6732    unsigned newOpc;
6733    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6734    default: llvm_unreachable("unexpected opcode!");
6735    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6736    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6737    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6738    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6739    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6740    }
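        // An asr/lsr shift amount of 32 is encoded as an immediate of 0.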
6741    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6742    if (Amount == 32) Amount = 0;
6743    TmpInst.setOpcode(newOpc);
6744    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6745    if (isNarrow)
6746      TmpInst.addOperand(MCOperand::CreateReg(
6747          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6748    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6749    if (newOpc != ARM::t2RRX)
6750      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6751    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6752    TmpInst.addOperand(Inst.getOperand(4));
6753    if (!isNarrow)
6754      TmpInst.addOperand(MCOperand::CreateReg(
6755          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6756    Inst = TmpInst;
6757    return true;
6758  }
6759  // Handle the ARM mode MOV complex aliases.
6760  case ARM::ASRr:
6761  case ARM::LSRr:
6762  case ARM::LSLr:
6763  case ARM::RORr: {
6764    ARM_AM::ShiftOpc ShiftTy;
6765    switch(Inst.getOpcode()) {
6766    default: llvm_unreachable("unexpected opcode!");
6767    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6768    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6769    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6770    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6771    }
6772    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6773    MCInst TmpInst;
6774    TmpInst.setOpcode(ARM::MOVsr);
6775    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6776    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6777    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6778    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6779    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6780    TmpInst.addOperand(Inst.getOperand(4));
6781    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6782    Inst = TmpInst;
6783    return true;
6784  }
6785  case ARM::ASRi:
6786  case ARM::LSRi:
6787  case ARM::LSLi:
6788  case ARM::RORi: {
6789    ARM_AM::ShiftOpc ShiftTy;
6790    switch(Inst.getOpcode()) {
6791    default: llvm_unreachable("unexpected opcode!");
6792    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6793    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6794    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6795    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6796    }
6797    // A shift by zero is a plain MOVr, not a MOVsi.
6798    unsigned Amt = Inst.getOperand(2).getImm();
6799    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6800    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6801    MCInst TmpInst;
6802    TmpInst.setOpcode(Opc);
6803    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6804    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6805    if (Opc == ARM::MOVsi)
6806      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6807    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6808    TmpInst.addOperand(Inst.getOperand(4));
6809    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6810    Inst = TmpInst;
6811    return true;
6812  }
6813  case ARM::RRXi: {
6814    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6815    MCInst TmpInst;
6816    TmpInst.setOpcode(ARM::MOVsi);
6817    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6818    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6819    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6820    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6821    TmpInst.addOperand(Inst.getOperand(3));
6822    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6823    Inst = TmpInst;
6824    return true;
6825  }
6826  case ARM::t2LDMIA_UPD: {
6827    // If this is a load of a single register, then we should use
6828    // a post-indexed LDR instruction instead, per the ARM ARM.
6829    if (Inst.getNumOperands() != 5)
6830      return false;
6831    MCInst TmpInst;
6832    TmpInst.setOpcode(ARM::t2LDR_POST);
6833    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6834    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6835    TmpInst.addOperand(Inst.getOperand(1)); // Rn
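        // Post-increment the base register by 4 (the size of one register).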
6836    TmpInst.addOperand(MCOperand::CreateImm(4));
6837    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6838    TmpInst.addOperand(Inst.getOperand(3));
6839    Inst = TmpInst;
6840    return true;
6841  }
6842  case ARM::t2STMDB_UPD: {
6843    // If this is a store of a single register, then we should use
6844    // a pre-indexed STR instruction instead, per the ARM ARM.
6845    if (Inst.getNumOperands() != 5)
6846      return false;
6847    MCInst TmpInst;
6848    TmpInst.setOpcode(ARM::t2STR_PRE);
6849    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6850    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6851    TmpInst.addOperand(Inst.getOperand(1)); // Rn
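        // Pre-decrement the base register by 4 (the size of one register).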
6852    TmpInst.addOperand(MCOperand::CreateImm(-4));
6853    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6854    TmpInst.addOperand(Inst.getOperand(3));
6855    Inst = TmpInst;
6856    return true;
6857  }
6858  case ARM::LDMIA_UPD:
6859    // If this is a load of a single register via a 'pop', then we should use
6860    // a post-indexed LDR instruction instead, per the ARM ARM.
6861    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6862        Inst.getNumOperands() == 5) {
6863      MCInst TmpInst;
6864      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6865      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6866      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6867      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6868      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6869      TmpInst.addOperand(MCOperand::CreateImm(4));
6870      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6871      TmpInst.addOperand(Inst.getOperand(3));
6872      Inst = TmpInst;
6873      return true;
6874    }
6875    break;
6876  case ARM::STMDB_UPD:
6877    // If this is a store of a single register via a 'push', then we should use
6878    // a pre-indexed STR instruction instead, per the ARM ARM.
6879    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6880        Inst.getNumOperands() == 5) {
6881      MCInst TmpInst;
6882      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6883      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6884      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6885      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6886      TmpInst.addOperand(MCOperand::CreateImm(-4));
6887      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6888      TmpInst.addOperand(Inst.getOperand(3));
6889      Inst = TmpInst;
6890    }
6891    break;
6892  case ARM::t2ADDri12:
6893    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6894    // mnemonic was used (not "addw"), encoding T3 is preferred.
6895    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6896        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6897      break;
6898    Inst.setOpcode(ARM::t2ADDri);
6899    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6900    break;
6901  case ARM::t2SUBri12:
6902    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6903    // mnemonic was used (not "subw"), encoding T3 is preferred.
6904    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6905        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6906      break;
6907    Inst.setOpcode(ARM::t2SUBri);
6908    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6909    break;
6910  case ARM::tADDi8:
6911    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6912    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6913    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6914    // to encoding T1 if <Rd> is omitted."
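        // Operands.size() == 6 means the destination register was given explicitly.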
6915    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6916      Inst.setOpcode(ARM::tADDi3);
6917      return true;
6918    }
6919    break;
6920  case ARM::tSUBi8:
6921    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6922    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6923    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6924    // to encoding T1 if <Rd> is omitted."
6925    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6926      Inst.setOpcode(ARM::tSUBi3);
6927      return true;
6928    }
6929    break;
6930  case ARM::t2ADDri:
6931  case ARM::t2SUBri: {
6932    // If the destination and first source operand are the same, and
6933    // the flags are compatible with the current IT status, use encoding T2
6934    // instead of T3. For compatibility with the system 'as'. Make sure the
6935    // wide encoding wasn't explicit.
6936    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6937        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6938        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6939        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6940        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6941        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6942         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6943      break;
6944    MCInst TmpInst;
6945    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6946                      ARM::tADDi8 : ARM::tSUBi8);
6947    TmpInst.addOperand(Inst.getOperand(0));
6948    TmpInst.addOperand(Inst.getOperand(5));
6949    TmpInst.addOperand(Inst.getOperand(0));
6950    TmpInst.addOperand(Inst.getOperand(2));
6951    TmpInst.addOperand(Inst.getOperand(3));
6952    TmpInst.addOperand(Inst.getOperand(4));
6953    Inst = TmpInst;
6954    return true;
6955  }
6956  case ARM::t2ADDrr: {
6957    // If the destination and first source operand are the same, and
6958    // there's no setting of the flags, use encoding T2 instead of T3.
6959    // Note that this is only for ADD, not SUB. This mirrors the system
6960    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6961    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6962        Inst.getOperand(5).getReg() != 0 ||
6963        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6964         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6965      break;
6966    MCInst TmpInst;
6967    TmpInst.setOpcode(ARM::tADDhirr);
6968    TmpInst.addOperand(Inst.getOperand(0));
6969    TmpInst.addOperand(Inst.getOperand(0));
6970    TmpInst.addOperand(Inst.getOperand(2));
6971    TmpInst.addOperand(Inst.getOperand(3));
6972    TmpInst.addOperand(Inst.getOperand(4));
6973    Inst = TmpInst;
6974    return true;
6975  }
6976  case ARM::tB:
6977    // A Thumb conditional branch outside of an IT block is a tBcc.
6978    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6979      Inst.setOpcode(ARM::tBcc);
6980      return true;
6981    }
6982    break;
6983  case ARM::t2B:
6984    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6985    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6986      Inst.setOpcode(ARM::t2Bcc);
6987      return true;
6988    }
6989    break;
6990  case ARM::t2Bcc:
6991    // If the conditional is AL or we're in an IT block, we really want t2B.
6992    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6993      Inst.setOpcode(ARM::t2B);
6994      return true;
6995    }
6996    break;
6997  case ARM::tBcc:
6998    // If the conditional is AL, we really want tB.
6999    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7000      Inst.setOpcode(ARM::tB);
7001      return true;
7002    }
7003    break;
7004  case ARM::tLDMIA: {
7005    // If the register list contains any high registers, or if the writeback
7006    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7007    // instead if we're in Thumb2. Otherwise, this should have generated
7008    // an error in validateInstruction().
7009    unsigned Rn = Inst.getOperand(0).getReg();
7010    bool hasWritebackToken =
7011      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7012       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7013    bool listContainsBase;
7014    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7015        (!listContainsBase && !hasWritebackToken) ||
7016        (listContainsBase && hasWritebackToken)) {
7017      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7018      assert (isThumbTwo());
7019      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7020      // If we're switching to the updating version, we need to insert
7021      // the writeback tied operand.
7022      if (hasWritebackToken)
7023        Inst.insert(Inst.begin(),
7024                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7025      return true;
7026    }
7027    break;
7028  }
7029  case ARM::tSTMIA_UPD: {
7030    // If the register list contains any high registers, we need to use
7031    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7032    // should have generated an error in validateInstruction().
7033    unsigned Rn = Inst.getOperand(0).getReg();
7034    bool listContainsBase;
7035    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7036      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7037      assert (isThumbTwo());
7038      Inst.setOpcode(ARM::t2STMIA_UPD);
7039      return true;
7040    }
7041    break;
7042  }
7043  case ARM::tPOP: {
7044    bool listContainsBase;
7045    // If the register list contains any high registers, we need to use
7046    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7047    // should have generated an error in validateInstruction().
7048    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7049      return false;
7050    assert (isThumbTwo());
7051    Inst.setOpcode(ARM::t2LDMIA_UPD);
7052    // Add the base register and writeback operands.
7053    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7054    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7055    return true;
7056  }
7057  case ARM::tPUSH: {
7058    bool listContainsBase;
7059    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7060      return false;
7061    assert (isThumbTwo());
7062    Inst.setOpcode(ARM::t2STMDB_UPD);
7063    // Add the base register and writeback operands.
7064    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7065    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7066    return true;
7067  }
7068  case ARM::t2MOVi: {
7069    // If we can use the 16-bit encoding and the user didn't explicitly
7070    // request the 32-bit variant, transform it here.
7071    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7072        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7073        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7074         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7075        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7076        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7077         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7078      // The operands aren't in the same order for tMOVi8...
7079      MCInst TmpInst;
7080      TmpInst.setOpcode(ARM::tMOVi8);
7081      TmpInst.addOperand(Inst.getOperand(0));
7082      TmpInst.addOperand(Inst.getOperand(4));
7083      TmpInst.addOperand(Inst.getOperand(1));
7084      TmpInst.addOperand(Inst.getOperand(2));
7085      TmpInst.addOperand(Inst.getOperand(3));
7086      Inst = TmpInst;
7087      return true;
7088    }
7089    break;
7090  }
7091  case ARM::t2MOVr: {
7092    // If we can use the 16-bit encoding and the user didn't explicitly
7093    // request the 32-bit variant, transform it here.
7094    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7095        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7096        Inst.getOperand(2).getImm() == ARMCC::AL &&
7097        Inst.getOperand(4).getReg() == ARM::CPSR &&
7098        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7099         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7100      // The operands aren't the same for tMOV[S]r... (no cc_out)
7101      MCInst TmpInst;
7102      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7103      TmpInst.addOperand(Inst.getOperand(0));
7104      TmpInst.addOperand(Inst.getOperand(1));
7105      TmpInst.addOperand(Inst.getOperand(2));
7106      TmpInst.addOperand(Inst.getOperand(3));
7107      Inst = TmpInst;
7108      return true;
7109    }
7110    break;
7111  }
7112  case ARM::t2SXTH:
7113  case ARM::t2SXTB:
7114  case ARM::t2UXTH:
7115  case ARM::t2UXTB: {
7116    // If we can use the 16-bit encoding and the user didn't explicitly
7117    // request the 32-bit variant, transform it here.
7118    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7119        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7120        Inst.getOperand(2).getImm() == 0 &&
7121        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7122         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7123      unsigned NewOpc;
7124      switch (Inst.getOpcode()) {
7125      default: llvm_unreachable("Illegal opcode!");
7126      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7127      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7128      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7129      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7130      }
7131      // The operands aren't the same for thumb1 (no rotate operand).
7132      MCInst TmpInst;
7133      TmpInst.setOpcode(NewOpc);
7134      TmpInst.addOperand(Inst.getOperand(0));
7135      TmpInst.addOperand(Inst.getOperand(1));
7136      TmpInst.addOperand(Inst.getOperand(3));
7137      TmpInst.addOperand(Inst.getOperand(4));
7138      Inst = TmpInst;
7139      return true;
7140    }
7141    break;
7142  }
7143  case ARM::MOVsi: {
7144    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7145    if (SOpc == ARM_AM::rrx) return false;
7146    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7147      // Shifting by zero is accepted as a vanilla 'MOVr'
7148      MCInst TmpInst;
7149      TmpInst.setOpcode(ARM::MOVr);
7150      TmpInst.addOperand(Inst.getOperand(0));
7151      TmpInst.addOperand(Inst.getOperand(1));
7152      TmpInst.addOperand(Inst.getOperand(3));
7153      TmpInst.addOperand(Inst.getOperand(4));
7154      TmpInst.addOperand(Inst.getOperand(5));
7155      Inst = TmpInst;
7156      return true;
7157    }
7158    return false;
7159  }
7160  case ARM::ANDrsi:
7161  case ARM::ORRrsi:
7162  case ARM::EORrsi:
7163  case ARM::BICrsi:
7164  case ARM::SUBrsi:
7165  case ARM::ADDrsi: {
7166    unsigned newOpc;
7167    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7168    if (SOpc == ARM_AM::rrx) return false;
7169    switch (Inst.getOpcode()) {
7170    default: llvm_unreachable("unexpected opcode!");
7171    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7172    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7173    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7174    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7175    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7176    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7177    }
7178    // If the shift is by zero, use the non-shifted instruction definition.
7179    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7180      MCInst TmpInst;
7181      TmpInst.setOpcode(newOpc);
7182      TmpInst.addOperand(Inst.getOperand(0));
7183      TmpInst.addOperand(Inst.getOperand(1));
7184      TmpInst.addOperand(Inst.getOperand(2));
7185      TmpInst.addOperand(Inst.getOperand(4));
7186      TmpInst.addOperand(Inst.getOperand(5));
7187      TmpInst.addOperand(Inst.getOperand(6));
7188      Inst = TmpInst;
7189      return true;
7190    }
7191    return false;
7192  }
7193  case ARM::ITasm:
7194  case ARM::t2IT: {
7195    // The mask bits for all but the first condition are represented
7196    // relative to the low bit of the condition code: a mask bit equal
7197    // to that bit means 't'. We internally always use 1 to mean 't',
7198    // so XOR-toggle the bits if the low bit of the condition code is
7199    // zero. The encoding also expects the low bit of the condition to
7200    // be encoded as bit 4 of the mask operand, so mask that in if needed.
7201    MCOperand &MO = Inst.getOperand(1);
7202    unsigned Mask = MO.getImm();
7203    unsigned OrigMask = Mask;
7204    unsigned TZ = CountTrailingZeros_32(Mask);
7205    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7206      assert(Mask && TZ <= 3 && "illegal IT mask value!");
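          // Invert the 't'/'e' bits above the terminating 1, since the condition's low bit is 0.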
7207      for (unsigned i = 3; i != TZ; --i)
7208        Mask ^= 1 << i;
7209    } else
7210      Mask |= 0x10;
7211    MO.setImm(Mask);
7212
7213    // Set up the IT block state according to the IT instruction we just
7214    // matched.
7215    assert(!inITBlock() && "nested IT blocks?!");
7216    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7217    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7218    ITState.CurPosition = 0;
7219    ITState.FirstCond = true;
7220    break;
7221  }
7222  }
7223  return false;
7224}
7225
7226unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7227  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7228  // suffix depending on whether they're in an IT block or not.
7229  unsigned Opc = Inst.getOpcode();
7230  const MCInstrDesc &MCID = getInstDesc(Opc);
7231  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7232    assert(MCID.hasOptionalDef() &&
7233           "optionally flag setting instruction missing optional def operand");
7234    assert(MCID.NumOperands == Inst.getNumOperands() &&
7235           "operand count mismatch!");
7236    // Find the optional-def operand (cc_out).
7237    unsigned OpNo;
7238    for (OpNo = 0;
7239         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7240         ++OpNo)
7241      ;
7242    // If we're parsing Thumb1, reject it completely.
7243    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7244      return Match_MnemonicFail;
7245    // If we're parsing Thumb2, which form is legal depends on whether we're
7246    // in an IT block.
7247    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7248        !inITBlock())
7249      return Match_RequiresITBlock;
7250    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7251        inITBlock())
7252      return Match_RequiresNotITBlock;
7253  }
7254  // Some Thumb1 encodings that support high registers (e.g. tADDhirr) only
7255  // allow both registers to be low (r0-r7) when assembling for Thumb2.
7256  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7257           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7258           isARMLowRegister(Inst.getOperand(2).getReg()))
7259    return Match_RequiresThumb2;
7260  // Others only require ARMv6 or later.
7261  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7262           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7263           isARMLowRegister(Inst.getOperand(1).getReg()))
7264    return Match_RequiresV6;
7265  return Match_Success;
7266}
7267
7268bool ARMAsmParser::
7269MatchAndEmitInstruction(SMLoc IDLoc,
7270                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7271                        MCStreamer &Out) {
7272  MCInst Inst;
7273  unsigned ErrorInfo;
7274  unsigned MatchResult;
7275  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7276  switch (MatchResult) {
7277  default: break;
7278  case Match_Success:
7279    // Context sensitive operand constraints aren't handled by the matcher,
7280    // so check them here.
7281    if (validateInstruction(Inst, Operands)) {
7282      // Still progress the IT block, otherwise one wrong condition causes
7283      // nasty cascading errors.
7284      forwardITPosition();
7285      return true;
7286    }
7287
7288    // Some instructions need post-processing to, for example, tweak which
7289    // encoding is selected. Loop on it while changes happen so the
7290    // individual transformations can chain off each other. E.g.,
7291    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7292    while (processInstruction(Inst, Operands))
7293      ;
7294
7295    // Only move forward at the very end so that everything in validate
7296    // and process gets a consistent answer about whether we're in an IT
7297    // block.
7298    forwardITPosition();
7299
7300    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7301    // doesn't actually encode.
7302    if (Inst.getOpcode() == ARM::ITasm)
7303      return false;
7304
7305    Inst.setLoc(IDLoc);
7306    Out.EmitInstruction(Inst);
7307    return false;
7308  case Match_MissingFeature:
7309    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7310    return true;
7311  case Match_InvalidOperand: {
7312    SMLoc ErrorLoc = IDLoc;
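        // An ErrorInfo of ~0U means no particular operand was identified as the problem.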
7313    if (ErrorInfo != ~0U) {
7314      if (ErrorInfo >= Operands.size())
7315        return Error(IDLoc, "too few operands for instruction");
7316
7317      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7318      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7319    }
7320
7321    return Error(ErrorLoc, "invalid operand for instruction");
7322  }
7323  case Match_MnemonicFail:
7324    return Error(IDLoc, "invalid instruction");
7325  case Match_ConversionFail:
7326    // The converter function will have already emitted a diagnostic.
7327    return true;
7328  case Match_RequiresNotITBlock:
7329    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7330  case Match_RequiresITBlock:
7331    return Error(IDLoc, "instruction only valid inside IT block");
7332  case Match_RequiresV6:
7333    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7334  case Match_RequiresThumb2:
7335    return Error(IDLoc, "instruction variant requires Thumb2");
7336  }
7337
7338  llvm_unreachable("Implement any new match types added!");
7339}
7340
7341/// ParseDirective parses the ARM-specific directives.
7342bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7343  StringRef IDVal = DirectiveID.getIdentifier();
7344  if (IDVal == ".word")
7345    return parseDirectiveWord(4, DirectiveID.getLoc());
7346  else if (IDVal == ".thumb")
7347    return parseDirectiveThumb(DirectiveID.getLoc());
7348  else if (IDVal == ".arm")
7349    return parseDirectiveARM(DirectiveID.getLoc());
7350  else if (IDVal == ".thumb_func")
7351    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7352  else if (IDVal == ".code")
7353    return parseDirectiveCode(DirectiveID.getLoc());
7354  else if (IDVal == ".syntax")
7355    return parseDirectiveSyntax(DirectiveID.getLoc());
7356  else if (IDVal == ".unreq")
7357    return parseDirectiveUnreq(DirectiveID.getLoc());
7358  else if (IDVal == ".arch")
7359    return parseDirectiveArch(DirectiveID.getLoc());
7360  else if (IDVal == ".eabi_attribute")
7361    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7362  return true;
7363}
7364
7365/// parseDirectiveWord
7366///  ::= .word [ expression (, expression)* ]
7367bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7368  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7369    for (;;) {
7370      const MCExpr *Value;
7371      if (getParser().ParseExpression(Value))
7372        return true;
7373
7374      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7375
7376      if (getLexer().is(AsmToken::EndOfStatement))
7377        break;
7378
7379      // FIXME: Improve diagnostic.
7380      if (getLexer().isNot(AsmToken::Comma))
7381        return Error(L, "unexpected token in directive");
7382      Parser.Lex();
7383    }
7384  }
7385
7386  Parser.Lex();
7387  return false;
7388}
7389
7390/// parseDirectiveThumb
7391///  ::= .thumb
7392bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7393  if (getLexer().isNot(AsmToken::EndOfStatement))
7394    return Error(L, "unexpected token in directive");
7395  Parser.Lex();
7396
7397  if (!isThumb())
7398    SwitchMode();
7399  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7400  return false;
7401}
7402
7403/// parseDirectiveARM
7404///  ::= .arm
7405bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7406  if (getLexer().isNot(AsmToken::EndOfStatement))
7407    return Error(L, "unexpected token in directive");
7408  Parser.Lex();
7409
7410  if (isThumb())
7411    SwitchMode();
7412  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7413  return false;
7414}
7415
7416/// parseDirectiveThumbFunc
7417///  ::= .thumb_func symbol_name
7418bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7419  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7420  bool isMachO = MAI.hasSubsectionsViaSymbols();
7421  StringRef Name;
7422  bool needFuncName = true;
7423
7424  // Darwin asm optionally allows a function name after the .thumb_func directive;
7425  // ELF doesn't.
7426  if (isMachO) {
7427    const AsmToken &Tok = Parser.getTok();
7428    if (Tok.isNot(AsmToken::EndOfStatement)) {
7429      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7430        return Error(L, "unexpected token in .thumb_func directive");
7431      Name = Tok.getIdentifier();
7432      Parser.Lex(); // Consume the identifier token.
7433      needFuncName = false;
7434    }
7435  }
7436
7437  if (getLexer().isNot(AsmToken::EndOfStatement))
7438    return Error(L, "unexpected token in directive");
7439
7440  // Eat the end of statement and any blank lines that follow.
7441  while (getLexer().is(AsmToken::EndOfStatement))
7442    Parser.Lex();
7443
7444  // FIXME: assuming function name will be the line following .thumb_func
7445  // We really should be checking the next symbol definition even if there's
7446  // stuff in between.
7447  if (needFuncName) {
7448    Name = Parser.getTok().getIdentifier();
7449  }
7450
7451  // Mark symbol as a thumb symbol.
7452  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7453  getParser().getStreamer().EmitThumbFunc(Func);
7454  return false;
7455}
7456
7457/// parseDirectiveSyntax
7458///  ::= .syntax unified | divided
7459bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7460  const AsmToken &Tok = Parser.getTok();
7461  if (Tok.isNot(AsmToken::Identifier))
7462    return Error(L, "unexpected token in .syntax directive");
7463  StringRef Mode = Tok.getString();
7464  if (Mode == "unified" || Mode == "UNIFIED")
7465    Parser.Lex();
7466  else if (Mode == "divided" || Mode == "DIVIDED")
7467    return Error(L, "'.syntax divided' arm asssembly not supported");
7468  else
7469    return Error(L, "unrecognized syntax mode in .syntax directive");
7470
7471  if (getLexer().isNot(AsmToken::EndOfStatement))
7472    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7473  Parser.Lex();
7474
7475  // TODO tell the MC streamer the mode
7476  // getParser().getStreamer().Emit???();
7477  return false;
7478}
7479
7480/// parseDirectiveCode
7481///  ::= .code 16 | 32
7482bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7483  const AsmToken &Tok = Parser.getTok();
7484  if (Tok.isNot(AsmToken::Integer))
7485    return Error(L, "unexpected token in .code directive");
7486  int64_t Val = Parser.getTok().getIntVal();
7487  if (Val == 16)
7488    Parser.Lex();
7489  else if (Val == 32)
7490    Parser.Lex();
7491  else
7492    return Error(L, "invalid operand to .code directive");
7493
7494  if (getLexer().isNot(AsmToken::EndOfStatement))
7495    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7496  Parser.Lex();
7497
7498  if (Val == 16) {
7499    if (!isThumb())
7500      SwitchMode();
7501    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7502  } else {
7503    if (isThumb())
7504      SwitchMode();
7505    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7506  }
7507
7508  return false;
7509}
7510
7511/// parseDirectiveReq
7512///  ::= name .req registername
7513bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7514  Parser.Lex(); // Eat the '.req' token.
7515  unsigned Reg;
7516  SMLoc SRegLoc, ERegLoc;
7517  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7518    Parser.EatToEndOfStatement();
7519    return Error(SRegLoc, "register name expected");
7520  }
7521
7522  // Shouldn't be anything else.
7523  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7524    Parser.EatToEndOfStatement();
7525    return Error(Parser.getTok().getLoc(),
7526                 "unexpected input in .req directive.");
7527  }
7528
7529  Parser.Lex(); // Consume the EndOfStatement
7530
7531  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7532    return Error(SRegLoc, "redefinition of '" + Name +
7533                          "' does not match original.");
7534
7535  return false;
7536}
7537
7538/// parseDirectiveUnreq
7539///  ::= .unreq registername
7540bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7541  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7542    Parser.EatToEndOfStatement();
7543    return Error(L, "unexpected input in .unreq directive.");
7544  }
7545  RegisterReqs.erase(Parser.getTok().getIdentifier());
7546  Parser.Lex(); // Eat the identifier.
7547  return false;
7548}
7549
7550/// parseDirectiveArch
7551///  ::= .arch token
7552bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
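      // FIXME: Not yet implemented.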
7553  return true;
7554}
7555
7556/// parseDirectiveEabiAttr
7557///  ::= .eabi_attribute int, int
7558bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
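      // FIXME: Not yet implemented.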
7559  return true;
7560}
7561
7562extern "C" void LLVMInitializeARMAsmLexer();
7563
7564/// Force static initialization.
7565extern "C" void LLVMInitializeARMAsmParser() {
7566  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7567  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7568  LLVMInitializeARMAsmLexer();
7569}
7570
7571#define GET_REGISTER_MATCHER
7572#define GET_MATCHER_IMPLEMENTATION
7573#include "ARMGenAsmMatcher.inc"
7574