ARMAsmParser.cpp revision 276537
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "ARMFPUName.h"
11#include "ARMFeatures.h"
12#include "MCTargetDesc/ARMAddressingModes.h"
13#include "MCTargetDesc/ARMArchName.h"
14#include "MCTargetDesc/ARMBaseInfo.h"
15#include "MCTargetDesc/ARMMCExpr.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/StringExtras.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/ADT/Twine.h"
21#include "llvm/MC/MCAsmInfo.h"
22#include "llvm/MC/MCAssembler.h"
23#include "llvm/MC/MCContext.h"
24#include "llvm/MC/MCDisassembler.h"
25#include "llvm/MC/MCELFStreamer.h"
26#include "llvm/MC/MCExpr.h"
27#include "llvm/MC/MCInst.h"
28#include "llvm/MC/MCInstrDesc.h"
29#include "llvm/MC/MCInstrInfo.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34#include "llvm/MC/MCRegisterInfo.h"
35#include "llvm/MC/MCSection.h"
36#include "llvm/MC/MCStreamer.h"
37#include "llvm/MC/MCSubtargetInfo.h"
38#include "llvm/MC/MCSymbol.h"
39#include "llvm/MC/MCTargetAsmParser.h"
40#include "llvm/Support/ARMBuildAttributes.h"
41#include "llvm/Support/ARMEHABI.h"
42#include "llvm/Support/COFF.h"
43#include "llvm/Support/Debug.h"
44#include "llvm/Support/ELF.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/SourceMgr.h"
47#include "llvm/Support/TargetRegistry.h"
48#include "llvm/Support/raw_ostream.h"
49
50using namespace llvm;
51
52namespace {
53
54class ARMOperand;
55
56enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
57
58class UnwindContext {
59  MCAsmParser &Parser;
60
61  typedef SmallVector<SMLoc, 4> Locs;
62
63  Locs FnStartLocs;
64  Locs CantUnwindLocs;
65  Locs PersonalityLocs;
66  Locs PersonalityIndexLocs;
67  Locs HandlerDataLocs;
68  int FPReg;
69
70public:
71  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
72
73  bool hasFnStart() const { return !FnStartLocs.empty(); }
74  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
75  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
76  bool hasPersonality() const {
77    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
78  }
79
80  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
81  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
82  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
83  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
84  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
85
86  void saveFPReg(int Reg) { FPReg = Reg; }
87  int getFPReg() const { return FPReg; }
88
89  void emitFnStartLocNotes() const {
90    for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
91         FI != FE; ++FI)
92      Parser.Note(*FI, ".fnstart was specified here");
93  }
94  void emitCantUnwindLocNotes() const {
95    for (Locs::const_iterator UI = CantUnwindLocs.begin(),
96                              UE = CantUnwindLocs.end(); UI != UE; ++UI)
97      Parser.Note(*UI, ".cantunwind was specified here");
98  }
99  void emitHandlerDataLocNotes() const {
100    for (Locs::const_iterator HI = HandlerDataLocs.begin(),
101                              HE = HandlerDataLocs.end(); HI != HE; ++HI)
102      Parser.Note(*HI, ".handlerdata was specified here");
103  }
104  void emitPersonalityLocNotes() const {
105    for (Locs::const_iterator PI = PersonalityLocs.begin(),
106                              PE = PersonalityLocs.end(),
107                              PII = PersonalityIndexLocs.begin(),
108                              PIE = PersonalityIndexLocs.end();
109         PI != PE || PII != PIE;) {
110      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
111        Parser.Note(*PI++, ".personality was specified here");
112      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
113        Parser.Note(*PII++, ".personalityindex was specified here");
114      else
115        llvm_unreachable(".personality and .personalityindex cannot be "
116                         "at the same location");
117    }
118  }
119
120  void reset() {
121    FnStartLocs = Locs();
122    CantUnwindLocs = Locs();
123    PersonalityLocs = Locs();
124    HandlerDataLocs = Locs();
125    PersonalityIndexLocs = Locs();
126    FPReg = ARM::SP;
127  }
128};
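// Illustrative example: given a typical EHABI sequence such as
//
//   .fnstart
//   .personality __gxx_personality_v0
//   .handlerdata
//   .fnend
//
// UnwindContext records the location of each directive so that, if a later
// directive is inconsistent (for example .cantunwind after .personality),
// the resulting error can carry "was specified here" notes pointing back at
// the locations recorded above.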
129
130class ARMAsmParser : public MCTargetAsmParser {
131  MCSubtargetInfo &STI;
132  MCAsmParser &Parser;
133  const MCInstrInfo &MII;
134  const MCRegisterInfo *MRI;
135  UnwindContext UC;
136
137  ARMTargetStreamer &getTargetStreamer() {
138    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
139    return static_cast<ARMTargetStreamer &>(TS);
140  }
141
142  // Map of register aliases created via the .req directive.
143  StringMap<unsigned> RegisterReqs;
144
145  bool NextSymbolIsThumb;
146
147  struct {
148    ARMCC::CondCodes Cond;    // Condition for IT block.
149    unsigned Mask:4;          // Condition mask for instructions.
150                              // Starting at first 1 (from lsb).
151                              //   '1'  condition as indicated in IT.
152                              //   '0'  inverse of condition (else).
153                              // Count of instructions in IT block is
154                              // 4 - trailingZeros(mask).
155
156    bool FirstCond;           // Explicit flag for when we're parsing the
157                              // first instruction in the IT block. It's
158                              // implied in the mask, so needs special
159                              // handling.
160
161    unsigned CurPosition;     // Current position in parsing of IT
162                              // block. In range [0,3]. Initialized
163                              // according to count of instructions in block.
164                              // ~0U if no active IT block.
165  } ITState;
166  bool inITBlock() { return ITState.CurPosition != ~0U; }
167  void forwardITPosition() {
168    if (!inITBlock()) return;
169    // Move to the next instruction in the IT block, if there is one. If not,
170    // mark the block as done.
171    unsigned TZ = countTrailingZeros(ITState.Mask);
172    if (++ITState.CurPosition == 5 - TZ)
173      ITState.CurPosition = ~0U; // Done with the IT block after this.
174  }
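  // Illustrative walk-through: for "itte eq" the block covers three
  // conditional instructions, so the mask has its lowest set bit at
  // position 1, countTrailingZeros(Mask) == 1, and the instruction count is
  // 4 - 1 == 3. forwardITPosition() then marks the block as done once
  // ++CurPosition reaches 5 - TZ == 4.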
175
176
177  MCAsmParser &getParser() const { return Parser; }
178  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
179
180  void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
181    return Parser.Note(L, Msg, Ranges);
182  }
183  bool Warning(SMLoc L, const Twine &Msg,
184               ArrayRef<SMRange> Ranges = None) {
185    return Parser.Warning(L, Msg, Ranges);
186  }
187  bool Error(SMLoc L, const Twine &Msg,
188             ArrayRef<SMRange> Ranges = None) {
189    return Parser.Error(L, Msg, Ranges);
190  }
191
192  int tryParseRegister();
193  bool tryParseRegisterWithWriteBack(OperandVector &);
194  int tryParseShiftRegister(OperandVector &);
195  bool parseRegisterList(OperandVector &);
196  bool parseMemory(OperandVector &);
197  bool parseOperand(OperandVector &, StringRef Mnemonic);
198  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
199  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
200                              unsigned &ShiftAmount);
201  bool parseLiteralValues(unsigned Size, SMLoc L);
202  bool parseDirectiveThumb(SMLoc L);
203  bool parseDirectiveARM(SMLoc L);
204  bool parseDirectiveThumbFunc(SMLoc L);
205  bool parseDirectiveCode(SMLoc L);
206  bool parseDirectiveSyntax(SMLoc L);
207  bool parseDirectiveReq(StringRef Name, SMLoc L);
208  bool parseDirectiveUnreq(SMLoc L);
209  bool parseDirectiveArch(SMLoc L);
210  bool parseDirectiveEabiAttr(SMLoc L);
211  bool parseDirectiveCPU(SMLoc L);
212  bool parseDirectiveFPU(SMLoc L);
213  bool parseDirectiveFnStart(SMLoc L);
214  bool parseDirectiveFnEnd(SMLoc L);
215  bool parseDirectiveCantUnwind(SMLoc L);
216  bool parseDirectivePersonality(SMLoc L);
217  bool parseDirectiveHandlerData(SMLoc L);
218  bool parseDirectiveSetFP(SMLoc L);
219  bool parseDirectivePad(SMLoc L);
220  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
221  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
222  bool parseDirectiveLtorg(SMLoc L);
223  bool parseDirectiveEven(SMLoc L);
224  bool parseDirectivePersonalityIndex(SMLoc L);
225  bool parseDirectiveUnwindRaw(SMLoc L);
226  bool parseDirectiveTLSDescSeq(SMLoc L);
227  bool parseDirectiveMovSP(SMLoc L);
228  bool parseDirectiveObjectArch(SMLoc L);
229  bool parseDirectiveArchExtension(SMLoc L);
230  bool parseDirectiveAlign(SMLoc L);
231  bool parseDirectiveThumbSet(SMLoc L);
232
233  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
234                          bool &CarrySetting, unsigned &ProcessorIMod,
235                          StringRef &ITMask);
236  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
237                             bool &CanAcceptCarrySet,
238                             bool &CanAcceptPredicationCode);
239
240  bool isThumb() const {
241    // FIXME: Can tablegen auto-generate this?
242    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
243  }
244  bool isThumbOne() const {
245    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
246  }
247  bool isThumbTwo() const {
248    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
249  }
250  bool hasThumb() const {
251    return STI.getFeatureBits() & ARM::HasV4TOps;
252  }
253  bool hasV6Ops() const {
254    return STI.getFeatureBits() & ARM::HasV6Ops;
255  }
256  bool hasV6MOps() const {
257    return STI.getFeatureBits() & ARM::HasV6MOps;
258  }
259  bool hasV7Ops() const {
260    return STI.getFeatureBits() & ARM::HasV7Ops;
261  }
262  bool hasV8Ops() const {
263    return STI.getFeatureBits() & ARM::HasV8Ops;
264  }
265  bool hasARM() const {
266    return !(STI.getFeatureBits() & ARM::FeatureNoARM);
267  }
268
269  void SwitchMode() {
270    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
271    setAvailableFeatures(FB);
272  }
273  bool isMClass() const {
274    return STI.getFeatureBits() & ARM::FeatureMClass;
275  }
276
277  /// @name Auto-generated Match Functions
278  /// {
279
280#define GET_ASSEMBLER_HEADER
281#include "ARMGenAsmMatcher.inc"
282
283  /// }
284
285  OperandMatchResultTy parseITCondCode(OperandVector &);
286  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
287  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
288  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
289  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
290  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
291  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
292  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
293  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
294                                   int High);
295  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
296    return parsePKHImm(O, "lsl", 0, 31);
297  }
298  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
299    return parsePKHImm(O, "asr", 1, 32);
300  }
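  // For reference (illustrative): the PKH instructions are written as
  // "pkhbt r0, r1, r2, lsl #imm" with imm in [0, 31] and
  // "pkhtb r0, r1, r2, asr #imm" with imm in [1, 32], which is why the two
  // wrappers above pass (0, 31) and (1, 32) to parsePKHImm.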
301  OperandMatchResultTy parseSetEndImm(OperandVector &);
302  OperandMatchResultTy parseShifterImm(OperandVector &);
303  OperandMatchResultTy parseRotImm(OperandVector &);
304  OperandMatchResultTy parseBitfield(OperandVector &);
305  OperandMatchResultTy parsePostIdxReg(OperandVector &);
306  OperandMatchResultTy parseAM3Offset(OperandVector &);
307  OperandMatchResultTy parseFPImm(OperandVector &);
308  OperandMatchResultTy parseVectorList(OperandVector &);
309  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
310                                       SMLoc &EndLoc);
311
312  // Asm Match Converter Methods
313  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
314  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
315
316  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
317  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          MCStreamer &Out);
318  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
319  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
320
321public:
322  enum ARMMatchResultTy {
323    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
324    Match_RequiresNotITBlock,
325    Match_RequiresV6,
326    Match_RequiresThumb2,
327#define GET_OPERAND_DIAGNOSTIC_TYPES
328#include "ARMGenAsmMatcher.inc"
329
330  };
331
332  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
333               const MCInstrInfo &MII,
334               const MCTargetOptions &Options)
335      : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), UC(_Parser) {
336    MCAsmParserExtension::Initialize(_Parser);
337
338    // Cache the MCRegisterInfo.
339    MRI = getContext().getRegisterInfo();
340
341    // Initialize the set of available features.
342    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
343
344    // Not in an ITBlock to start with.
345    ITState.CurPosition = ~0U;
346
347    NextSymbolIsThumb = false;
348  }
349
350  // Implementation of the MCTargetAsmParser interface:
351  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
352  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
353                        SMLoc NameLoc, OperandVector &Operands) override;
354  bool ParseDirective(AsmToken DirectiveID) override;
355
356  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
357                                      unsigned Kind) override;
358  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
359
360  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
361                               OperandVector &Operands, MCStreamer &Out,
362                               unsigned &ErrorInfo,
363                               bool MatchingInlineAsm) override;
364  void onLabelParsed(MCSymbol *Symbol) override;
365};
366} // end anonymous namespace
367
368namespace {
369
370/// ARMOperand - Instances of this class represent a parsed ARM machine
371/// operand.
372class ARMOperand : public MCParsedAsmOperand {
373  enum KindTy {
374    k_CondCode,
375    k_CCOut,
376    k_ITCondMask,
377    k_CoprocNum,
378    k_CoprocReg,
379    k_CoprocOption,
380    k_Immediate,
381    k_MemBarrierOpt,
382    k_InstSyncBarrierOpt,
383    k_Memory,
384    k_PostIndexRegister,
385    k_MSRMask,
386    k_ProcIFlags,
387    k_VectorIndex,
388    k_Register,
389    k_RegisterList,
390    k_DPRRegisterList,
391    k_SPRRegisterList,
392    k_VectorList,
393    k_VectorListAllLanes,
394    k_VectorListIndexed,
395    k_ShiftedRegister,
396    k_ShiftedImmediate,
397    k_ShifterImmediate,
398    k_RotateImmediate,
399    k_BitfieldDescriptor,
400    k_Token
401  } Kind;
402
403  SMLoc StartLoc, EndLoc, AlignmentLoc;
404  SmallVector<unsigned, 8> Registers;
405
406  struct CCOp {
407    ARMCC::CondCodes Val;
408  };
409
410  struct CopOp {
411    unsigned Val;
412  };
413
414  struct CoprocOptionOp {
415    unsigned Val;
416  };
417
418  struct ITMaskOp {
419    unsigned Mask:4;
420  };
421
422  struct MBOptOp {
423    ARM_MB::MemBOpt Val;
424  };
425
426  struct ISBOptOp {
427    ARM_ISB::InstSyncBOpt Val;
428  };
429
430  struct IFlagsOp {
431    ARM_PROC::IFlags Val;
432  };
433
434  struct MMaskOp {
435    unsigned Val;
436  };
437
438  struct TokOp {
439    const char *Data;
440    unsigned Length;
441  };
442
443  struct RegOp {
444    unsigned RegNum;
445  };
446
447  // A vector register list is a sequential list of 1 to 4 registers.
448  struct VectorListOp {
449    unsigned RegNum;
450    unsigned Count;
451    unsigned LaneIndex;
452    bool isDoubleSpaced;
453  };
454
455  struct VectorIndexOp {
456    unsigned Val;
457  };
458
459  struct ImmOp {
460    const MCExpr *Val;
461  };
462
463  /// Combined record for all forms of ARM address expressions.
464  struct MemoryOp {
465    unsigned BaseRegNum;
466    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
467    // was specified.
468    const MCConstantExpr *OffsetImm;  // Offset immediate value
469    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
470    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
471    unsigned ShiftImm;        // shift for OffsetReg.
472    unsigned Alignment;       // 0 = no alignment specified
473    // n = alignment in bytes (2, 4, 8, 16, or 32)
474    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
475  };
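  // Illustrative mapping of source syntax onto MemoryOp fields (informal):
  //   [r0, #-8]        -> BaseRegNum = R0, OffsetImm = -8
  //   [r1, r2, lsl #2] -> BaseRegNum = R1, OffsetRegNum = R2,
  //                       ShiftType = lsl, ShiftImm = 2
  //   [r3:128]         -> BaseRegNum = R3, Alignment = 16 (bytes)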
476
477  struct PostIdxRegOp {
478    unsigned RegNum;
479    bool isAdd;
480    ARM_AM::ShiftOpc ShiftTy;
481    unsigned ShiftImm;
482  };
483
484  struct ShifterImmOp {
485    bool isASR;
486    unsigned Imm;
487  };
488
489  struct RegShiftedRegOp {
490    ARM_AM::ShiftOpc ShiftTy;
491    unsigned SrcReg;
492    unsigned ShiftReg;
493    unsigned ShiftImm;
494  };
495
496  struct RegShiftedImmOp {
497    ARM_AM::ShiftOpc ShiftTy;
498    unsigned SrcReg;
499    unsigned ShiftImm;
500  };
501
502  struct RotImmOp {
503    unsigned Imm;
504  };
505
506  struct BitfieldOp {
507    unsigned LSB;
508    unsigned Width;
509  };
510
511  union {
512    struct CCOp CC;
513    struct CopOp Cop;
514    struct CoprocOptionOp CoprocOption;
515    struct MBOptOp MBOpt;
516    struct ISBOptOp ISBOpt;
517    struct ITMaskOp ITMask;
518    struct IFlagsOp IFlags;
519    struct MMaskOp MMask;
520    struct TokOp Tok;
521    struct RegOp Reg;
522    struct VectorListOp VectorList;
523    struct VectorIndexOp VectorIndex;
524    struct ImmOp Imm;
525    struct MemoryOp Memory;
526    struct PostIdxRegOp PostIdxReg;
527    struct ShifterImmOp ShifterImm;
528    struct RegShiftedRegOp RegShiftedReg;
529    struct RegShiftedImmOp RegShiftedImm;
530    struct RotImmOp RotImm;
531    struct BitfieldOp Bitfield;
532  };
533
534public:
535  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
536  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
537    Kind = o.Kind;
538    StartLoc = o.StartLoc;
539    EndLoc = o.EndLoc;
540    switch (Kind) {
541    case k_CondCode:
542      CC = o.CC;
543      break;
544    case k_ITCondMask:
545      ITMask = o.ITMask;
546      break;
547    case k_Token:
548      Tok = o.Tok;
549      break;
550    case k_CCOut:
551    case k_Register:
552      Reg = o.Reg;
553      break;
554    case k_RegisterList:
555    case k_DPRRegisterList:
556    case k_SPRRegisterList:
557      Registers = o.Registers;
558      break;
559    case k_VectorList:
560    case k_VectorListAllLanes:
561    case k_VectorListIndexed:
562      VectorList = o.VectorList;
563      break;
564    case k_CoprocNum:
565    case k_CoprocReg:
566      Cop = o.Cop;
567      break;
568    case k_CoprocOption:
569      CoprocOption = o.CoprocOption;
570      break;
571    case k_Immediate:
572      Imm = o.Imm;
573      break;
574    case k_MemBarrierOpt:
575      MBOpt = o.MBOpt;
576      break;
577    case k_InstSyncBarrierOpt:
578      ISBOpt = o.ISBOpt;
      break;
579    case k_Memory:
580      Memory = o.Memory;
581      break;
582    case k_PostIndexRegister:
583      PostIdxReg = o.PostIdxReg;
584      break;
585    case k_MSRMask:
586      MMask = o.MMask;
587      break;
588    case k_ProcIFlags:
589      IFlags = o.IFlags;
590      break;
591    case k_ShifterImmediate:
592      ShifterImm = o.ShifterImm;
593      break;
594    case k_ShiftedRegister:
595      RegShiftedReg = o.RegShiftedReg;
596      break;
597    case k_ShiftedImmediate:
598      RegShiftedImm = o.RegShiftedImm;
599      break;
600    case k_RotateImmediate:
601      RotImm = o.RotImm;
602      break;
603    case k_BitfieldDescriptor:
604      Bitfield = o.Bitfield;
605      break;
606    case k_VectorIndex:
607      VectorIndex = o.VectorIndex;
608      break;
609    }
610  }
611
612  /// getStartLoc - Get the location of the first token of this operand.
613  SMLoc getStartLoc() const override { return StartLoc; }
614  /// getEndLoc - Get the location of the last token of this operand.
615  SMLoc getEndLoc() const override { return EndLoc; }
616  /// getLocRange - Get the range between the first and last token of this
617  /// operand.
618  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
619
620  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
621  SMLoc getAlignmentLoc() const {
622    assert(Kind == k_Memory && "Invalid access!");
623    return AlignmentLoc;
624  }
625
626  ARMCC::CondCodes getCondCode() const {
627    assert(Kind == k_CondCode && "Invalid access!");
628    return CC.Val;
629  }
630
631  unsigned getCoproc() const {
632    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
633    return Cop.Val;
634  }
635
636  StringRef getToken() const {
637    assert(Kind == k_Token && "Invalid access!");
638    return StringRef(Tok.Data, Tok.Length);
639  }
640
641  unsigned getReg() const override {
642    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
643    return Reg.RegNum;
644  }
645
646  const SmallVectorImpl<unsigned> &getRegList() const {
647    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
648            Kind == k_SPRRegisterList) && "Invalid access!");
649    return Registers;
650  }
651
652  const MCExpr *getImm() const {
653    assert(isImm() && "Invalid access!");
654    return Imm.Val;
655  }
656
657  unsigned getVectorIndex() const {
658    assert(Kind == k_VectorIndex && "Invalid access!");
659    return VectorIndex.Val;
660  }
661
662  ARM_MB::MemBOpt getMemBarrierOpt() const {
663    assert(Kind == k_MemBarrierOpt && "Invalid access!");
664    return MBOpt.Val;
665  }
666
667  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
668    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
669    return ISBOpt.Val;
670  }
671
672  ARM_PROC::IFlags getProcIFlags() const {
673    assert(Kind == k_ProcIFlags && "Invalid access!");
674    return IFlags.Val;
675  }
676
677  unsigned getMSRMask() const {
678    assert(Kind == k_MSRMask && "Invalid access!");
679    return MMask.Val;
680  }
681
682  bool isCoprocNum() const { return Kind == k_CoprocNum; }
683  bool isCoprocReg() const { return Kind == k_CoprocReg; }
684  bool isCoprocOption() const { return Kind == k_CoprocOption; }
685  bool isCondCode() const { return Kind == k_CondCode; }
686  bool isCCOut() const { return Kind == k_CCOut; }
687  bool isITMask() const { return Kind == k_ITCondMask; }
688  bool isITCondCode() const { return Kind == k_CondCode; }
689  bool isImm() const override { return Kind == k_Immediate; }
690  // Checks whether this operand is an unsigned offset which fits in a field
691  // of specified width and scaled by a specific number of bits.
692  template<unsigned width, unsigned scale>
693  bool isUnsignedOffset() const {
694    if (!isImm()) return false;
695    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
696    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
697      int64_t Val = CE->getValue();
698      int64_t Align = 1LL << scale;
699      int64_t Max = Align * ((1LL << width) - 1);
700      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
701    }
702    return false;
703  }
704  // Checks whether this operand is a signed offset which fits in a field
705  // of specified width and scaled by a specific number of bits.
706  template<unsigned width, unsigned scale>
707  bool isSignedOffset() const {
708    if (!isImm()) return false;
709    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
710    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
711      int64_t Val = CE->getValue();
712      int64_t Align = 1LL << scale;
713      int64_t Max = Align * ((1LL << (width-1)) - 1);
714      int64_t Min = -Align * (1LL << (width-1));
715      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
716    }
717    return false;
718  }
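  // Worked example for the two templates above (illustrative): with width=8
  // and scale=2, Align is 4, so isUnsignedOffset accepts multiples of 4 in
  // [0, 1020] and isSignedOffset accepts multiples of 4 in [-512, 508].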
719
720  // Checks whether this operand is a memory operand computed as an offset
721  // applied to PC. The offset may have 8 bits of magnitude and is represented
722  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
723  // relocatable expression.
724  bool isThumbMemPC() const {
725    int64_t Val = 0;
726    if (isImm()) {
727      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
728      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
729      if (!CE) return false;
730      Val = CE->getValue();
731    }
732    else if (isMem()) {
733      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
734      if(Memory.BaseRegNum != ARM::PC) return false;
735      Val = Memory.OffsetImm->getValue();
736    }
737    else return false;
738    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
739  }
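  // Illustrative cases for isThumbMemPC(): "ldr r0, [pc, #16]" and a bare
  // "#1020" both satisfy the check (non-negative, at most 1020, and a
  // multiple of 4), whereas "#1022" and "[pc, #-4]" do not.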
740  bool isFPImm() const {
741    if (!isImm()) return false;
742    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
743    if (!CE) return false;
744    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
745    return Val != -1;
746  }
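  // Note (illustrative): ARM_AM::getFP32Imm only succeeds for values
  // representable as a VFP/NEON modified immediate, so e.g. "#1.0" and
  // "#0.5" pass this predicate while "#0.1" does not.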
747  bool isFBits16() const {
748    if (!isImm()) return false;
749    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
750    if (!CE) return false;
751    int64_t Value = CE->getValue();
752    return Value >= 0 && Value <= 16;
753  }
754  bool isFBits32() const {
755    if (!isImm()) return false;
756    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
757    if (!CE) return false;
758    int64_t Value = CE->getValue();
759    return Value >= 1 && Value <= 32;
760  }
761  bool isImm8s4() const {
762    if (!isImm()) return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    if (!CE) return false;
765    int64_t Value = CE->getValue();
766    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
767  }
768  bool isImm0_1020s4() const {
769    if (!isImm()) return false;
770    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
771    if (!CE) return false;
772    int64_t Value = CE->getValue();
773    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
774  }
775  bool isImm0_508s4() const {
776    if (!isImm()) return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
781  }
782  bool isImm0_508s4Neg() const {
783    if (!isImm()) return false;
784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785    if (!CE) return false;
786    int64_t Value = -CE->getValue();
787    // Explicitly exclude zero; we want that to use the normal 0_508 version.
788    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
789  }
790  bool isImm0_239() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return Value >= 0 && Value < 240;
796  }
797  bool isImm0_255() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return Value >= 0 && Value < 256;
803  }
804  bool isImm0_4095() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value >= 0 && Value < 4096;
810  }
811  bool isImm0_4095Neg() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = -CE->getValue();
816    return Value > 0 && Value < 4096;
817  }
818  bool isImm0_1() const {
819    if (!isImm()) return false;
820    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821    if (!CE) return false;
822    int64_t Value = CE->getValue();
823    return Value >= 0 && Value < 2;
824  }
825  bool isImm0_3() const {
826    if (!isImm()) return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    return Value >= 0 && Value < 4;
831  }
832  bool isImm0_7() const {
833    if (!isImm()) return false;
834    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
835    if (!CE) return false;
836    int64_t Value = CE->getValue();
837    return Value >= 0 && Value < 8;
838  }
839  bool isImm0_15() const {
840    if (!isImm()) return false;
841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
842    if (!CE) return false;
843    int64_t Value = CE->getValue();
844    return Value >= 0 && Value < 16;
845  }
846  bool isImm0_31() const {
847    if (!isImm()) return false;
848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
849    if (!CE) return false;
850    int64_t Value = CE->getValue();
851    return Value >= 0 && Value < 32;
852  }
853  bool isImm0_63() const {
854    if (!isImm()) return false;
855    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
856    if (!CE) return false;
857    int64_t Value = CE->getValue();
858    return Value >= 0 && Value < 64;
859  }
860  bool isImm8() const {
861    if (!isImm()) return false;
862    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
863    if (!CE) return false;
864    int64_t Value = CE->getValue();
865    return Value == 8;
866  }
867  bool isImm16() const {
868    if (!isImm()) return false;
869    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
870    if (!CE) return false;
871    int64_t Value = CE->getValue();
872    return Value == 16;
873  }
874  bool isImm32() const {
875    if (!isImm()) return false;
876    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
877    if (!CE) return false;
878    int64_t Value = CE->getValue();
879    return Value == 32;
880  }
881  bool isShrImm8() const {
882    if (!isImm()) return false;
883    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
884    if (!CE) return false;
885    int64_t Value = CE->getValue();
886    return Value > 0 && Value <= 8;
887  }
888  bool isShrImm16() const {
889    if (!isImm()) return false;
890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
891    if (!CE) return false;
892    int64_t Value = CE->getValue();
893    return Value > 0 && Value <= 16;
894  }
895  bool isShrImm32() const {
896    if (!isImm()) return false;
897    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
898    if (!CE) return false;
899    int64_t Value = CE->getValue();
900    return Value > 0 && Value <= 32;
901  }
902  bool isShrImm64() const {
903    if (!isImm()) return false;
904    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
905    if (!CE) return false;
906    int64_t Value = CE->getValue();
907    return Value > 0 && Value <= 64;
908  }
909  bool isImm1_7() const {
910    if (!isImm()) return false;
911    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
912    if (!CE) return false;
913    int64_t Value = CE->getValue();
914    return Value > 0 && Value < 8;
915  }
916  bool isImm1_15() const {
917    if (!isImm()) return false;
918    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
919    if (!CE) return false;
920    int64_t Value = CE->getValue();
921    return Value > 0 && Value < 16;
922  }
923  bool isImm1_31() const {
924    if (!isImm()) return false;
925    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926    if (!CE) return false;
927    int64_t Value = CE->getValue();
928    return Value > 0 && Value < 32;
929  }
930  bool isImm1_16() const {
931    if (!isImm()) return false;
932    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
933    if (!CE) return false;
934    int64_t Value = CE->getValue();
935    return Value > 0 && Value < 17;
936  }
937  bool isImm1_32() const {
938    if (!isImm()) return false;
939    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
940    if (!CE) return false;
941    int64_t Value = CE->getValue();
942    return Value > 0 && Value < 33;
943  }
944  bool isImm0_32() const {
945    if (!isImm()) return false;
946    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
947    if (!CE) return false;
948    int64_t Value = CE->getValue();
949    return Value >= 0 && Value < 33;
950  }
951  bool isImm0_65535() const {
952    if (!isImm()) return false;
953    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
954    if (!CE) return false;
955    int64_t Value = CE->getValue();
956    return Value >= 0 && Value < 65536;
957  }
958  bool isImm256_65535Expr() const {
959    if (!isImm()) return false;
960    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
961    // If it's not a constant expression, it'll generate a fixup and be
962    // handled later.
963    if (!CE) return true;
964    int64_t Value = CE->getValue();
965    return Value >= 256 && Value < 65536;
966  }
967  bool isImm0_65535Expr() const {
968    if (!isImm()) return false;
969    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
970    // If it's not a constant expression, it'll generate a fixup and be
971    // handled later.
972    if (!CE) return true;
973    int64_t Value = CE->getValue();
974    return Value >= 0 && Value < 65536;
975  }
976  bool isImm24bit() const {
977    if (!isImm()) return false;
978    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
979    if (!CE) return false;
980    int64_t Value = CE->getValue();
981    return Value >= 0 && Value <= 0xffffff;
982  }
983  bool isImmThumbSR() const {
984    if (!isImm()) return false;
985    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
986    if (!CE) return false;
987    int64_t Value = CE->getValue();
988    return Value > 0 && Value < 33;
989  }
990  bool isPKHLSLImm() const {
991    if (!isImm()) return false;
992    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
993    if (!CE) return false;
994    int64_t Value = CE->getValue();
995    return Value >= 0 && Value < 32;
996  }
997  bool isPKHASRImm() const {
998    if (!isImm()) return false;
999    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1000    if (!CE) return false;
1001    int64_t Value = CE->getValue();
1002    return Value > 0 && Value <= 32;
1003  }
1004  bool isAdrLabel() const {
1005    // If we have an immediate that's not a constant, treat it as a label
1006    // reference needing a fixup. If it is a constant that can't fit into
1007    // the shift-immediate encoding, we reject it.
1008    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
1009    else return (isARMSOImm() || isARMSOImmNeg());
1010  }
1011  bool isARMSOImm() const {
1012    if (!isImm()) return false;
1013    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1014    if (!CE) return false;
1015    int64_t Value = CE->getValue();
1016    return ARM_AM::getSOImmVal(Value) != -1;
1017  }
1018  bool isARMSOImmNot() const {
1019    if (!isImm()) return false;
1020    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1021    if (!CE) return false;
1022    int64_t Value = CE->getValue();
1023    return ARM_AM::getSOImmVal(~Value) != -1;
1024  }
1025  bool isARMSOImmNeg() const {
1026    if (!isImm()) return false;
1027    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1028    if (!CE) return false;
1029    int64_t Value = CE->getValue();
1030    // Only use this when not representable as a plain so_imm.
1031    return ARM_AM::getSOImmVal(Value) == -1 &&
1032      ARM_AM::getSOImmVal(-Value) != -1;
1033  }
1034  bool isT2SOImm() const {
1035    if (!isImm()) return false;
1036    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1037    if (!CE) return false;
1038    int64_t Value = CE->getValue();
1039    return ARM_AM::getT2SOImmVal(Value) != -1;
1040  }
1041  bool isT2SOImmNot() const {
1042    if (!isImm()) return false;
1043    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1044    if (!CE) return false;
1045    int64_t Value = CE->getValue();
1046    return ARM_AM::getT2SOImmVal(Value) == -1 &&
1047      ARM_AM::getT2SOImmVal(~Value) != -1;
1048  }
1049  bool isT2SOImmNeg() const {
1050    if (!isImm()) return false;
1051    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1052    if (!CE) return false;
1053    int64_t Value = CE->getValue();
1054    // Only use this when not representable as a plain so_imm.
1055    return ARM_AM::getT2SOImmVal(Value) == -1 &&
1056      ARM_AM::getT2SOImmVal(-Value) != -1;
1057  }
1058  bool isSetEndImm() const {
1059    if (!isImm()) return false;
1060    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1061    if (!CE) return false;
1062    int64_t Value = CE->getValue();
1063    return Value == 1 || Value == 0;
1064  }
1065  bool isReg() const override { return Kind == k_Register; }
1066  bool isRegList() const { return Kind == k_RegisterList; }
1067  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1068  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1069  bool isToken() const override { return Kind == k_Token; }
1070  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1071  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1072  bool isMem() const override { return Kind == k_Memory; }
1073  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1074  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1075  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1076  bool isRotImm() const { return Kind == k_RotateImmediate; }
1077  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1078  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1079  bool isPostIdxReg() const {
1080    return Kind == k_PostIndexRegister &&
           PostIdxReg.ShiftTy == ARM_AM::no_shift;
1081  }
1082  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1083    if (!isMem())
1084      return false;
1085    // No offset of any kind.
1086    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1087     (alignOK || Memory.Alignment == Alignment);
1088  }
1089  bool isMemPCRelImm12() const {
1090    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1091      return false;
1092    // Base register must be PC.
1093    if (Memory.BaseRegNum != ARM::PC)
1094      return false;
1095    // Immediate offset in range [-4095, 4095].
1096    if (!Memory.OffsetImm) return true;
1097    int64_t Val = Memory.OffsetImm->getValue();
1098    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1099  }
1100  bool isAlignedMemory() const {
1101    return isMemNoOffset(true);
1102  }
1103  bool isAlignedMemoryNone() const {
1104    return isMemNoOffset(false, 0);
1105  }
1106  bool isDupAlignedMemoryNone() const {
1107    return isMemNoOffset(false, 0);
1108  }
1109  bool isAlignedMemory16() const {
1110    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1111      return true;
1112    return isMemNoOffset(false, 0);
1113  }
1114  bool isDupAlignedMemory16() const {
1115    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1116      return true;
1117    return isMemNoOffset(false, 0);
1118  }
1119  bool isAlignedMemory32() const {
1120    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1121      return true;
1122    return isMemNoOffset(false, 0);
1123  }
1124  bool isDupAlignedMemory32() const {
1125    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1126      return true;
1127    return isMemNoOffset(false, 0);
1128  }
1129  bool isAlignedMemory64() const {
1130    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1131      return true;
1132    return isMemNoOffset(false, 0);
1133  }
1134  bool isDupAlignedMemory64() const {
1135    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1136      return true;
1137    return isMemNoOffset(false, 0);
1138  }
1139  bool isAlignedMemory64or128() const {
1140    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1141      return true;
1142    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1143      return true;
1144    return isMemNoOffset(false, 0);
1145  }
1146  bool isDupAlignedMemory64or128() const {
1147    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1148      return true;
1149    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1150      return true;
1151    return isMemNoOffset(false, 0);
1152  }
1153  bool isAlignedMemory64or128or256() const {
1154    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1155      return true;
1156    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1157      return true;
1158    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1159      return true;
1160    return isMemNoOffset(false, 0);
1161  }
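  // Illustrative examples for the alignment predicates above: for
  // "vld1.64 {d0, d1}, [r0:128]" the operand has Memory.Alignment == 16, so
  // isAlignedMemory64or128() and isAlignedMemory64or128or256() return true
  // while isAlignedMemory16() does not; a plain "[r0]" (Alignment == 0) is
  // accepted by all of them.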
1162  bool isAddrMode2() const {
1163    if (!isMem() || Memory.Alignment != 0) return false;
1164    // Check for register offset.
1165    if (Memory.OffsetRegNum) return true;
1166    // Immediate offset in range [-4095, 4095].
1167    if (!Memory.OffsetImm) return true;
1168    int64_t Val = Memory.OffsetImm->getValue();
1169    return Val > -4096 && Val < 4096;
1170  }
1171  bool isAM2OffsetImm() const {
1172    if (!isImm()) return false;
1173    // Immediate offset in range [-4095, 4095].
1174    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1175    if (!CE) return false;
1176    int64_t Val = CE->getValue();
1177    return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1178  }
1179  bool isAddrMode3() const {
1180    // If we have an immediate that's not a constant, treat it as a label
1181    // reference needing a fixup. If it is a constant, it's something else
1182    // and we reject it.
1183    if (isImm() && !isa<MCConstantExpr>(getImm()))
1184      return true;
1185    if (!isMem() || Memory.Alignment != 0) return false;
1186    // No shifts are legal for AM3.
1187    if (Memory.ShiftType != ARM_AM::no_shift) return false;
1188    // Check for register offset.
1189    if (Memory.OffsetRegNum) return true;
1190    // Immediate offset in range [-255, 255].
1191    if (!Memory.OffsetImm) return true;
1192    int64_t Val = Memory.OffsetImm->getValue();
1193    // The #-0 offset is encoded as INT32_MIN, and we have to check
1194    // for this too.
1195    return (Val > -256 && Val < 256) || Val == INT32_MIN;
1196  }
1197  bool isAM3Offset() const {
1198    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1199      return false;
1200    if (Kind == k_PostIndexRegister)
1201      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1202    // Immediate offset in range [-255, 255].
1203    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1204    if (!CE) return false;
1205    int64_t Val = CE->getValue();
1206    // Special case, #-0 is INT32_MIN.
1207    return (Val > -256 && Val < 256) || Val == INT32_MIN;
1208  }
1209  bool isAddrMode5() const {
1210    // If we have an immediate that's not a constant, treat it as a label
1211    // reference needing a fixup. If it is a constant, it's something else
1212    // and we reject it.
1213    if (isImm() && !isa<MCConstantExpr>(getImm()))
1214      return true;
1215    if (!isMem() || Memory.Alignment != 0) return false;
1216    // Check for register offset.
1217    if (Memory.OffsetRegNum) return false;
1218    // Immediate offset in range [-1020, 1020] and a multiple of 4.
1219    if (!Memory.OffsetImm) return true;
1220    int64_t Val = Memory.OffsetImm->getValue();
1221    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1222      Val == INT32_MIN;
1223  }
1224  bool isMemTBB() const {
1225    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1226        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1227      return false;
1228    return true;
1229  }
1230  bool isMemTBH() const {
1231    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1232        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1233        Memory.Alignment != 0)
1234      return false;
1235    return true;
1236  }
1237  bool isMemRegOffset() const {
1238    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1239      return false;
1240    return true;
1241  }
1242  bool isT2MemRegOffset() const {
1243    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1244        Memory.Alignment != 0)
1245      return false;
1246    // Only lsl #{0, 1, 2, 3} allowed.
1247    if (Memory.ShiftType == ARM_AM::no_shift)
1248      return true;
1249    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1250      return false;
1251    return true;
1252  }
1253  bool isMemThumbRR() const {
1254    // Thumb reg+reg addressing is simple. Just two registers, a base and
1255    // an offset. No shifts, negations or any other complicating factors.
1256    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1257        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1258      return false;
1259    return isARMLowRegister(Memory.BaseRegNum) &&
1260      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1261  }
1262  bool isMemThumbRIs4() const {
1263    if (!isMem() || Memory.OffsetRegNum != 0 ||
1264        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1265      return false;
1266    // Immediate offset, multiple of 4 in range [0, 124].
1267    if (!Memory.OffsetImm) return true;
1268    int64_t Val = Memory.OffsetImm->getValue();
1269    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1270  }
1271  bool isMemThumbRIs2() const {
1272    if (!isMem() || Memory.OffsetRegNum != 0 ||
1273        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1274      return false;
1275    // Immediate offset, multiple of 2 in range [0, 62].
1276    if (!Memory.OffsetImm) return true;
1277    int64_t Val = Memory.OffsetImm->getValue();
1278    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1279  }
1280  bool isMemThumbRIs1() const {
1281    if (!isMem() || Memory.OffsetRegNum != 0 ||
1282        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1283      return false;
1284    // Immediate offset in range [0, 31].
1285    if (!Memory.OffsetImm) return true;
1286    int64_t Val = Memory.OffsetImm->getValue();
1287    return Val >= 0 && Val <= 31;
1288  }
1289  bool isMemThumbSPI() const {
1290    if (!isMem() || Memory.OffsetRegNum != 0 ||
1291        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1292      return false;
1293    // Immediate offset, multiple of 4 in range [0, 1020].
1294    if (!Memory.OffsetImm) return true;
1295    int64_t Val = Memory.OffsetImm->getValue();
1296    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1297  }
1298  bool isMemImm8s4Offset() const {
1299    // If we have an immediate that's not a constant, treat it as a label
1300    // reference needing a fixup. If it is a constant, it's something else
1301    // and we reject it.
1302    if (isImm() && !isa<MCConstantExpr>(getImm()))
1303      return true;
1304    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1305      return false;
1306    // Immediate offset a multiple of 4 in range [-1020, 1020].
1307    if (!Memory.OffsetImm) return true;
1308    int64_t Val = Memory.OffsetImm->getValue();
1309    // Special case, #-0 is INT32_MIN.
1310    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1311  }
1312  bool isMemImm0_1020s4Offset() const {
1313    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1314      return false;
1315    // Immediate offset a multiple of 4 in range [0, 1020].
1316    if (!Memory.OffsetImm) return true;
1317    int64_t Val = Memory.OffsetImm->getValue();
1318    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1319  }
1320  bool isMemImm8Offset() const {
1321    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1322      return false;
1323    // Base reg of PC isn't allowed for these encodings.
1324    if (Memory.BaseRegNum == ARM::PC) return false;
1325    // Immediate offset in range [-255, 255].
1326    if (!Memory.OffsetImm) return true;
1327    int64_t Val = Memory.OffsetImm->getValue();
1328    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1329  }
1330  bool isMemPosImm8Offset() const {
1331    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1332      return false;
1333    // Immediate offset in range [0, 255].
1334    if (!Memory.OffsetImm) return true;
1335    int64_t Val = Memory.OffsetImm->getValue();
1336    return Val >= 0 && Val < 256;
1337  }
1338  bool isMemNegImm8Offset() const {
1339    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1340      return false;
1341    // Base reg of PC isn't allowed for these encodings.
1342    if (Memory.BaseRegNum == ARM::PC) return false;
1343    // Immediate offset in range [-255, -1].
1344    if (!Memory.OffsetImm) return false;
1345    int64_t Val = Memory.OffsetImm->getValue();
1346    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1347  }
1348  bool isMemUImm12Offset() const {
1349    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1350      return false;
1351    // Immediate offset in range [0, 4095].
1352    if (!Memory.OffsetImm) return true;
1353    int64_t Val = Memory.OffsetImm->getValue();
1354    return (Val >= 0 && Val < 4096);
1355  }
1356  bool isMemImm12Offset() const {
1357    // If we have an immediate that's not a constant, treat it as a label
1358    // reference needing a fixup. If it is a constant, it's something else
1359    // and we reject it.
1360    if (isImm() && !isa<MCConstantExpr>(getImm()))
1361      return true;
1362
1363    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1364      return false;
1365    // Immediate offset in range [-4095, 4095].
1366    if (!Memory.OffsetImm) return true;
1367    int64_t Val = Memory.OffsetImm->getValue();
1368    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1369  }
1370  bool isPostIdxImm8() const {
1371    if (!isImm()) return false;
1372    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1373    if (!CE) return false;
1374    int64_t Val = CE->getValue();
1375    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1376  }
1377  bool isPostIdxImm8s4() const {
1378    if (!isImm()) return false;
1379    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1380    if (!CE) return false;
1381    int64_t Val = CE->getValue();
1382    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1383      (Val == INT32_MIN);
1384  }
1385
1386  bool isMSRMask() const { return Kind == k_MSRMask; }
1387  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1388
1389  // NEON operands.
1390  bool isSingleSpacedVectorList() const {
1391    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1392  }
1393  bool isDoubleSpacedVectorList() const {
1394    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1395  }
1396  bool isVecListOneD() const {
1397    if (!isSingleSpacedVectorList()) return false;
1398    return VectorList.Count == 1;
1399  }
1400
1401  bool isVecListDPair() const {
1402    if (!isSingleSpacedVectorList()) return false;
1403    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1404              .contains(VectorList.RegNum));
1405  }
1406
1407  bool isVecListThreeD() const {
1408    if (!isSingleSpacedVectorList()) return false;
1409    return VectorList.Count == 3;
1410  }
1411
1412  bool isVecListFourD() const {
1413    if (!isSingleSpacedVectorList()) return false;
1414    return VectorList.Count == 4;
1415  }
1416
1417  bool isVecListDPairSpaced() const {
1418    if (Kind != k_VectorList) return false;
1419    if (isSingleSpacedVectorList()) return false;
1420    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1421              .contains(VectorList.RegNum));
1422  }
1423
1424  bool isVecListThreeQ() const {
1425    if (!isDoubleSpacedVectorList()) return false;
1426    return VectorList.Count == 3;
1427  }
1428
1429  bool isVecListFourQ() const {
1430    if (!isDoubleSpacedVectorList()) return false;
1431    return VectorList.Count == 4;
1432  }
1433
1434  bool isSingleSpacedVectorAllLanes() const {
1435    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1436  }
1437  bool isDoubleSpacedVectorAllLanes() const {
1438    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1439  }
1440  bool isVecListOneDAllLanes() const {
1441    if (!isSingleSpacedVectorAllLanes()) return false;
1442    return VectorList.Count == 1;
1443  }
1444
1445  bool isVecListDPairAllLanes() const {
1446    if (!isSingleSpacedVectorAllLanes()) return false;
1447    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1448              .contains(VectorList.RegNum));
1449  }
1450
1451  bool isVecListDPairSpacedAllLanes() const {
1452    if (!isDoubleSpacedVectorAllLanes()) return false;
1453    return VectorList.Count == 2;
1454  }
1455
1456  bool isVecListThreeDAllLanes() const {
1457    if (!isSingleSpacedVectorAllLanes()) return false;
1458    return VectorList.Count == 3;
1459  }
1460
1461  bool isVecListThreeQAllLanes() const {
1462    if (!isDoubleSpacedVectorAllLanes()) return false;
1463    return VectorList.Count == 3;
1464  }
1465
1466  bool isVecListFourDAllLanes() const {
1467    if (!isSingleSpacedVectorAllLanes()) return false;
1468    return VectorList.Count == 4;
1469  }
1470
1471  bool isVecListFourQAllLanes() const {
1472    if (!isDoubleSpacedVectorAllLanes()) return false;
1473    return VectorList.Count == 4;
1474  }
1475
1476  bool isSingleSpacedVectorIndexed() const {
1477    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1478  }
1479  bool isDoubleSpacedVectorIndexed() const {
1480    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1481  }
1482  bool isVecListOneDByteIndexed() const {
1483    if (!isSingleSpacedVectorIndexed()) return false;
1484    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1485  }
1486
1487  bool isVecListOneDHWordIndexed() const {
1488    if (!isSingleSpacedVectorIndexed()) return false;
1489    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1490  }
1491
1492  bool isVecListOneDWordIndexed() const {
1493    if (!isSingleSpacedVectorIndexed()) return false;
1494    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1495  }
1496
1497  bool isVecListTwoDByteIndexed() const {
1498    if (!isSingleSpacedVectorIndexed()) return false;
1499    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1500  }
1501
1502  bool isVecListTwoDHWordIndexed() const {
1503    if (!isSingleSpacedVectorIndexed()) return false;
1504    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1505  }
1506
1507  bool isVecListTwoQWordIndexed() const {
1508    if (!isDoubleSpacedVectorIndexed()) return false;
1509    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1510  }
1511
1512  bool isVecListTwoQHWordIndexed() const {
1513    if (!isDoubleSpacedVectorIndexed()) return false;
1514    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1515  }
1516
1517  bool isVecListTwoDWordIndexed() const {
1518    if (!isSingleSpacedVectorIndexed()) return false;
1519    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1520  }
1521
1522  bool isVecListThreeDByteIndexed() const {
1523    if (!isSingleSpacedVectorIndexed()) return false;
1524    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1525  }
1526
1527  bool isVecListThreeDHWordIndexed() const {
1528    if (!isSingleSpacedVectorIndexed()) return false;
1529    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1530  }
1531
1532  bool isVecListThreeQWordIndexed() const {
1533    if (!isDoubleSpacedVectorIndexed()) return false;
1534    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1535  }
1536
1537  bool isVecListThreeQHWordIndexed() const {
1538    if (!isDoubleSpacedVectorIndexed()) return false;
1539    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1540  }
1541
1542  bool isVecListThreeDWordIndexed() const {
1543    if (!isSingleSpacedVectorIndexed()) return false;
1544    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1545  }
1546
1547  bool isVecListFourDByteIndexed() const {
1548    if (!isSingleSpacedVectorIndexed()) return false;
1549    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1550  }
1551
1552  bool isVecListFourDHWordIndexed() const {
1553    if (!isSingleSpacedVectorIndexed()) return false;
1554    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1555  }
1556
1557  bool isVecListFourQWordIndexed() const {
1558    if (!isDoubleSpacedVectorIndexed()) return false;
1559    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1560  }
1561
1562  bool isVecListFourQHWordIndexed() const {
1563    if (!isDoubleSpacedVectorIndexed()) return false;
1564    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1565  }
1566
1567  bool isVecListFourDWordIndexed() const {
1568    if (!isSingleSpacedVectorIndexed()) return false;
1569    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1570  }
1571
1572  bool isVectorIndex8() const {
1573    if (Kind != k_VectorIndex) return false;
1574    return VectorIndex.Val < 8;
1575  }
1576  bool isVectorIndex16() const {
1577    if (Kind != k_VectorIndex) return false;
1578    return VectorIndex.Val < 4;
1579  }
1580  bool isVectorIndex32() const {
1581    if (Kind != k_VectorIndex) return false;
1582    return VectorIndex.Val < 2;
1583  }
1584
1585  bool isNEONi8splat() const {
1586    if (!isImm()) return false;
1587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1588    // Must be a constant.
1589    if (!CE) return false;
1590    int64_t Value = CE->getValue();
1591    // An i8 value splatted across 8 bytes. The immediate is just the 8-bit
1592    // value.
1593    return Value >= 0 && Value < 256;
1594  }
1595
1596  bool isNEONi16splat() const {
1597    if (isNEONByteReplicate(2))
1598      return false; // Leave this to byte replication; reject it here by default.
1599    if (!isImm())
1600      return false;
1601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1602    // Must be a constant.
1603    if (!CE) return false;
1604    int64_t Value = CE->getValue();
1605    // i16 value in the range [0,255] or [0x0100, 0xff00]
1606    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1607  }
1608
1609  bool isNEONi32splat() const {
1610    if (isNEONByteReplicate(4))
1611      return false; // Leave this to byte replication; reject it here by default.
1612    if (!isImm())
1613      return false;
1614    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1615    // Must be a constant.
1616    if (!CE) return false;
1617    int64_t Value = CE->getValue();
1618    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1619    return (Value >= 0 && Value < 256) ||
1620      (Value >= 0x0100 && Value <= 0xff00) ||
1621      (Value >= 0x010000 && Value <= 0xff0000) ||
1622      (Value >= 0x01000000 && Value <= 0xff000000);
1623  }
1624
1625  bool isNEONByteReplicate(unsigned NumBytes) const {
1626    if (!isImm())
1627      return false;
1628    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1629    // Must be a constant.
1630    if (!CE)
1631      return false;
1632    int64_t Value = CE->getValue();
1633    if (!Value)
1634      return false; // Don't bother with zero.
1635
1636    unsigned char B = Value & 0xff;
1637    for (unsigned i = 1; i < NumBytes; ++i) {
1638      Value >>= 8;
1639      if ((Value & 0xff) != B)
1640        return false;
1641    }
1642    return true;
1643  }
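  // Illustrative examples of the check above (assumed values): with
  // NumBytes == 2 an immediate of 0xabab is accepted because both of its low
  // bytes equal 0xab, while 0xab00 is rejected since its two low bytes
  // differ; with NumBytes == 4, 0x7f7f7f7f passes and 0x7f007f00 does not.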
1644  bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
1645  bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
1646  bool isNEONi32vmov() const {
1647    if (isNEONByteReplicate(4))
1648      return false; // Let it be classified as the byte-replicate case.
1649    if (!isImm())
1650      return false;
1651    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1652    // Must be a constant.
1653    if (!CE)
1654      return false;
1655    int64_t Value = CE->getValue();
1656    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1657    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1658    return (Value >= 0 && Value < 256) ||
1659      (Value >= 0x0100 && Value <= 0xff00) ||
1660      (Value >= 0x010000 && Value <= 0xff0000) ||
1661      (Value >= 0x01000000 && Value <= 0xff000000) ||
1662      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1663      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1664  }
1665  bool isNEONi32vmovNeg() const {
1666    if (!isImm()) return false;
1667    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1668    // Must be a constant.
1669    if (!CE) return false;
1670    int64_t Value = ~CE->getValue();
1671    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1672    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1673    return (Value >= 0 && Value < 256) ||
1674      (Value >= 0x0100 && Value <= 0xff00) ||
1675      (Value >= 0x010000 && Value <= 0xff0000) ||
1676      (Value >= 0x01000000 && Value <= 0xff000000) ||
1677      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1678      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1679  }
1680
1681  bool isNEONi64splat() const {
1682    if (!isImm()) return false;
1683    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1684    // Must be a constant.
1685    if (!CE) return false;
1686    uint64_t Value = CE->getValue();
1687    // i64 value with each byte being either 0 or 0xff.
1688    for (unsigned i = 0; i < 8; ++i)
1689      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1690    return true;
1691  }
1692
1693  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1694    // Add as immediates when possible.  Null MCExpr = 0.
1695    if (!Expr)
1696      Inst.addOperand(MCOperand::CreateImm(0));
1697    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1698      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1699    else
1700      Inst.addOperand(MCOperand::CreateExpr(Expr));
1701  }
1702
1703  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1704    assert(N == 2 && "Invalid number of operands!");
1705    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1706    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1707    Inst.addOperand(MCOperand::CreateReg(RegNum));
1708  }
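  // For example (illustrative), an "addeq" instruction adds an immediate of
  // ARMCC::EQ followed by ARM::CPSR here, while an unconditional "add"
  // (ARMCC::AL) adds the AL immediate and a register operand of 0, i.e. no
  // condition-register dependence.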
1709
1710  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1711    assert(N == 1 && "Invalid number of operands!");
1712    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1713  }
1714
1715  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 1 && "Invalid number of operands!");
1717    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1718  }
1719
1720  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 1 && "Invalid number of operands!");
1722    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1723  }
1724
1725  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1726    assert(N == 1 && "Invalid number of operands!");
1727    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1728  }
1729
1730  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1731    assert(N == 1 && "Invalid number of operands!");
1732    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1733  }
1734
1735  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1736    assert(N == 1 && "Invalid number of operands!");
1737    Inst.addOperand(MCOperand::CreateReg(getReg()));
1738  }
1739
1740  void addRegOperands(MCInst &Inst, unsigned N) const {
1741    assert(N == 1 && "Invalid number of operands!");
1742    Inst.addOperand(MCOperand::CreateReg(getReg()));
1743  }
1744
1745  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1746    assert(N == 3 && "Invalid number of operands!");
1747    assert(isRegShiftedReg() &&
1748           "addRegShiftedRegOperands() on non-RegShiftedReg!");
1749    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1750    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1751    Inst.addOperand(MCOperand::CreateImm(
1752      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1753  }
1754
1755  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1756    assert(N == 2 && "Invalid number of operands!");
1757    assert(isRegShiftedImm() &&
1758           "addRegShiftedImmOperands() on non-RegShiftedImm!");
1759    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1760    // Shift of #32 is encoded as 0 where permitted
1761    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1762    Inst.addOperand(MCOperand::CreateImm(
1763      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1764  }
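  // For example (illustrative), an operand written as "r1, lsr #32" is stored
  // with ShiftImm == 32 and emitted here as getSORegOpc(ARM_AM::lsr, 0),
  // following the #32-encodes-as-0 rule noted above.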
1765
1766  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1767    assert(N == 1 && "Invalid number of operands!");
1768    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1769                                         ShifterImm.Imm));
1770  }
1771
1772  void addRegListOperands(MCInst &Inst, unsigned N) const {
1773    assert(N == 1 && "Invalid number of operands!");
1774    const SmallVectorImpl<unsigned> &RegList = getRegList();
1775    for (SmallVectorImpl<unsigned>::const_iterator
1776           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1777      Inst.addOperand(MCOperand::CreateReg(*I));
1778  }
1779
1780  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1781    addRegListOperands(Inst, N);
1782  }
1783
1784  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1785    addRegListOperands(Inst, N);
1786  }
1787
1788  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 1 && "Invalid number of operands!");
1790    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1791    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1792  }
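  // Worked example (illustrative): a rotation of 16 is emitted here as the
  // encoded value 2 (16 >> 3); likewise 8 becomes 1 and 24 becomes 3.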
1793
1794  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 1 && "Invalid number of operands!");
1796    // Munge the lsb/width into a bitfield mask.
1797    unsigned lsb = Bitfield.LSB;
1798    unsigned width = Bitfield.Width;
1799    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1800    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1801                      (32 - (lsb + width)));
1802    Inst.addOperand(MCOperand::CreateImm(Mask));
1803  }
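  // Worked example (illustrative): for lsb == 8 and width == 4 the expression
  // above evaluates to ~0x00000f00 == 0xfffff0ff, i.e. a mask with bits
  // [11:8] clear and every other bit set.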
1804
1805  void addImmOperands(MCInst &Inst, unsigned N) const {
1806    assert(N == 1 && "Invalid number of operands!");
1807    addExpr(Inst, getImm());
1808  }
1809
1810  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1811    assert(N == 1 && "Invalid number of operands!");
1812    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1813    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1814  }
1815
1816  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1817    assert(N == 1 && "Invalid number of operands!");
1818    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1819    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1820  }
1821
1822  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1823    assert(N == 1 && "Invalid number of operands!");
1824    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1825    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1826    Inst.addOperand(MCOperand::CreateImm(Val));
1827  }
1828
1829  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1830    assert(N == 1 && "Invalid number of operands!");
1831    // FIXME: We really want to scale the value here, but the LDRD/STRD
1832    // instructions don't encode operands that way yet.
1833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1835  }
1836
1837  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1838    assert(N == 1 && "Invalid number of operands!");
1839    // The immediate is scaled by four in the encoding and is stored
1840    // in the MCInst as such. Lop off the low two bits here.
1841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1843  }
1844
1845  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1846    assert(N == 1 && "Invalid number of operands!");
1847    // The immediate is scaled by four in the encoding and is stored
1848    // in the MCInst as such. Lop off the low two bits here.
1849    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1850    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1851  }
1852
1853  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1854    assert(N == 1 && "Invalid number of operands!");
1855    // The immediate is scaled by four in the encoding and is stored
1856    // in the MCInst as such. Lop off the low two bits here.
1857    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1858    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1859  }
1860
1861  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1862    assert(N == 1 && "Invalid number of operands!");
1863    // The constant encodes as the immediate minus one, and we store the bits
1864    // as encoded in the instruction, so subtract one here.
1865    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1866    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1867  }
1868
1869  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1870    assert(N == 1 && "Invalid number of operands!");
1871    // The constant encodes as the immediate minus one, and we store the bits
1872    // as encoded in the instruction, so subtract one here.
1873    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1874    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1875  }
1876
1877  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    // The constant encodes as the immediate, except for 32, which encodes as
1880    // zero.
1881    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1882    unsigned Imm = CE->getValue();
1883    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1884  }
1885
1886  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1887    assert(N == 1 && "Invalid number of operands!");
1888    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1889    // the instruction as well.
1890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1891    int Val = CE->getValue();
1892    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1893  }
1894
1895  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1896    assert(N == 1 && "Invalid number of operands!");
1897    // The operand is actually a t2_so_imm, but we have its bitwise
1898    // negation in the assembly source, so twiddle it here.
1899    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1900    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1901  }
1902
1903  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1904    assert(N == 1 && "Invalid number of operands!");
1905    // The operand is actually a t2_so_imm, but we have its
1906    // negation in the assembly source, so twiddle it here.
1907    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1908    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1909  }
1910
1911  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1912    assert(N == 1 && "Invalid number of operands!");
1913    // The operand is actually an imm0_4095, but we have its
1914    // negation in the assembly source, so twiddle it here.
1915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1916    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1917  }
1918
1919  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
1920    if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1921      Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
1922      return;
1923    }
1924
1925    const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1926    assert(SR && "Unknown value type!");
1927    Inst.addOperand(MCOperand::CreateExpr(SR));
1928  }
1929
1930  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
1931    assert(N == 1 && "Invalid number of operands!");
1932    if (isImm()) {
1933      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1934      if (CE) {
1935        Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1936        return;
1937      }
1938
1939      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1940      assert(SR && "Unknown value type!");
1941      Inst.addOperand(MCOperand::CreateExpr(SR));
1942      return;
1943    }
1944
1945    assert(isMem() && "Unknown value type!");
1946    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
1947    Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
1948  }
1949
1950  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1951    assert(N == 1 && "Invalid number of operands!");
1952    // The operand is actually a so_imm, but we have its bitwise
1953    // negation in the assembly source, so twiddle it here.
1954    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1955    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1956  }
1957
1958  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1959    assert(N == 1 && "Invalid number of operands!");
1960    // The operand is actually a so_imm, but we have its
1961    // negation in the assembly source, so twiddle it here.
1962    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1963    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1964  }
1965
1966  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1967    assert(N == 1 && "Invalid number of operands!");
1968    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1969  }
1970
1971  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
1972    assert(N == 1 && "Invalid number of operands!");
1973    Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
1974  }
1975
1976  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1977    assert(N == 1 && "Invalid number of operands!");
1978    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1979  }
1980
1981  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1982    assert(N == 1 && "Invalid number of operands!");
1983    int32_t Imm = Memory.OffsetImm->getValue();
1984    Inst.addOperand(MCOperand::CreateImm(Imm));
1985  }
1986
1987  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1988    assert(N == 1 && "Invalid number of operands!");
1989    assert(isImm() && "Not an immediate!");
1990
1991    // If we have an immediate that's not a constant, treat it as a label
1992    // reference needing a fixup.
1993    if (!isa<MCConstantExpr>(getImm())) {
1994      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1995      return;
1996    }
1997
1998    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1999    int Val = CE->getValue();
2000    Inst.addOperand(MCOperand::CreateImm(Val));
2001  }
2002
2003  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2004    assert(N == 2 && "Invalid number of operands!");
2005    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2006    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
2007  }
2008
2009  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2010    addAlignedMemoryOperands(Inst, N);
2011  }
2012
2013  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2014    addAlignedMemoryOperands(Inst, N);
2015  }
2016
2017  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2018    addAlignedMemoryOperands(Inst, N);
2019  }
2020
2021  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2022    addAlignedMemoryOperands(Inst, N);
2023  }
2024
2025  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2026    addAlignedMemoryOperands(Inst, N);
2027  }
2028
2029  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2030    addAlignedMemoryOperands(Inst, N);
2031  }
2032
2033  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2034    addAlignedMemoryOperands(Inst, N);
2035  }
2036
2037  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2038    addAlignedMemoryOperands(Inst, N);
2039  }
2040
2041  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2042    addAlignedMemoryOperands(Inst, N);
2043  }
2044
2045  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2046    addAlignedMemoryOperands(Inst, N);
2047  }
2048
2049  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2050    addAlignedMemoryOperands(Inst, N);
2051  }
2052
2053  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2054    assert(N == 3 && "Invalid number of operands!");
2055    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2056    if (!Memory.OffsetRegNum) {
2057      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2058      // Special case for #-0
2059      if (Val == INT32_MIN) Val = 0;
2060      if (Val < 0) Val = -Val;
2061      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2062    } else {
2063      // For register offset, we encode the shift type and negation flag
2064      // here.
2065      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2066                              Memory.ShiftImm, Memory.ShiftType);
2067    }
2068    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2069    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2070    Inst.addOperand(MCOperand::CreateImm(Val));
2071  }
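  // Rough sketch of the two paths above (illustrative): "[r0, #-4]" has no
  // offset register, so it takes the immediate branch with AddSub ==
  // ARM_AM::sub and Val == 4; "[r0, r1, lsl #2]" has an offset register, so
  // only the shift type, shift amount and sign are folded into Val.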
2072
2073  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2074    assert(N == 2 && "Invalid number of operands!");
2075    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2076    assert(CE && "non-constant AM2OffsetImm operand!");
2077    int32_t Val = CE->getValue();
2078    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2079    // Special case for #-0
2080    if (Val == INT32_MIN) Val = 0;
2081    if (Val < 0) Val = -Val;
2082    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2083    Inst.addOperand(MCOperand::CreateReg(0));
2084    Inst.addOperand(MCOperand::CreateImm(Val));
2085  }
2086
2087  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2088    assert(N == 3 && "Invalid number of operands!");
2089    // If we have an immediate that's not a constant, treat it as a label
2090    // reference needing a fixup. If it is a constant, it's something else
2091    // and we reject it.
2092    if (isImm()) {
2093      Inst.addOperand(MCOperand::CreateExpr(getImm()));
2094      Inst.addOperand(MCOperand::CreateReg(0));
2095      Inst.addOperand(MCOperand::CreateImm(0));
2096      return;
2097    }
2098
2099    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2100    if (!Memory.OffsetRegNum) {
2101      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2102      // Special case for #-0
2103      if (Val == INT32_MIN) Val = 0;
2104      if (Val < 0) Val = -Val;
2105      Val = ARM_AM::getAM3Opc(AddSub, Val);
2106    } else {
2107      // For register offset, we encode the shift type and negation flag
2108      // here.
2109      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2110    }
2111    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2112    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2113    Inst.addOperand(MCOperand::CreateImm(Val));
2114  }
2115
2116  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2117    assert(N == 2 && "Invalid number of operands!");
2118    if (Kind == k_PostIndexRegister) {
2119      int32_t Val =
2120        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2121      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2122      Inst.addOperand(MCOperand::CreateImm(Val));
2123      return;
2124    }
2125
2126    // Constant offset.
2127    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2128    int32_t Val = CE->getValue();
2129    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2130    // Special case for #-0
2131    if (Val == INT32_MIN) Val = 0;
2132    if (Val < 0) Val = -Val;
2133    Val = ARM_AM::getAM3Opc(AddSub, Val);
2134    Inst.addOperand(MCOperand::CreateReg(0));
2135    Inst.addOperand(MCOperand::CreateImm(Val));
2136  }
2137
2138  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2139    assert(N == 2 && "Invalid number of operands!");
2140    // If we have an immediate that's not a constant, treat it as a label
2141    // reference needing a fixup. If it is a constant, it's something else
2142    // and we reject it.
2143    if (isImm()) {
2144      Inst.addOperand(MCOperand::CreateExpr(getImm()));
2145      Inst.addOperand(MCOperand::CreateImm(0));
2146      return;
2147    }
2148
2149    // The lower two bits are always zero and as such are not encoded.
2150    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2151    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2152    // Special case for #-0
2153    if (Val == INT32_MIN) Val = 0;
2154    if (Val < 0) Val = -Val;
2155    Val = ARM_AM::getAM5Opc(AddSub, Val);
2156    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2157    Inst.addOperand(MCOperand::CreateImm(Val));
2158  }
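  // For example (illustrative), "[r0, #-16]" scales to Val == -4 above and so
  // becomes AddSub == ARM_AM::sub with a word offset of 4 before being packed
  // by getAM5Opc; "[r0, #16]" is the matching add with the same word offset.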
2159
2160  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2161    assert(N == 2 && "Invalid number of operands!");
2162    // If we have an immediate that's not a constant, treat it as a label
2163    // reference needing a fixup. If it is a constant, it's something else
2164    // and we reject it.
2165    if (isImm()) {
2166      Inst.addOperand(MCOperand::CreateExpr(getImm()));
2167      Inst.addOperand(MCOperand::CreateImm(0));
2168      return;
2169    }
2170
2171    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2172    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2173    Inst.addOperand(MCOperand::CreateImm(Val));
2174  }
2175
2176  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2177    assert(N == 2 && "Invalid number of operands!");
2178    // The lower two bits are always zero and as such are not encoded.
2179    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2180    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2181    Inst.addOperand(MCOperand::CreateImm(Val));
2182  }
2183
2184  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2185    assert(N == 2 && "Invalid number of operands!");
2186    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2187    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2188    Inst.addOperand(MCOperand::CreateImm(Val));
2189  }
2190
2191  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2192    addMemImm8OffsetOperands(Inst, N);
2193  }
2194
2195  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2196    addMemImm8OffsetOperands(Inst, N);
2197  }
2198
2199  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2200    assert(N == 2 && "Invalid number of operands!");
2201    // If this is an immediate, it's a label reference.
2202    if (isImm()) {
2203      addExpr(Inst, getImm());
2204      Inst.addOperand(MCOperand::CreateImm(0));
2205      return;
2206    }
2207
2208    // Otherwise, it's a normal memory reg+offset.
2209    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2210    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2211    Inst.addOperand(MCOperand::CreateImm(Val));
2212  }
2213
2214  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2215    assert(N == 2 && "Invalid number of operands!");
2216    // If this is an immediate, it's a label reference.
2217    if (isImm()) {
2218      addExpr(Inst, getImm());
2219      Inst.addOperand(MCOperand::CreateImm(0));
2220      return;
2221    }
2222
2223    // Otherwise, it's a normal memory reg+offset.
2224    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2225    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2226    Inst.addOperand(MCOperand::CreateImm(Val));
2227  }
2228
2229  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2230    assert(N == 2 && "Invalid number of operands!");
2231    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2232    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2233  }
2234
2235  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2236    assert(N == 2 && "Invalid number of operands!");
2237    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2238    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2239  }
2240
2241  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2242    assert(N == 3 && "Invalid number of operands!");
2243    unsigned Val =
2244      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2245                        Memory.ShiftImm, Memory.ShiftType);
2246    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2247    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2248    Inst.addOperand(MCOperand::CreateImm(Val));
2249  }
2250
2251  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2252    assert(N == 3 && "Invalid number of operands!");
2253    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2254    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2255    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
2256  }
2257
2258  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2259    assert(N == 2 && "Invalid number of operands!");
2260    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2261    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2262  }
2263
2264  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2265    assert(N == 2 && "Invalid number of operands!");
2266    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2267    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2268    Inst.addOperand(MCOperand::CreateImm(Val));
2269  }
2270
2271  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2272    assert(N == 2 && "Invalid number of operands!");
2273    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2274    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2275    Inst.addOperand(MCOperand::CreateImm(Val));
2276  }
2277
2278  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2279    assert(N == 2 && "Invalid number of operands!");
2280    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2281    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2282    Inst.addOperand(MCOperand::CreateImm(Val));
2283  }
2284
2285  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2286    assert(N == 2 && "Invalid number of operands!");
2287    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2288    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2289    Inst.addOperand(MCOperand::CreateImm(Val));
2290  }
2291
2292  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2293    assert(N == 1 && "Invalid number of operands!");
2294    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2295    assert(CE && "non-constant post-idx-imm8 operand!");
2296    int Imm = CE->getValue();
2297    bool isAdd = Imm >= 0;
2298    if (Imm == INT32_MIN) Imm = 0;
2299    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2300    Inst.addOperand(MCOperand::CreateImm(Imm));
2301  }
2302
2303  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2304    assert(N == 1 && "Invalid number of operands!");
2305    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2306    assert(CE && "non-constant post-idx-imm8s4 operand!");
2307    int Imm = CE->getValue();
2308    bool isAdd = Imm >= 0;
2309    if (Imm == INT32_MIN) Imm = 0;
2310    // Immediate is scaled by 4.
2311    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2312    Inst.addOperand(MCOperand::CreateImm(Imm));
2313  }
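  // For example (illustrative), a post-index immediate of #8 becomes
  // (8 / 4) | (1 << 8) == 0x102 and #-8 becomes (8 / 4) | (0 << 8) == 2:
  // bit 8 carries the add/subtract sense, the low bits the word count.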
2314
2315  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2316    assert(N == 2 && "Invalid number of operands!");
2317    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2318    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2319  }
2320
2321  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2322    assert(N == 2 && "Invalid number of operands!");
2323    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2324    // The sign, shift type, and shift amount are encoded in a single operand
2325    // using the AM2 encoding helpers.
2326    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2327    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2328                                     PostIdxReg.ShiftTy);
2329    Inst.addOperand(MCOperand::CreateImm(Imm));
2330  }
2331
2332  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2333    assert(N == 1 && "Invalid number of operands!");
2334    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2335  }
2336
2337  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2338    assert(N == 1 && "Invalid number of operands!");
2339    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2340  }
2341
2342  void addVecListOperands(MCInst &Inst, unsigned N) const {
2343    assert(N == 1 && "Invalid number of operands!");
2344    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2345  }
2346
2347  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2348    assert(N == 2 && "Invalid number of operands!");
2349    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2350    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2351  }
2352
2353  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2354    assert(N == 1 && "Invalid number of operands!");
2355    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2356  }
2357
2358  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2359    assert(N == 1 && "Invalid number of operands!");
2360    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2361  }
2362
2363  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2364    assert(N == 1 && "Invalid number of operands!");
2365    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2366  }
2367
2368  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2369    assert(N == 1 && "Invalid number of operands!");
2370    // The immediate encodes the type of constant as well as the value.
2371    // Mask in that this is an i8 splat.
2372    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2373    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2374  }
2375
2376  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2377    assert(N == 1 && "Invalid number of operands!");
2378    // The immediate encodes the type of constant as well as the value.
2379    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2380    unsigned Value = CE->getValue();
2381    if (Value >= 256)
2382      Value = (Value >> 8) | 0xa00;
2383    else
2384      Value |= 0x800;
2385    Inst.addOperand(MCOperand::CreateImm(Value));
2386  }
2387
2388  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2389    assert(N == 1 && "Invalid number of operands!");
2390    // The immediate encodes the type of constant as well as the value.
2391    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2392    unsigned Value = CE->getValue();
2393    if (Value >= 256 && Value <= 0xff00)
2394      Value = (Value >> 8) | 0x200;
2395    else if (Value > 0xffff && Value <= 0xff0000)
2396      Value = (Value >> 16) | 0x400;
2397    else if (Value > 0xffffff)
2398      Value = (Value >> 24) | 0x600;
2399    Inst.addOperand(MCOperand::CreateImm(Value));
2400  }
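  // Sketch of the selection above with illustrative values: 0x2a passes
  // through unchanged, 0x2a00 becomes 0x22a, 0x2a0000 becomes 0x42a, and
  // 0x2a000000 becomes 0x62a, so the high bits record which byte lane holds
  // the splatted value.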
2401
2402  void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2403    assert(N == 1 && "Invalid number of operands!");
2404    // The immediate encodes the type of constant as well as the value.
2405    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2406    unsigned Value = CE->getValue();
2407    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2408            Inst.getOpcode() == ARM::VMOVv16i8) &&
2409           "All vmvn instructions that want to replicate a non-zero byte "
2410           "must be replaced with VMOVv8i8 or VMOVv16i8.");
2411    unsigned B = ((~Value) & 0xff);
2412    B |= 0xe00; // cmode = 0b1110
2413    Inst.addOperand(MCOperand::CreateImm(B));
2414  }
2415  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2416    assert(N == 1 && "Invalid number of operands!");
2417    // The immediate encodes the type of constant as well as the value.
2418    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2419    unsigned Value = CE->getValue();
2420    if (Value >= 256 && Value <= 0xffff)
2421      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2422    else if (Value > 0xffff && Value <= 0xffffff)
2423      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2424    else if (Value > 0xffffff)
2425      Value = (Value >> 24) | 0x600;
2426    Inst.addOperand(MCOperand::CreateImm(Value));
2427  }
2428
2429  void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2430    assert(N == 1 && "Invalid number of operands!");
2431    // The immediate encodes the type of constant as well as the value.
2432    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2433    unsigned Value = CE->getValue();
2434    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2435            Inst.getOpcode() == ARM::VMOVv16i8) &&
2436           "All instructions that want to replicate a non-zero byte "
2437           "must be replaced with VMOVv8i8 or VMOVv16i8.");
2438    unsigned B = Value & 0xff;
2439    B |= 0xe00; // cmode = 0b1110
2440    Inst.addOperand(MCOperand::CreateImm(B));
2441  }
2442  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2443    assert(N == 1 && "Invalid number of operands!");
2444    // The immediate encodes the type of constant as well as the value.
2445    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2446    unsigned Value = ~CE->getValue();
2447    if (Value >= 256 && Value <= 0xffff)
2448      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2449    else if (Value > 0xffff && Value <= 0xffffff)
2450      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2451    else if (Value > 0xffffff)
2452      Value = (Value >> 24) | 0x600;
2453    Inst.addOperand(MCOperand::CreateImm(Value));
2454  }
2455
2456  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2457    assert(N == 1 && "Invalid number of operands!");
2458    // The immediate encodes the type of constant as well as the value.
2459    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2460    uint64_t Value = CE->getValue();
2461    unsigned Imm = 0;
2462    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2463      Imm |= (Value & 1) << i;
2464    }
2465    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2466  }
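  // Worked example (illustrative): Value == 0x00ff00ff00ff00ff sets Imm to
  // 0b01010101 == 0x55 in the loop above, so the operand added is
  // 0x55 | 0x1e00 == 0x1e55, one bit per byte of the original i64 constant.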
2467
2468  void print(raw_ostream &OS) const override;
2469
2470  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2471    auto Op = make_unique<ARMOperand>(k_ITCondMask);
2472    Op->ITMask.Mask = Mask;
2473    Op->StartLoc = S;
2474    Op->EndLoc = S;
2475    return Op;
2476  }
2477
2478  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2479                                                    SMLoc S) {
2480    auto Op = make_unique<ARMOperand>(k_CondCode);
2481    Op->CC.Val = CC;
2482    Op->StartLoc = S;
2483    Op->EndLoc = S;
2484    return Op;
2485  }
2486
2487  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2488    auto Op = make_unique<ARMOperand>(k_CoprocNum);
2489    Op->Cop.Val = CopVal;
2490    Op->StartLoc = S;
2491    Op->EndLoc = S;
2492    return Op;
2493  }
2494
2495  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2496    auto Op = make_unique<ARMOperand>(k_CoprocReg);
2497    Op->Cop.Val = CopVal;
2498    Op->StartLoc = S;
2499    Op->EndLoc = S;
2500    return Op;
2501  }
2502
2503  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2504                                                        SMLoc E) {
2505    auto Op = make_unique<ARMOperand>(k_CoprocOption);
2506    Op->Cop.Val = Val;
2507    Op->StartLoc = S;
2508    Op->EndLoc = E;
2509    return Op;
2510  }
2511
2512  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2513    auto Op = make_unique<ARMOperand>(k_CCOut);
2514    Op->Reg.RegNum = RegNum;
2515    Op->StartLoc = S;
2516    Op->EndLoc = S;
2517    return Op;
2518  }
2519
2520  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2521    auto Op = make_unique<ARMOperand>(k_Token);
2522    Op->Tok.Data = Str.data();
2523    Op->Tok.Length = Str.size();
2524    Op->StartLoc = S;
2525    Op->EndLoc = S;
2526    return Op;
2527  }
2528
2529  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2530                                               SMLoc E) {
2531    auto Op = make_unique<ARMOperand>(k_Register);
2532    Op->Reg.RegNum = RegNum;
2533    Op->StartLoc = S;
2534    Op->EndLoc = E;
2535    return Op;
2536  }
2537
2538  static std::unique_ptr<ARMOperand>
2539  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2540                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2541                        SMLoc E) {
2542    auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2543    Op->RegShiftedReg.ShiftTy = ShTy;
2544    Op->RegShiftedReg.SrcReg = SrcReg;
2545    Op->RegShiftedReg.ShiftReg = ShiftReg;
2546    Op->RegShiftedReg.ShiftImm = ShiftImm;
2547    Op->StartLoc = S;
2548    Op->EndLoc = E;
2549    return Op;
2550  }
2551
2552  static std::unique_ptr<ARMOperand>
2553  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2554                         unsigned ShiftImm, SMLoc S, SMLoc E) {
2555    auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2556    Op->RegShiftedImm.ShiftTy = ShTy;
2557    Op->RegShiftedImm.SrcReg = SrcReg;
2558    Op->RegShiftedImm.ShiftImm = ShiftImm;
2559    Op->StartLoc = S;
2560    Op->EndLoc = E;
2561    return Op;
2562  }
2563
2564  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2565                                                      SMLoc S, SMLoc E) {
2566    auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2567    Op->ShifterImm.isASR = isASR;
2568    Op->ShifterImm.Imm = Imm;
2569    Op->StartLoc = S;
2570    Op->EndLoc = E;
2571    return Op;
2572  }
2573
2574  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2575                                                  SMLoc E) {
2576    auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2577    Op->RotImm.Imm = Imm;
2578    Op->StartLoc = S;
2579    Op->EndLoc = E;
2580    return Op;
2581  }
2582
2583  static std::unique_ptr<ARMOperand>
2584  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2585    auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2586    Op->Bitfield.LSB = LSB;
2587    Op->Bitfield.Width = Width;
2588    Op->StartLoc = S;
2589    Op->EndLoc = E;
2590    return Op;
2591  }
2592
2593  static std::unique_ptr<ARMOperand>
2594  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2595                SMLoc StartLoc, SMLoc EndLoc) {
2596    assert (Regs.size() > 0 && "RegList contains no registers?");
2597    KindTy Kind = k_RegisterList;
2598
2599    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2600      Kind = k_DPRRegisterList;
2601    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2602             contains(Regs.front().second))
2603      Kind = k_SPRRegisterList;
2604
2605    // Sort based on the register encoding values.
2606    array_pod_sort(Regs.begin(), Regs.end());
2607
2608    auto Op = make_unique<ARMOperand>(Kind);
2609    for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2610           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2611      Op->Registers.push_back(I->second);
2612    Op->StartLoc = StartLoc;
2613    Op->EndLoc = EndLoc;
2614    return Op;
2615  }
2616
2617  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2618                                                      unsigned Count,
2619                                                      bool isDoubleSpaced,
2620                                                      SMLoc S, SMLoc E) {
2621    auto Op = make_unique<ARMOperand>(k_VectorList);
2622    Op->VectorList.RegNum = RegNum;
2623    Op->VectorList.Count = Count;
2624    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2625    Op->StartLoc = S;
2626    Op->EndLoc = E;
2627    return Op;
2628  }
2629
2630  static std::unique_ptr<ARMOperand>
2631  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2632                           SMLoc S, SMLoc E) {
2633    auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2634    Op->VectorList.RegNum = RegNum;
2635    Op->VectorList.Count = Count;
2636    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2637    Op->StartLoc = S;
2638    Op->EndLoc = E;
2639    return Op;
2640  }
2641
2642  static std::unique_ptr<ARMOperand>
2643  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2644                          bool isDoubleSpaced, SMLoc S, SMLoc E) {
2645    auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2646    Op->VectorList.RegNum = RegNum;
2647    Op->VectorList.Count = Count;
2648    Op->VectorList.LaneIndex = Index;
2649    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2650    Op->StartLoc = S;
2651    Op->EndLoc = E;
2652    return Op;
2653  }
2654
2655  static std::unique_ptr<ARMOperand>
2656  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2657    auto Op = make_unique<ARMOperand>(k_VectorIndex);
2658    Op->VectorIndex.Val = Idx;
2659    Op->StartLoc = S;
2660    Op->EndLoc = E;
2661    return Op;
2662  }
2663
2664  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2665                                               SMLoc E) {
2666    auto Op = make_unique<ARMOperand>(k_Immediate);
2667    Op->Imm.Val = Val;
2668    Op->StartLoc = S;
2669    Op->EndLoc = E;
2670    return Op;
2671  }
2672
2673  static std::unique_ptr<ARMOperand>
2674  CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2675            unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2676            unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2677            SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2678    auto Op = make_unique<ARMOperand>(k_Memory);
2679    Op->Memory.BaseRegNum = BaseRegNum;
2680    Op->Memory.OffsetImm = OffsetImm;
2681    Op->Memory.OffsetRegNum = OffsetRegNum;
2682    Op->Memory.ShiftType = ShiftType;
2683    Op->Memory.ShiftImm = ShiftImm;
2684    Op->Memory.Alignment = Alignment;
2685    Op->Memory.isNegative = isNegative;
2686    Op->StartLoc = S;
2687    Op->EndLoc = E;
2688    Op->AlignmentLoc = AlignmentLoc;
2689    return Op;
2690  }
2691
2692  static std::unique_ptr<ARMOperand>
2693  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2694                   unsigned ShiftImm, SMLoc S, SMLoc E) {
2695    auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2696    Op->PostIdxReg.RegNum = RegNum;
2697    Op->PostIdxReg.isAdd = isAdd;
2698    Op->PostIdxReg.ShiftTy = ShiftTy;
2699    Op->PostIdxReg.ShiftImm = ShiftImm;
2700    Op->StartLoc = S;
2701    Op->EndLoc = E;
2702    return Op;
2703  }
2704
2705  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
2706                                                         SMLoc S) {
2707    auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
2708    Op->MBOpt.Val = Opt;
2709    Op->StartLoc = S;
2710    Op->EndLoc = S;
2711    return Op;
2712  }
2713
2714  static std::unique_ptr<ARMOperand>
2715  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
2716    auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
2717    Op->ISBOpt.Val = Opt;
2718    Op->StartLoc = S;
2719    Op->EndLoc = S;
2720    return Op;
2721  }
2722
2723  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
2724                                                      SMLoc S) {
2725    auto Op = make_unique<ARMOperand>(k_ProcIFlags);
2726    Op->IFlags.Val = IFlags;
2727    Op->StartLoc = S;
2728    Op->EndLoc = S;
2729    return Op;
2730  }
2731
2732  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
2733    auto Op = make_unique<ARMOperand>(k_MSRMask);
2734    Op->MMask.Val = MMask;
2735    Op->StartLoc = S;
2736    Op->EndLoc = S;
2737    return Op;
2738  }
2739};
2740
2741} // end anonymous namespace.
2742
2743void ARMOperand::print(raw_ostream &OS) const {
2744  switch (Kind) {
2745  case k_CondCode:
2746    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2747    break;
2748  case k_CCOut:
2749    OS << "<ccout " << getReg() << ">";
2750    break;
2751  case k_ITCondMask: {
2752    static const char *const MaskStr[] = {
2753      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2754      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2755    };
2756    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2757    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2758    break;
2759  }
2760  case k_CoprocNum:
2761    OS << "<coprocessor number: " << getCoproc() << ">";
2762    break;
2763  case k_CoprocReg:
2764    OS << "<coprocessor register: " << getCoproc() << ">";
2765    break;
2766  case k_CoprocOption:
2767    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2768    break;
2769  case k_MSRMask:
2770    OS << "<mask: " << getMSRMask() << ">";
2771    break;
2772  case k_Immediate:
2773    getImm()->print(OS);
2774    break;
2775  case k_MemBarrierOpt:
2776    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2777    break;
2778  case k_InstSyncBarrierOpt:
2779    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2780    break;
2781  case k_Memory:
2782    OS << "<memory"
2783       << " base:" << Memory.BaseRegNum;
2784    OS << ">";
2785    break;
2786  case k_PostIndexRegister:
2787    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2788       << PostIdxReg.RegNum;
2789    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2790      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2791         << PostIdxReg.ShiftImm;
2792    OS << ">";
2793    break;
2794  case k_ProcIFlags: {
2795    OS << "<ARM_PROC::";
2796    unsigned IFlags = getProcIFlags();
2797    for (int i=2; i >= 0; --i)
2798      if (IFlags & (1 << i))
2799        OS << ARM_PROC::IFlagsToString(1 << i);
2800    OS << ">";
2801    break;
2802  }
2803  case k_Register:
2804    OS << "<register " << getReg() << ">";
2805    break;
2806  case k_ShifterImmediate:
2807    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2808       << " #" << ShifterImm.Imm << ">";
2809    break;
2810  case k_ShiftedRegister:
2811    OS << "<so_reg_reg "
2812       << RegShiftedReg.SrcReg << " "
2813       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2814       << " " << RegShiftedReg.ShiftReg << ">";
2815    break;
2816  case k_ShiftedImmediate:
2817    OS << "<so_reg_imm "
2818       << RegShiftedImm.SrcReg << " "
2819       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2820       << " #" << RegShiftedImm.ShiftImm << ">";
2821    break;
2822  case k_RotateImmediate:
2823    OS << "<ror #" << RotImm.Imm << ">";
2824    break;
2825  case k_BitfieldDescriptor:
2826    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2827       << ", width: " << Bitfield.Width << ">";
2828    break;
2829  case k_RegisterList:
2830  case k_DPRRegisterList:
2831  case k_SPRRegisterList: {
2832    OS << "<register_list ";
2833
2834    const SmallVectorImpl<unsigned> &RegList = getRegList();
2835    for (SmallVectorImpl<unsigned>::const_iterator
2836           I = RegList.begin(), E = RegList.end(); I != E; ) {
2837      OS << *I;
2838      if (++I < E) OS << ", ";
2839    }
2840
2841    OS << ">";
2842    break;
2843  }
2844  case k_VectorList:
2845    OS << "<vector_list " << VectorList.Count << " * "
2846       << VectorList.RegNum << ">";
2847    break;
2848  case k_VectorListAllLanes:
2849    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2850       << VectorList.RegNum << ">";
2851    break;
2852  case k_VectorListIndexed:
2853    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2854       << VectorList.Count << " * " << VectorList.RegNum << ">";
2855    break;
2856  case k_Token:
2857    OS << "'" << getToken() << "'";
2858    break;
2859  case k_VectorIndex:
2860    OS << "<vectorindex " << getVectorIndex() << ">";
2861    break;
2862  }
2863}
2864
2865/// @name Auto-generated Match Functions
2866/// {
2867
2868static unsigned MatchRegisterName(StringRef Name);
2869
2870/// }
2871
2872bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2873                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2874  StartLoc = Parser.getTok().getLoc();
2875  EndLoc = Parser.getTok().getEndLoc();
2876  RegNo = tryParseRegister();
2877
2878  return (RegNo == (unsigned)-1);
2879}
2880
2881/// Try to parse a register name.  The token must be an Identifier when called,
2882/// and if it is a register name the token is eaten and the register number is
2883/// returned.  Otherwise return -1.
2884///
2885int ARMAsmParser::tryParseRegister() {
2886  const AsmToken &Tok = Parser.getTok();
2887  if (Tok.isNot(AsmToken::Identifier)) return -1;
2888
2889  std::string lowerCase = Tok.getString().lower();
2890  unsigned RegNum = MatchRegisterName(lowerCase);
2891  if (!RegNum) {
2892    RegNum = StringSwitch<unsigned>(lowerCase)
2893      .Case("r13", ARM::SP)
2894      .Case("r14", ARM::LR)
2895      .Case("r15", ARM::PC)
2896      .Case("ip", ARM::R12)
2897      // Additional register name aliases for 'gas' compatibility.
2898      .Case("a1", ARM::R0)
2899      .Case("a2", ARM::R1)
2900      .Case("a3", ARM::R2)
2901      .Case("a4", ARM::R3)
2902      .Case("v1", ARM::R4)
2903      .Case("v2", ARM::R5)
2904      .Case("v3", ARM::R6)
2905      .Case("v4", ARM::R7)
2906      .Case("v5", ARM::R8)
2907      .Case("v6", ARM::R9)
2908      .Case("v7", ARM::R10)
2909      .Case("v8", ARM::R11)
2910      .Case("sb", ARM::R9)
2911      .Case("sl", ARM::R10)
2912      .Case("fp", ARM::R11)
2913      .Default(0);
2914  }
2915  if (!RegNum) {
2916    // Check for aliases registered via .req. Canonicalize to lower case.
2917    // That's more consistent since register names are case insensitive, and
2918    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2919    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2920    // If no match, return failure.
2921    if (Entry == RegisterReqs.end())
2922      return -1;
2923    Parser.Lex(); // Eat identifier token.
2924    return Entry->getValue();
2925  }
2926
2927  Parser.Lex(); // Eat identifier token.
2928
2929  return RegNum;
2930}
2931
2932// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2933// If a recoverable error occurs, return 1. If an irrecoverable error
2934// occurs, return -1. An irrecoverable error is one where tokens have been
2935// consumed in the process of trying to parse the shifter (i.e., when it is
2936// indeed a shifter operand, but malformed).
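// For illustration: in "mov r0, r1, lsl #2" the already-parsed 'r1' operand is
// popped below and folded with the shift into a single shifted-register
// operand; "add r0, r1, r2, lsl r3" instead folds 'r2' with a
// register-controlled shift.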
2937int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
2938  SMLoc S = Parser.getTok().getLoc();
2939  const AsmToken &Tok = Parser.getTok();
2940  if (Tok.isNot(AsmToken::Identifier))
2941    return -1;
2942
2943  std::string lowerCase = Tok.getString().lower();
2944  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2945      .Case("asl", ARM_AM::lsl)
2946      .Case("lsl", ARM_AM::lsl)
2947      .Case("lsr", ARM_AM::lsr)
2948      .Case("asr", ARM_AM::asr)
2949      .Case("ror", ARM_AM::ror)
2950      .Case("rrx", ARM_AM::rrx)
2951      .Default(ARM_AM::no_shift);
2952
2953  if (ShiftTy == ARM_AM::no_shift)
2954    return 1;
2955
2956  Parser.Lex(); // Eat the operator.
2957
2958  // The source register for the shift has already been added to the
2959  // operand list, so we need to pop it off and combine it into the shifted
2960  // register operand instead.
2961  std::unique_ptr<ARMOperand> PrevOp(
2962      (ARMOperand *)Operands.pop_back_val().release());
2963  if (!PrevOp->isReg())
2964    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2965  int SrcReg = PrevOp->getReg();
2966
2967  SMLoc EndLoc;
2968  int64_t Imm = 0;
2969  int ShiftReg = 0;
2970  if (ShiftTy == ARM_AM::rrx) {
2971    // RRX doesn't have an explicit shift amount. The encoder expects
2972    // the shift register to be the same as the source register. Seems odd,
2973    // but OK.
2974    ShiftReg = SrcReg;
2975  } else {
2976    // Figure out if this is shifted by a constant or a register (for non-RRX).
2977    if (Parser.getTok().is(AsmToken::Hash) ||
2978        Parser.getTok().is(AsmToken::Dollar)) {
2979      Parser.Lex(); // Eat hash.
2980      SMLoc ImmLoc = Parser.getTok().getLoc();
2981      const MCExpr *ShiftExpr = nullptr;
2982      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2983        Error(ImmLoc, "invalid immediate shift value");
2984        return -1;
2985      }
2986      // The expression must be evaluatable as an immediate.
2987      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2988      if (!CE) {
2989        Error(ImmLoc, "invalid immediate shift value");
2990        return -1;
2991      }
2992      // Range check the immediate.
2993      // lsl, ror: 0 <= imm <= 31
2994      // lsr, asr: 0 <= imm <= 32
2995      Imm = CE->getValue();
2996      if (Imm < 0 ||
2997          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2998          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2999        Error(ImmLoc, "immediate shift value out of range");
3000        return -1;
3001      }
3002      // shift by zero is a nop. Always send it through as lsl.
3003      // ('as' compatibility)
3004      if (Imm == 0)
3005        ShiftTy = ARM_AM::lsl;
3006    } else if (Parser.getTok().is(AsmToken::Identifier)) {
3007      SMLoc L = Parser.getTok().getLoc();
3008      EndLoc = Parser.getTok().getEndLoc();
3009      ShiftReg = tryParseRegister();
3010      if (ShiftReg == -1) {
3011        Error(L, "expected immediate or register in shift operand");
3012        return -1;
3013      }
3014    } else {
3015      Error(Parser.getTok().getLoc(),
3016            "expected immediate or register in shift operand");
3017      return -1;
3018    }
3019  }
3020
3021  if (ShiftReg && ShiftTy != ARM_AM::rrx)
3022    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3023                                                         ShiftReg, Imm,
3024                                                         S, EndLoc));
3025  else
3026    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3027                                                          S, EndLoc));
3028
3029  return 0;
3030}
3031
3032
3033/// Try to parse a register name.  The token must be an Identifier when called.
3034/// If it's a register, an AsmOperand is created. Another AsmOperand is created
3035/// if there is a "writeback". Returns 'true' if it's not a register.
3036///
3037/// TODO: this is likely to change to allow different register types and/or to
3038/// parse for a specific register type.
3039bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3040  const AsmToken &RegTok = Parser.getTok();
3041  int RegNo = tryParseRegister();
3042  if (RegNo == -1)
3043    return true;
3044
3045  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3046                                           RegTok.getEndLoc()));
3047
3048  const AsmToken &ExclaimTok = Parser.getTok();
3049  if (ExclaimTok.is(AsmToken::Exclaim)) {
3050    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3051                                               ExclaimTok.getLoc()));
3052    Parser.Lex(); // Eat exclaim token
3053    return false;
3054  }
3055
3056  // Also check for an index operand. This is only legal for vector registers,
3057  // but that'll get caught OK in operand matching, so we don't need to
3058  // explicitly filter everything else out here.
3059  if (Parser.getTok().is(AsmToken::LBrac)) {
3060    SMLoc SIdx = Parser.getTok().getLoc();
3061    Parser.Lex(); // Eat left bracket token.
3062
3063    const MCExpr *ImmVal;
3064    if (getParser().parseExpression(ImmVal))
3065      return true;
3066    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3067    if (!MCE)
3068      return TokError("immediate value expected for vector index");
3069
3070    if (Parser.getTok().isNot(AsmToken::RBrac))
3071      return Error(Parser.getTok().getLoc(), "']' expected");
3072
3073    SMLoc E = Parser.getTok().getEndLoc();
3074    Parser.Lex(); // Eat right bracket token.
3075
3076    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3077                                                     SIdx, E,
3078                                                     getContext()));
3079  }
3080
3081  return false;
3082}
3083
3084/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3085/// instruction with a symbolic operand name.
3086/// We accept "crN" syntax for GAS compatibility.
3087/// <operand-name> ::= <prefix><number>
3088/// If CoprocOp is 'c', then:
3089///   <prefix> ::= c | cr
3090/// If CoprocOp is 'p', then:
3091///   <prefix> ::= p
3092/// <number> ::= integer in range [0, 15]
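/// For example (illustrative only): with CoprocOp == 'c', "c7" and "cr7" both
/// map to 7; with CoprocOp == 'p', "p15" maps to 15.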
3093static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3094  // Use the same layout as the tablegen'erated register name matcher. Ugly,
3095  // but efficient.
3096  if (Name.size() < 2 || Name[0] != CoprocOp)
3097    return -1;
3098  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3099
3100  switch (Name.size()) {
3101  default: return -1;
3102  case 1:
3103    switch (Name[0]) {
3104    default:  return -1;
3105    case '0': return 0;
3106    case '1': return 1;
3107    case '2': return 2;
3108    case '3': return 3;
3109    case '4': return 4;
3110    case '5': return 5;
3111    case '6': return 6;
3112    case '7': return 7;
3113    case '8': return 8;
3114    case '9': return 9;
3115    }
3116  case 2:
3117    if (Name[0] != '1')
3118      return -1;
3119    switch (Name[1]) {
3120    default:  return -1;
3121    // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3122    // However, old cores (v5/v6) did use them in that way.
3123    case '0': return 10;
3124    case '1': return 11;
3125    case '2': return 12;
3126    case '3': return 13;
3127    case '4': return 14;
3128    case '5': return 15;
3129    }
3130  }
3131}
3132
3133/// parseITCondCode - Try to parse a condition code for an IT instruction.
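/// For example, the "eq" in "it eq" or the "gt" in "itte gt".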
3134ARMAsmParser::OperandMatchResultTy
3135ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3136  SMLoc S = Parser.getTok().getLoc();
3137  const AsmToken &Tok = Parser.getTok();
3138  if (!Tok.is(AsmToken::Identifier))
3139    return MatchOperand_NoMatch;
3140  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
3141    .Case("eq", ARMCC::EQ)
3142    .Case("ne", ARMCC::NE)
3143    .Case("hs", ARMCC::HS)
3144    .Case("cs", ARMCC::HS)
3145    .Case("lo", ARMCC::LO)
3146    .Case("cc", ARMCC::LO)
3147    .Case("mi", ARMCC::MI)
3148    .Case("pl", ARMCC::PL)
3149    .Case("vs", ARMCC::VS)
3150    .Case("vc", ARMCC::VC)
3151    .Case("hi", ARMCC::HI)
3152    .Case("ls", ARMCC::LS)
3153    .Case("ge", ARMCC::GE)
3154    .Case("lt", ARMCC::LT)
3155    .Case("gt", ARMCC::GT)
3156    .Case("le", ARMCC::LE)
3157    .Case("al", ARMCC::AL)
3158    .Default(~0U);
3159  if (CC == ~0U)
3160    return MatchOperand_NoMatch;
3161  Parser.Lex(); // Eat the token.
3162
3163  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3164
3165  return MatchOperand_Success;
3166}
3167
3168/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3169/// token must be an Identifier when called, and if it is a coprocessor
3170/// number, the token is eaten and the operand is added to the operand list.
3171ARMAsmParser::OperandMatchResultTy
3172ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3173  SMLoc S = Parser.getTok().getLoc();
3174  const AsmToken &Tok = Parser.getTok();
3175  if (Tok.isNot(AsmToken::Identifier))
3176    return MatchOperand_NoMatch;
3177
3178  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3179  if (Num == -1)
3180    return MatchOperand_NoMatch;
3181  // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3182  if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3183    return MatchOperand_NoMatch;
3184
3185  Parser.Lex(); // Eat identifier token.
3186  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3187  return MatchOperand_Success;
3188}
3189
3190/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3191/// token must be an Identifier when called, and if it is a coprocessor
3192/// register, the token is eaten and the operand is added to the operand list.
3193ARMAsmParser::OperandMatchResultTy
3194ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3195  SMLoc S = Parser.getTok().getLoc();
3196  const AsmToken &Tok = Parser.getTok();
3197  if (Tok.isNot(AsmToken::Identifier))
3198    return MatchOperand_NoMatch;
3199
3200  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3201  if (Reg == -1)
3202    return MatchOperand_NoMatch;
3203
3204  Parser.Lex(); // Eat identifier token.
3205  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3206  return MatchOperand_Success;
3207}
3208
3209/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3210/// coproc_option : '{' imm0_255 '}'
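/// For example, this parses the trailing "{4}" in "ldc p14, c5, [r0], {4}"
/// (the unindexed LDC/STC form).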
3211ARMAsmParser::OperandMatchResultTy
3212ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3213  SMLoc S = Parser.getTok().getLoc();
3214
3215  // If this isn't a '{', this isn't a coprocessor immediate operand.
3216  if (Parser.getTok().isNot(AsmToken::LCurly))
3217    return MatchOperand_NoMatch;
3218  Parser.Lex(); // Eat the '{'
3219
3220  const MCExpr *Expr;
3221  SMLoc Loc = Parser.getTok().getLoc();
3222  if (getParser().parseExpression(Expr)) {
3223    Error(Loc, "illegal expression");
3224    return MatchOperand_ParseFail;
3225  }
3226  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3227  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3228    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3229    return MatchOperand_ParseFail;
3230  }
3231  int Val = CE->getValue();
3232
3233  // Check for and consume the closing '}'
3234  if (Parser.getTok().isNot(AsmToken::RCurly))
3235    return MatchOperand_ParseFail;
3236  SMLoc E = Parser.getTok().getEndLoc();
3237  Parser.Lex(); // Eat the '}'
3238
3239  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3240  return MatchOperand_Success;
3241}
3242
3243// For register list parsing, we need to map from raw GPR register numbering
3244// to the enumeration values. The enumeration values aren't sorted by
3245// register number due to our using "sp", "lr" and "pc" as canonical names.
3246static unsigned getNextRegister(unsigned Reg) {
3247  // If this is a GPR, we need to do it manually, otherwise we can rely
3248  // on the sort ordering of the enumeration since the other reg-classes
3249  // are sane.
3250  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3251    return Reg + 1;
3252  switch(Reg) {
3253  default: llvm_unreachable("Invalid GPR number!");
3254  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3255  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3256  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3257  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3258  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3259  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3260  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3261  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3262  }
3263}
3264
3265// Return the low-subreg of a given Q register.
3266static unsigned getDRegFromQReg(unsigned QReg) {
3267  switch (QReg) {
3268  default: llvm_unreachable("expected a Q register!");
3269  case ARM::Q0:  return ARM::D0;
3270  case ARM::Q1:  return ARM::D2;
3271  case ARM::Q2:  return ARM::D4;
3272  case ARM::Q3:  return ARM::D6;
3273  case ARM::Q4:  return ARM::D8;
3274  case ARM::Q5:  return ARM::D10;
3275  case ARM::Q6:  return ARM::D12;
3276  case ARM::Q7:  return ARM::D14;
3277  case ARM::Q8:  return ARM::D16;
3278  case ARM::Q9:  return ARM::D18;
3279  case ARM::Q10: return ARM::D20;
3280  case ARM::Q11: return ARM::D22;
3281  case ARM::Q12: return ARM::D24;
3282  case ARM::Q13: return ARM::D26;
3283  case ARM::Q14: return ARM::D28;
3284  case ARM::Q15: return ARM::D30;
3285  }
3286}
3287
3288/// Parse a register list.
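/// For example: "{r0, r4-r7, lr}" for LDM/STM/PUSH/POP, or "{d0-d3}" for
/// VLDM/VSTM; a Q register such as "q1" is accepted and expanded into its two
/// D sub-registers below.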
3289bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3290  assert(Parser.getTok().is(AsmToken::LCurly) &&
3291         "Token is not a Left Curly Brace");
3292  SMLoc S = Parser.getTok().getLoc();
3293  Parser.Lex(); // Eat '{' token.
3294  SMLoc RegLoc = Parser.getTok().getLoc();
3295
3296  // Check the first register in the list to see what register class
3297  // this is a list of.
3298  int Reg = tryParseRegister();
3299  if (Reg == -1)
3300    return Error(RegLoc, "register expected");
3301
3302  // The reglist instructions have at most 16 registers, so reserve
3303  // space for that many.
3304  int EReg = 0;
3305  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3306
3307  // Allow Q regs and just interpret them as the two D sub-registers.
3308  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3309    Reg = getDRegFromQReg(Reg);
3310    EReg = MRI->getEncodingValue(Reg);
3311    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3312    ++Reg;
3313  }
3314  const MCRegisterClass *RC;
3315  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3316    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3317  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3318    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3319  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3320    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3321  else
3322    return Error(RegLoc, "invalid register in register list");
3323
3324  // Store the register.
3325  EReg = MRI->getEncodingValue(Reg);
3326  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3327
3328  // This starts immediately after the first register token in the list,
3329  // so we can see either a comma or a minus (range separator) as a legal
3330  // next token.
3331  while (Parser.getTok().is(AsmToken::Comma) ||
3332         Parser.getTok().is(AsmToken::Minus)) {
3333    if (Parser.getTok().is(AsmToken::Minus)) {
3334      Parser.Lex(); // Eat the minus.
3335      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3336      int EndReg = tryParseRegister();
3337      if (EndReg == -1)
3338        return Error(AfterMinusLoc, "register expected");
3339      // Allow Q regs and just interpret them as the two D sub-registers.
3340      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3341        EndReg = getDRegFromQReg(EndReg) + 1;
3342      // If the register is the same as the start reg, there's nothing
3343      // more to do.
3344      if (Reg == EndReg)
3345        continue;
3346      // The register must be in the same register class as the first.
3347      if (!RC->contains(EndReg))
3348        return Error(AfterMinusLoc, "invalid register in register list");
3349      // Ranges must go from low to high.
3350      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3351        return Error(AfterMinusLoc, "bad range in register list");
3352
3353      // Add all the registers in the range to the register list.
3354      while (Reg != EndReg) {
3355        Reg = getNextRegister(Reg);
3356        EReg = MRI->getEncodingValue(Reg);
3357        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3358      }
3359      continue;
3360    }
3361    Parser.Lex(); // Eat the comma.
3362    RegLoc = Parser.getTok().getLoc();
3363    int OldReg = Reg;
3364    const AsmToken RegTok = Parser.getTok();
3365    Reg = tryParseRegister();
3366    if (Reg == -1)
3367      return Error(RegLoc, "register expected");
3368    // Allow Q regs and just interpret them as the two D sub-registers.
3369    bool isQReg = false;
3370    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3371      Reg = getDRegFromQReg(Reg);
3372      isQReg = true;
3373    }
3374    // The register must be in the same register class as the first.
3375    if (!RC->contains(Reg))
3376      return Error(RegLoc, "invalid register in register list");
3377    // List must be monotonically increasing.
3378    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3379      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3380        Warning(RegLoc, "register list not in ascending order");
3381      else
3382        return Error(RegLoc, "register list not in ascending order");
3383    }
3384    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3385      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3386              ") in register list");
3387      continue;
3388    }
3389    // VFP register lists must also be contiguous.
3390    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3391        Reg != OldReg + 1)
3392      return Error(RegLoc, "non-contiguous register range");
3393    EReg = MRI->getEncodingValue(Reg);
3394    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3395    if (isQReg) {
3396      EReg = MRI->getEncodingValue(++Reg);
3397      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3398    }
3399  }
3400
3401  if (Parser.getTok().isNot(AsmToken::RCurly))
3402    return Error(Parser.getTok().getLoc(), "'}' expected");
3403  SMLoc E = Parser.getTok().getEndLoc();
3404  Parser.Lex(); // Eat '}' token.
3405
3406  // Push the register list operand.
3407  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3408
3409  // The ARM system instruction variants for LDM/STM have a '^' token here.
3410  if (Parser.getTok().is(AsmToken::Caret)) {
3411    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3412    Parser.Lex(); // Eat '^' token.
3413  }
3414
3415  return false;
3416}
3417
3418// Helper function to parse the lane index for vector lists.
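// For example, the "[1]" suffix in "d3[1]" yields IndexedLane with index 1,
// "d3[]" yields AllLanes, and a bare "d3" (no brackets) yields NoLanes.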
3419ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3420parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3421  Index = 0; // Always return a defined index value.
3422  if (Parser.getTok().is(AsmToken::LBrac)) {
3423    Parser.Lex(); // Eat the '['.
3424    if (Parser.getTok().is(AsmToken::RBrac)) {
3425      // "Dn[]" is the 'all lanes' syntax.
3426      LaneKind = AllLanes;
3427      EndLoc = Parser.getTok().getEndLoc();
3428      Parser.Lex(); // Eat the ']'.
3429      return MatchOperand_Success;
3430    }
3431
3432    // There's an optional '#' token here. Normally there wouldn't be, but
3433    // inline assembly puts one in, and it's friendly to accept that.
3434    if (Parser.getTok().is(AsmToken::Hash))
3435      Parser.Lex(); // Eat the '#'.
3436
3437    const MCExpr *LaneIndex;
3438    SMLoc Loc = Parser.getTok().getLoc();
3439    if (getParser().parseExpression(LaneIndex)) {
3440      Error(Loc, "illegal expression");
3441      return MatchOperand_ParseFail;
3442    }
3443    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3444    if (!CE) {
3445      Error(Loc, "lane index must be empty or an integer");
3446      return MatchOperand_ParseFail;
3447    }
3448    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3449      Error(Parser.getTok().getLoc(), "']' expected");
3450      return MatchOperand_ParseFail;
3451    }
3452    EndLoc = Parser.getTok().getEndLoc();
3453    Parser.Lex(); // Eat the ']'.
3454    int64_t Val = CE->getValue();
3455
3456    // FIXME: Make this range check context sensitive for .8, .16, .32.
3457    if (Val < 0 || Val > 7) {
3458      Error(Parser.getTok().getLoc(), "lane index out of range");
3459      return MatchOperand_ParseFail;
3460    }
3461    Index = Val;
3462    LaneKind = IndexedLane;
3463    return MatchOperand_Success;
3464  }
3465  LaneKind = NoLanes;
3466  return MatchOperand_Success;
3467}
3468
3469// Parse a vector register list.
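// For example: "{d0, d1, d2}", "{d0-d3}", a double-spaced "{d0, d2}", a lane
// list such as "{d0[1], d1[1]}", or (as a GAS extension) a bare "d0" or "q0".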
3470ARMAsmParser::OperandMatchResultTy
3471ARMAsmParser::parseVectorList(OperandVector &Operands) {
3472  VectorLaneTy LaneKind;
3473  unsigned LaneIndex;
3474  SMLoc S = Parser.getTok().getLoc();
3475  // As an extension (to match gas), support a plain D register or Q register
3476  // (without enclosing curly braces) as a single or double entry list,
3477  // respectively.
3478  if (Parser.getTok().is(AsmToken::Identifier)) {
3479    SMLoc E = Parser.getTok().getEndLoc();
3480    int Reg = tryParseRegister();
3481    if (Reg == -1)
3482      return MatchOperand_NoMatch;
3483    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3484      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3485      if (Res != MatchOperand_Success)
3486        return Res;
3487      switch (LaneKind) {
3488      case NoLanes:
3489        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3490        break;
3491      case AllLanes:
3492        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3493                                                                S, E));
3494        break;
3495      case IndexedLane:
3496        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3497                                                               LaneIndex,
3498                                                               false, S, E));
3499        break;
3500      }
3501      return MatchOperand_Success;
3502    }
3503    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3504      Reg = getDRegFromQReg(Reg);
3505      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3506      if (Res != MatchOperand_Success)
3507        return Res;
3508      switch (LaneKind) {
3509      case NoLanes:
3510        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3511                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3512        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3513        break;
3514      case AllLanes:
3515        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3516                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3517        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3518                                                                S, E));
3519        break;
3520      case IndexedLane:
3521        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3522                                                               LaneIndex,
3523                                                               false, S, E));
3524        break;
3525      }
3526      return MatchOperand_Success;
3527    }
3528    Error(S, "vector register expected");
3529    return MatchOperand_ParseFail;
3530  }
3531
3532  if (Parser.getTok().isNot(AsmToken::LCurly))
3533    return MatchOperand_NoMatch;
3534
3535  Parser.Lex(); // Eat '{' token.
3536  SMLoc RegLoc = Parser.getTok().getLoc();
3537
3538  int Reg = tryParseRegister();
3539  if (Reg == -1) {
3540    Error(RegLoc, "register expected");
3541    return MatchOperand_ParseFail;
3542  }
3543  unsigned Count = 1;
3544  int Spacing = 0;
3545  unsigned FirstReg = Reg;
3546  // The list is of D registers, but we also allow Q regs and just interpret
3547  // them as the two D sub-registers.
3548  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3549    FirstReg = Reg = getDRegFromQReg(Reg);
3550    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3551                 // it's ambiguous with four-register single spaced.
3552    ++Reg;
3553    ++Count;
3554  }
3555
3556  SMLoc E;
3557  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3558    return MatchOperand_ParseFail;
3559
3560  while (Parser.getTok().is(AsmToken::Comma) ||
3561         Parser.getTok().is(AsmToken::Minus)) {
3562    if (Parser.getTok().is(AsmToken::Minus)) {
3563      if (!Spacing)
3564        Spacing = 1; // Register range implies a single spaced list.
3565      else if (Spacing == 2) {
3566        Error(Parser.getTok().getLoc(),
3567              "sequential registers in double spaced list");
3568        return MatchOperand_ParseFail;
3569      }
3570      Parser.Lex(); // Eat the minus.
3571      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3572      int EndReg = tryParseRegister();
3573      if (EndReg == -1) {
3574        Error(AfterMinusLoc, "register expected");
3575        return MatchOperand_ParseFail;
3576      }
3577      // Allow Q regs and just interpret them as the two D sub-registers.
3578      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3579        EndReg = getDRegFromQReg(EndReg) + 1;
3580      // If the register is the same as the start reg, there's nothing
3581      // more to do.
3582      if (Reg == EndReg)
3583        continue;
3584      // The register must be in the same register class as the first.
3585      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3586        Error(AfterMinusLoc, "invalid register in register list");
3587        return MatchOperand_ParseFail;
3588      }
3589      // Ranges must go from low to high.
3590      if (Reg > EndReg) {
3591        Error(AfterMinusLoc, "bad range in register list");
3592        return MatchOperand_ParseFail;
3593      }
3594      // Parse the lane specifier if present.
3595      VectorLaneTy NextLaneKind;
3596      unsigned NextLaneIndex;
3597      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3598          MatchOperand_Success)
3599        return MatchOperand_ParseFail;
3600      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3601        Error(AfterMinusLoc, "mismatched lane index in register list");
3602        return MatchOperand_ParseFail;
3603      }
3604
3605      // Add all the registers in the range to the register list.
3606      Count += EndReg - Reg;
3607      Reg = EndReg;
3608      continue;
3609    }
3610    Parser.Lex(); // Eat the comma.
3611    RegLoc = Parser.getTok().getLoc();
3612    int OldReg = Reg;
3613    Reg = tryParseRegister();
3614    if (Reg == -1) {
3615      Error(RegLoc, "register expected");
3616      return MatchOperand_ParseFail;
3617    }
3618    // vector register lists must be contiguous.
3619    // It's OK to use the enumeration values directly here, as the
3620    // VFP register classes have the enum sorted properly.
3621    //
3622    // The list is of D registers, but we also allow Q regs and just interpret
3623    // them as the two D sub-registers.
3624    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3625      if (!Spacing)
3626        Spacing = 1; // Register range implies a single spaced list.
3627      else if (Spacing == 2) {
3628        Error(RegLoc,
3629              "invalid register in double-spaced list (must be 'D' register)");
3630        return MatchOperand_ParseFail;
3631      }
3632      Reg = getDRegFromQReg(Reg);
3633      if (Reg != OldReg + 1) {
3634        Error(RegLoc, "non-contiguous register range");
3635        return MatchOperand_ParseFail;
3636      }
3637      ++Reg;
3638      Count += 2;
3639      // Parse the lane specifier if present.
3640      VectorLaneTy NextLaneKind;
3641      unsigned NextLaneIndex;
3642      SMLoc LaneLoc = Parser.getTok().getLoc();
3643      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3644          MatchOperand_Success)
3645        return MatchOperand_ParseFail;
3646      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3647        Error(LaneLoc, "mismatched lane index in register list");
3648        return MatchOperand_ParseFail;
3649      }
3650      continue;
3651    }
3652    // Normal D register.
3653    // Figure out the register spacing (single or double) of the list if
3654    // we don't know it already.
3655    if (!Spacing)
3656      Spacing = 1 + (Reg == OldReg + 2);
3657
3658    // Just check that it's contiguous and keep going.
3659    if (Reg != OldReg + Spacing) {
3660      Error(RegLoc, "non-contiguous register range");
3661      return MatchOperand_ParseFail;
3662    }
3663    ++Count;
3664    // Parse the lane specifier if present.
3665    VectorLaneTy NextLaneKind;
3666    unsigned NextLaneIndex;
3667    SMLoc EndLoc = Parser.getTok().getLoc();
3668    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3669      return MatchOperand_ParseFail;
3670    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3671      Error(EndLoc, "mismatched lane index in register list");
3672      return MatchOperand_ParseFail;
3673    }
3674  }
3675
3676  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3677    Error(Parser.getTok().getLoc(), "'}' expected");
3678    return MatchOperand_ParseFail;
3679  }
3680  E = Parser.getTok().getEndLoc();
3681  Parser.Lex(); // Eat '}' token.
3682
3683  switch (LaneKind) {
3684  case NoLanes:
3685    // Two-register operands have been converted to the
3686    // composite register classes.
3687    if (Count == 2) {
3688      const MCRegisterClass *RC = (Spacing == 1) ?
3689        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3690        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3691      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3692    }
3693
3694    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3695                                                    (Spacing == 2), S, E));
3696    break;
3697  case AllLanes:
3698    // Two-register operands have been converted to the
3699    // composite register classes.
3700    if (Count == 2) {
3701      const MCRegisterClass *RC = (Spacing == 1) ?
3702        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3703        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3704      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3705    }
3706    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3707                                                            (Spacing == 2),
3708                                                            S, E));
3709    break;
3710  case IndexedLane:
3711    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3712                                                           LaneIndex,
3713                                                           (Spacing == 2),
3714                                                           S, E));
3715    break;
3716  }
3717  return MatchOperand_Success;
3718}
3719
3720/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
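/// For example, the "ish" in "dmb ish" or the "#11" in "dmb #11"; raw
/// immediate options must fit in 4 bits.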
3721ARMAsmParser::OperandMatchResultTy
3722ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
3723  SMLoc S = Parser.getTok().getLoc();
3724  const AsmToken &Tok = Parser.getTok();
3725  unsigned Opt;
3726
3727  if (Tok.is(AsmToken::Identifier)) {
3728    StringRef OptStr = Tok.getString();
3729
3730    Opt = StringSwitch<unsigned>(OptStr.lower())
3731      .Case("sy",    ARM_MB::SY)
3732      .Case("st",    ARM_MB::ST)
3733      .Case("ld",    ARM_MB::LD)
3734      .Case("sh",    ARM_MB::ISH)
3735      .Case("ish",   ARM_MB::ISH)
3736      .Case("shst",  ARM_MB::ISHST)
3737      .Case("ishst", ARM_MB::ISHST)
3738      .Case("ishld", ARM_MB::ISHLD)
3739      .Case("nsh",   ARM_MB::NSH)
3740      .Case("un",    ARM_MB::NSH)
3741      .Case("nshst", ARM_MB::NSHST)
3742      .Case("nshld", ARM_MB::NSHLD)
3743      .Case("unst",  ARM_MB::NSHST)
3744      .Case("osh",   ARM_MB::OSH)
3745      .Case("oshst", ARM_MB::OSHST)
3746      .Case("oshld", ARM_MB::OSHLD)
3747      .Default(~0U);
3748
3749    // ishld, oshld, nshld and ld are only available from ARMv8.
3750    if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3751                        Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3752      Opt = ~0U;
3753
3754    if (Opt == ~0U)
3755      return MatchOperand_NoMatch;
3756
3757    Parser.Lex(); // Eat identifier token.
3758  } else if (Tok.is(AsmToken::Hash) ||
3759             Tok.is(AsmToken::Dollar) ||
3760             Tok.is(AsmToken::Integer)) {
3761    if (Parser.getTok().isNot(AsmToken::Integer))
3762      Parser.Lex(); // Eat '#' or '$'.
3763    SMLoc Loc = Parser.getTok().getLoc();
3764
3765    const MCExpr *MemBarrierID;
3766    if (getParser().parseExpression(MemBarrierID)) {
3767      Error(Loc, "illegal expression");
3768      return MatchOperand_ParseFail;
3769    }
3770
3771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3772    if (!CE) {
3773      Error(Loc, "constant expression expected");
3774      return MatchOperand_ParseFail;
3775    }
3776
3777    int Val = CE->getValue();
3778    if (Val & ~0xf) {
3779      Error(Loc, "immediate value out of range");
3780      return MatchOperand_ParseFail;
3781    }
3782
3783    Opt = ARM_MB::RESERVED_0 + Val;
3784  } else
3785    return MatchOperand_ParseFail;
3786
3787  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3788  return MatchOperand_Success;
3789}
3790
3791/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
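/// For example, the "sy" in "isb sy" or the "#15" in "isb #15"; "sy" is the
/// only named option, other values must be given numerically.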
3792ARMAsmParser::OperandMatchResultTy
3793ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
3794  SMLoc S = Parser.getTok().getLoc();
3795  const AsmToken &Tok = Parser.getTok();
3796  unsigned Opt;
3797
3798  if (Tok.is(AsmToken::Identifier)) {
3799    StringRef OptStr = Tok.getString();
3800
3801    if (OptStr.equals_lower("sy"))
3802      Opt = ARM_ISB::SY;
3803    else
3804      return MatchOperand_NoMatch;
3805
3806    Parser.Lex(); // Eat identifier token.
3807  } else if (Tok.is(AsmToken::Hash) ||
3808             Tok.is(AsmToken::Dollar) ||
3809             Tok.is(AsmToken::Integer)) {
3810    if (Parser.getTok().isNot(AsmToken::Integer))
3811      Parser.Lex(); // Eat '#' or '$'.
3812    SMLoc Loc = Parser.getTok().getLoc();
3813
3814    const MCExpr *ISBarrierID;
3815    if (getParser().parseExpression(ISBarrierID)) {
3816      Error(Loc, "illegal expression");
3817      return MatchOperand_ParseFail;
3818    }
3819
3820    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3821    if (!CE) {
3822      Error(Loc, "constant expression expected");
3823      return MatchOperand_ParseFail;
3824    }
3825
3826    int Val = CE->getValue();
3827    if (Val & ~0xf) {
3828      Error(Loc, "immediate value out of range");
3829      return MatchOperand_ParseFail;
3830    }
3831
3832    Opt = ARM_ISB::RESERVED_0 + Val;
3833  } else
3834    return MatchOperand_ParseFail;
3835
3836  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3837          (ARM_ISB::InstSyncBOpt)Opt, S));
3838  return MatchOperand_Success;
3839}
3840
3841
3842/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
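/// For example, the "if" in "cpsie if" (the IRQ and FIQ bits) or the "aif" in
/// "cpsid aif"; "none" is accepted and sets no AIF bits.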
3843ARMAsmParser::OperandMatchResultTy
3844ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
3845  SMLoc S = Parser.getTok().getLoc();
3846  const AsmToken &Tok = Parser.getTok();
3847  if (!Tok.is(AsmToken::Identifier))
3848    return MatchOperand_NoMatch;
3849  StringRef IFlagsStr = Tok.getString();
3850
3851  // An iflags string of "none" is interpreted to mean that none of the AIF
3852  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3853  unsigned IFlags = 0;
3854  if (IFlagsStr != "none") {
3855    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3856      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3857        .Case("a", ARM_PROC::A)
3858        .Case("i", ARM_PROC::I)
3859        .Case("f", ARM_PROC::F)
3860        .Default(~0U);
3861
3862      // If some specific iflag is already set, it means that some letter is
3863      // present more than once, which is not acceptable.
3864      if (Flag == ~0U || (IFlags & Flag))
3865        return MatchOperand_NoMatch;
3866
3867      IFlags |= Flag;
3868    }
3869  }
3870
3871  Parser.Lex(); // Eat identifier token.
3872  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3873  return MatchOperand_Success;
3874}
3875
3876/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
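/// For example, the "apsr_nzcvq" in "msr apsr_nzcvq, r0" (M-class), or the
/// "cpsr_fc" and "spsr_fsxc" forms handled below for A/R-class cores.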
3877ARMAsmParser::OperandMatchResultTy
3878ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
3879  SMLoc S = Parser.getTok().getLoc();
3880  const AsmToken &Tok = Parser.getTok();
3881  if (!Tok.is(AsmToken::Identifier))
3882    return MatchOperand_NoMatch;
3883  StringRef Mask = Tok.getString();
3884
3885  if (isMClass()) {
3886    // See ARMv6-M 10.1.1
3887    std::string Name = Mask.lower();
3888    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3889      // Note: in the documentation:
3890      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3891      //  for MSR APSR_nzcvq.
3892      // but we do make it an alias here.  This is done to get the "mask encoding"
3893      // bits correct on MSR APSR writes.
3894      //
3895      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3896      // should really only be allowed when writing a special register.  Note
3897      // they get dropped in the MRS instruction reading a special register as
3898      // the SYSm field is only 8 bits.
3899      //
3900      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3901      // includes the DSP extension but that is not checked.
3902      .Case("apsr", 0x800)
3903      .Case("apsr_nzcvq", 0x800)
3904      .Case("apsr_g", 0x400)
3905      .Case("apsr_nzcvqg", 0xc00)
3906      .Case("iapsr", 0x801)
3907      .Case("iapsr_nzcvq", 0x801)
3908      .Case("iapsr_g", 0x401)
3909      .Case("iapsr_nzcvqg", 0xc01)
3910      .Case("eapsr", 0x802)
3911      .Case("eapsr_nzcvq", 0x802)
3912      .Case("eapsr_g", 0x402)
3913      .Case("eapsr_nzcvqg", 0xc02)
3914      .Case("xpsr", 0x803)
3915      .Case("xpsr_nzcvq", 0x803)
3916      .Case("xpsr_g", 0x403)
3917      .Case("xpsr_nzcvqg", 0xc03)
3918      .Case("ipsr", 0x805)
3919      .Case("epsr", 0x806)
3920      .Case("iepsr", 0x807)
3921      .Case("msp", 0x808)
3922      .Case("psp", 0x809)
3923      .Case("primask", 0x810)
3924      .Case("basepri", 0x811)
3925      .Case("basepri_max", 0x812)
3926      .Case("faultmask", 0x813)
3927      .Case("control", 0x814)
3928      .Default(~0U);
3929
3930    if (FlagsVal == ~0U)
3931      return MatchOperand_NoMatch;
3932
3933    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3934      // basepri, basepri_max and faultmask only valid for V7m.
3935      return MatchOperand_NoMatch;
3936
3937    Parser.Lex(); // Eat identifier token.
3938    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3939    return MatchOperand_Success;
3940  }
3941
3942  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3943  size_t Start = 0, Next = Mask.find('_');
3944  StringRef Flags = "";
3945  std::string SpecReg = Mask.slice(Start, Next).lower();
3946  if (Next != StringRef::npos)
3947    Flags = Mask.slice(Next+1, Mask.size());
3948
3949  // FlagsVal contains the complete mask:
3950  // 3-0: Mask
3951  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3952  unsigned FlagsVal = 0;
3953
3954  if (SpecReg == "apsr") {
3955    FlagsVal = StringSwitch<unsigned>(Flags)
3956    .Case("nzcvq",  0x8) // same as CPSR_f
3957    .Case("g",      0x4) // same as CPSR_s
3958    .Case("nzcvqg", 0xc) // same as CPSR_fs
3959    .Default(~0U);
3960
3961    if (FlagsVal == ~0U) {
3962      if (!Flags.empty())
3963        return MatchOperand_NoMatch;
3964      else
3965        FlagsVal = 8; // No flag
3966    }
3967  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3968    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3969    if (Flags == "all" || Flags == "")
3970      Flags = "fc";
3971    for (int i = 0, e = Flags.size(); i != e; ++i) {
3972      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3973      .Case("c", 1)
3974      .Case("x", 2)
3975      .Case("s", 4)
3976      .Case("f", 8)
3977      .Default(~0U);
3978
3979      // If some specific flag is already set, it means that some letter is
3980      // present more than once, which is not acceptable.
3981      if (FlagsVal == ~0U || (FlagsVal & Flag))
3982        return MatchOperand_NoMatch;
3983      FlagsVal |= Flag;
3984    }
3985  } else // No match for special register.
3986    return MatchOperand_NoMatch;
3987
3988  // Special register without flags is NOT equivalent to "fc" flags.
3989  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3990  // two lines would enable gas compatibility at the expense of breaking
3991  // round-tripping.
3992  //
3993  // if (!FlagsVal)
3994  //  FlagsVal = 0x9;
3995
3996  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3997  if (SpecReg == "spsr")
3998    FlagsVal |= 16;
3999
4000  Parser.Lex(); // Eat identifier token.
4001  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4002  return MatchOperand_Success;
4003}
4004
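/// parsePKHImm - Parse a PKH-style shift operand, e.g. the "lsl #8" in
/// "pkhbt r0, r1, r2, lsl #8" (illustrative; the caller supplies the expected
/// shift name in Op and the legal range in Low/High).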
4005ARMAsmParser::OperandMatchResultTy
4006ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4007                          int High) {
4008  const AsmToken &Tok = Parser.getTok();
4009  if (Tok.isNot(AsmToken::Identifier)) {
4010    Error(Parser.getTok().getLoc(), Op + " operand expected.");
4011    return MatchOperand_ParseFail;
4012  }
4013  StringRef ShiftName = Tok.getString();
4014  std::string LowerOp = Op.lower();
4015  std::string UpperOp = Op.upper();
4016  if (ShiftName != LowerOp && ShiftName != UpperOp) {
4017    Error(Parser.getTok().getLoc(), Op + " operand expected.");
4018    return MatchOperand_ParseFail;
4019  }
4020  Parser.Lex(); // Eat shift type token.
4021
4022  // There must be a '#' and a shift amount.
4023  if (Parser.getTok().isNot(AsmToken::Hash) &&
4024      Parser.getTok().isNot(AsmToken::Dollar)) {
4025    Error(Parser.getTok().getLoc(), "'#' expected");
4026    return MatchOperand_ParseFail;
4027  }
4028  Parser.Lex(); // Eat hash token.
4029
4030  const MCExpr *ShiftAmount;
4031  SMLoc Loc = Parser.getTok().getLoc();
4032  SMLoc EndLoc;
4033  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4034    Error(Loc, "illegal expression");
4035    return MatchOperand_ParseFail;
4036  }
4037  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4038  if (!CE) {
4039    Error(Loc, "constant expression expected");
4040    return MatchOperand_ParseFail;
4041  }
4042  int Val = CE->getValue();
4043  if (Val < Low || Val > High) {
4044    Error(Loc, "immediate value out of range");
4045    return MatchOperand_ParseFail;
4046  }
4047
4048  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4049
4050  return MatchOperand_Success;
4051}
4052
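/// parseSetEndImm - Parse the "be"/"le" operand of SETEND, e.g. "setend be";
/// "be" maps to the immediate 1 and "le" to 0.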
4053ARMAsmParser::OperandMatchResultTy
4054ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4055  const AsmToken &Tok = Parser.getTok();
4056  SMLoc S = Tok.getLoc();
4057  if (Tok.isNot(AsmToken::Identifier)) {
4058    Error(S, "'be' or 'le' operand expected");
4059    return MatchOperand_ParseFail;
4060  }
4061  int Val = StringSwitch<int>(Tok.getString().lower())
4062    .Case("be", 1)
4063    .Case("le", 0)
4064    .Default(-1);
4065  Parser.Lex(); // Eat the token.
4066
4067  if (Val == -1) {
4068    Error(S, "'be' or 'le' operand expected");
4069    return MatchOperand_ParseFail;
4070  }
4071  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
4072                                                                  getContext()),
4073                                           S, Tok.getEndLoc()));
4074  return MatchOperand_Success;
4075}
4076
4077/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4078/// instructions. Legal values are:
4079///     lsl #n  'n' in [0,31]
4080///     asr #n  'n' in [1,32]
4081///             n == 32 encoded as n == 0.
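/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the "asr #32" in
/// "usat r0, #7, r1, asr #32" (ARM mode; asr #32 is rejected in Thumb below).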
4082ARMAsmParser::OperandMatchResultTy
4083ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4084  const AsmToken &Tok = Parser.getTok();
4085  SMLoc S = Tok.getLoc();
4086  if (Tok.isNot(AsmToken::Identifier)) {
4087    Error(S, "shift operator 'asr' or 'lsl' expected");
4088    return MatchOperand_ParseFail;
4089  }
4090  StringRef ShiftName = Tok.getString();
4091  bool isASR;
4092  if (ShiftName == "lsl" || ShiftName == "LSL")
4093    isASR = false;
4094  else if (ShiftName == "asr" || ShiftName == "ASR")
4095    isASR = true;
4096  else {
4097    Error(S, "shift operator 'asr' or 'lsl' expected");
4098    return MatchOperand_ParseFail;
4099  }
4100  Parser.Lex(); // Eat the operator.
4101
4102  // A '#' and a shift amount.
4103  if (Parser.getTok().isNot(AsmToken::Hash) &&
4104      Parser.getTok().isNot(AsmToken::Dollar)) {
4105    Error(Parser.getTok().getLoc(), "'#' expected");
4106    return MatchOperand_ParseFail;
4107  }
4108  Parser.Lex(); // Eat hash token.
4109  SMLoc ExLoc = Parser.getTok().getLoc();
4110
4111  const MCExpr *ShiftAmount;
4112  SMLoc EndLoc;
4113  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4114    Error(ExLoc, "malformed shift expression");
4115    return MatchOperand_ParseFail;
4116  }
4117  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4118  if (!CE) {
4119    Error(ExLoc, "shift amount must be an immediate");
4120    return MatchOperand_ParseFail;
4121  }
4122
4123  int64_t Val = CE->getValue();
4124  if (isASR) {
4125    // Shift amount must be in [1,32]
4126    if (Val < 1 || Val > 32) {
4127      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4128      return MatchOperand_ParseFail;
4129    }
4130    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4131    if (isThumb() && Val == 32) {
4132      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4133      return MatchOperand_ParseFail;
4134    }
4135    if (Val == 32) Val = 0;
4136  } else {
4137    // Shift amount must be in [0,31]
4138    if (Val < 0 || Val > 31) {
4139      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4140      return MatchOperand_ParseFail;
4141    }
4142  }
4143
4144  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4145
4146  return MatchOperand_Success;
4147}
4148
4149/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4150/// of instructions. Legal values are:
4151///     ror #n  'n' in {0, 8, 16, 24}
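/// For example, the "ror #16" in "uxtb r0, r1, ror #16" or the "ror #8" in
/// "sxtah r0, r1, r2, ror #8".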
4152ARMAsmParser::OperandMatchResultTy
4153ARMAsmParser::parseRotImm(OperandVector &Operands) {
4154  const AsmToken &Tok = Parser.getTok();
4155  SMLoc S = Tok.getLoc();
4156  if (Tok.isNot(AsmToken::Identifier))
4157    return MatchOperand_NoMatch;
4158  StringRef ShiftName = Tok.getString();
4159  if (ShiftName != "ror" && ShiftName != "ROR")
4160    return MatchOperand_NoMatch;
4161  Parser.Lex(); // Eat the operator.
4162
4163  // A '#' and a rotate amount.
4164  if (Parser.getTok().isNot(AsmToken::Hash) &&
4165      Parser.getTok().isNot(AsmToken::Dollar)) {
4166    Error(Parser.getTok().getLoc(), "'#' expected");
4167    return MatchOperand_ParseFail;
4168  }
4169  Parser.Lex(); // Eat hash token.
4170  SMLoc ExLoc = Parser.getTok().getLoc();
4171
4172  const MCExpr *ShiftAmount;
4173  SMLoc EndLoc;
4174  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4175    Error(ExLoc, "malformed rotate expression");
4176    return MatchOperand_ParseFail;
4177  }
4178  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4179  if (!CE) {
4180    Error(ExLoc, "rotate amount must be an immediate");
4181    return MatchOperand_ParseFail;
4182  }
4183
4184  int64_t Val = CE->getValue();
4185  // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
4186  // normally, zero is represented in asm by omitting the rotate operand
4187  // entirely.
4188  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4189    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4190    return MatchOperand_ParseFail;
4191  }
4192
4193  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4194
4195  return MatchOperand_Success;
4196}
4197
4198ARMAsmParser::OperandMatchResultTy
4199ARMAsmParser::parseBitfield(OperandVector &Operands) {
4200  SMLoc S = Parser.getTok().getLoc();
4201  // The bitfield descriptor is really two operands, the LSB and the width.
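  // For example, the "#8, #4" in "bfi r0, r1, #8, #4" (lsb 8, width 4); both
  // values are combined into a single bitfield operand below.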
4202  if (Parser.getTok().isNot(AsmToken::Hash) &&
4203      Parser.getTok().isNot(AsmToken::Dollar)) {
4204    Error(Parser.getTok().getLoc(), "'#' expected");
4205    return MatchOperand_ParseFail;
4206  }
4207  Parser.Lex(); // Eat hash token.
4208
4209  const MCExpr *LSBExpr;
4210  SMLoc E = Parser.getTok().getLoc();
4211  if (getParser().parseExpression(LSBExpr)) {
4212    Error(E, "malformed immediate expression");
4213    return MatchOperand_ParseFail;
4214  }
4215  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4216  if (!CE) {
4217    Error(E, "'lsb' operand must be an immediate");
4218    return MatchOperand_ParseFail;
4219  }
4220
4221  int64_t LSB = CE->getValue();
4222  // The LSB must be in the range [0,31]
4223  if (LSB < 0 || LSB > 31) {
4224    Error(E, "'lsb' operand must be in the range [0,31]");
4225    return MatchOperand_ParseFail;
4226  }
4227  E = Parser.getTok().getLoc();
4228
4229  // Expect another immediate operand.
4230  if (Parser.getTok().isNot(AsmToken::Comma)) {
4231    Error(Parser.getTok().getLoc(), "too few operands");
4232    return MatchOperand_ParseFail;
4233  }
4234  Parser.Lex(); // Eat comma token.
4235  if (Parser.getTok().isNot(AsmToken::Hash) &&
4236      Parser.getTok().isNot(AsmToken::Dollar)) {
4237    Error(Parser.getTok().getLoc(), "'#' expected");
4238    return MatchOperand_ParseFail;
4239  }
4240  Parser.Lex(); // Eat hash token.
4241
4242  const MCExpr *WidthExpr;
4243  SMLoc EndLoc;
4244  if (getParser().parseExpression(WidthExpr, EndLoc)) {
4245    Error(E, "malformed immediate expression");
4246    return MatchOperand_ParseFail;
4247  }
4248  CE = dyn_cast<MCConstantExpr>(WidthExpr);
4249  if (!CE) {
4250    Error(E, "'width' operand must be an immediate");
4251    return MatchOperand_ParseFail;
4252  }
4253
4254  int64_t Width = CE->getValue();
4255  // The width must be in the range [1,32-lsb]
4256  if (Width < 1 || Width > 32 - LSB) {
4257    Error(E, "'width' operand must be in the range [1,32-lsb]");
4258    return MatchOperand_ParseFail;
4259  }
4260
4261  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4262
4263  return MatchOperand_Success;
4264}
4265
4266ARMAsmParser::OperandMatchResultTy
4267ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4268  // Check for a post-index addressing register operand. Specifically:
4269  // postidx_reg := '+' register {, shift}
4270  //              | '-' register {, shift}
4271  //              | register {, shift}
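  // For example, the "r2" in "ldr r0, [r1], r2", the "-r2" in
  // "ldr r0, [r1], -r2", or the "r2, lsl #2" in "str r0, [r1], r2, lsl #2".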
4272
4273  // This method must return MatchOperand_NoMatch without consuming any tokens
4274  // in the case where there is no match, as other alternative parse methods
4275  // will be tried instead.
4276  AsmToken Tok = Parser.getTok();
4277  SMLoc S = Tok.getLoc();
4278  bool haveEaten = false;
4279  bool isAdd = true;
4280  if (Tok.is(AsmToken::Plus)) {
4281    Parser.Lex(); // Eat the '+' token.
4282    haveEaten = true;
4283  } else if (Tok.is(AsmToken::Minus)) {
4284    Parser.Lex(); // Eat the '-' token.
4285    isAdd = false;
4286    haveEaten = true;
4287  }
4288
4289  SMLoc E = Parser.getTok().getEndLoc();
4290  int Reg = tryParseRegister();
4291  if (Reg == -1) {
4292    if (!haveEaten)
4293      return MatchOperand_NoMatch;
4294    Error(Parser.getTok().getLoc(), "register expected");
4295    return MatchOperand_ParseFail;
4296  }
4297
4298  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4299  unsigned ShiftImm = 0;
4300  if (Parser.getTok().is(AsmToken::Comma)) {
4301    Parser.Lex(); // Eat the ','.
4302    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4303      return MatchOperand_ParseFail;
4304
4305    // FIXME: Only approximates end...may include intervening whitespace.
4306    E = Parser.getTok().getLoc();
4307  }
4308
4309  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4310                                                  ShiftImm, S, E));
4311
4312  return MatchOperand_Success;
4313}
4314
4315ARMAsmParser::OperandMatchResultTy
4316ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4317  // Check for a post-index addressing register operand. Specifically:
4318  // am3offset := '+' register
4319  //              | '-' register
4320  //              | register
4321  //              | # imm
4322  //              | # + imm
4323  //              | # - imm
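  // For example, the "#8" in "ldrd r0, r1, [r2], #8", the "-r3" in
  // "ldrh r0, [r2], -r3", or "#-0" (encoded specially as INT32_MIN below).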
4324
4325  // This method must return MatchOperand_NoMatch without consuming any tokens
4326  // in the case where there is no match, as other alternative parse methods
4327  // will be tried instead.
4328  AsmToken Tok = Parser.getTok();
4329  SMLoc S = Tok.getLoc();
4330
4331  // Do immediates first, as we always parse those if we have a '#'.
4332  if (Parser.getTok().is(AsmToken::Hash) ||
4333      Parser.getTok().is(AsmToken::Dollar)) {
4334    Parser.Lex(); // Eat '#' or '$'.
4335    // Explicitly look for a '-', as we need to encode negative zero
4336    // differently.
4337    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4338    const MCExpr *Offset;
4339    SMLoc E;
4340    if (getParser().parseExpression(Offset, E))
4341      return MatchOperand_ParseFail;
4342    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4343    if (!CE) {
4344      Error(S, "constant expression expected");
4345      return MatchOperand_ParseFail;
4346    }
4347    // Negative zero is encoded as the flag value INT32_MIN.
4348    int32_t Val = CE->getValue();
4349    if (isNegative && Val == 0)
4350      Val = INT32_MIN;
4351
4352    Operands.push_back(
4353      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
4354
4355    return MatchOperand_Success;
4356  }
4357
4358
4359  bool haveEaten = false;
4360  bool isAdd = true;
4361  if (Tok.is(AsmToken::Plus)) {
4362    Parser.Lex(); // Eat the '+' token.
4363    haveEaten = true;
4364  } else if (Tok.is(AsmToken::Minus)) {
4365    Parser.Lex(); // Eat the '-' token.
4366    isAdd = false;
4367    haveEaten = true;
4368  }
4369
4370  Tok = Parser.getTok();
4371  int Reg = tryParseRegister();
4372  if (Reg == -1) {
4373    if (!haveEaten)
4374      return MatchOperand_NoMatch;
4375    Error(Tok.getLoc(), "register expected");
4376    return MatchOperand_ParseFail;
4377  }
4378
4379  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4380                                                  0, S, Tok.getEndLoc()));
4381
4382  return MatchOperand_Success;
4383}
4384
4385/// Convert parsed operands to MCInst.  Needed here because this instruction
4386/// only has two register operands, but multiplication is commutative so
4387/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
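/// For example (illustrative), "muls r0, r1, r0" and "muls r0, r0, r1" should
/// both assemble to the same narrow Thumb MUL encoding.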
4388void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4389                                    const OperandVector &Operands) {
4390  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4391  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4392  // If we have a three-operand form, make sure to set Rn to be the operand
4393  // that isn't the same as Rd.
4394  unsigned RegOp = 4;
4395  if (Operands.size() == 6 &&
4396      ((ARMOperand &)*Operands[4]).getReg() ==
4397          ((ARMOperand &)*Operands[3]).getReg())
4398    RegOp = 5;
4399  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4400  Inst.addOperand(Inst.getOperand(0));
4401  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4402}
4403
4404void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4405                                    const OperandVector &Operands) {
4406  int CondOp = -1, ImmOp = -1;
4407  switch(Inst.getOpcode()) {
4408    case ARM::tB:
4409    case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
4410
4411    case ARM::t2B:
4412    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4413
4414    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4415  }
4416  // first decide whether or not the branch should be conditional
4417  // by looking at it's location relative to an IT block
4418  if(inITBlock()) {
4419    // inside an IT block we cannot have any conditional branches. any
4420    // such instructions needs to be converted to unconditional form
4421    switch(Inst.getOpcode()) {
4422      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4423      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4424    }
4425  } else {
4426    // outside IT blocks we can only have unconditional branches with AL
4427    // condition code or conditional branches with non-AL condition code
4428    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4429    switch(Inst.getOpcode()) {
4430      case ARM::tB:
4431      case ARM::tBcc:
4432        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4433        break;
4434      case ARM::t2B:
4435      case ARM::t2Bcc:
4436        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4437        break;
4438    }
4439  }
4440
4441  // now decide on encoding size based on branch target range
4442  switch(Inst.getOpcode()) {
4443    // Widen tB to t2B if the immediate won't fit in the narrow encoding.
4444    case ARM::tB: {
4445      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4446      if (!op.isSignedOffset<11, 1>() && isThumbTwo())
4447        Inst.setOpcode(ARM::t2B);
4448      break;
4449    }
4450    // Widen tBcc to t2Bcc if the immediate won't fit in the narrow encoding.
4451    case ARM::tBcc: {
4452      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4453      if (!op.isSignedOffset<8, 1>() && isThumbTwo())
4454        Inst.setOpcode(ARM::t2Bcc);
4455      break;
4456    }
4457  }
4458  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4459  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4460}
4461
4462/// Parse an ARM memory expression. Return false on success; on failure, emit an
4463/// error and return true.  The first token must be a '[' when called.
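/// Illustrative examples of forms handled here (assumed typical syntax):
///   [r0]               @ base register only
///   [r0:128]           @ base with alignment specifier
///   [r0, #16]          @ base plus immediate offset
///   [r0, -r1, lsl #2]  @ base plus (negated) register offset with shift
/// each optionally followed by a '!' pre-indexing writeback marker.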
4464bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4465  SMLoc S, E;
4466  assert(Parser.getTok().is(AsmToken::LBrac) &&
4467         "Token is not a Left Bracket");
4468  S = Parser.getTok().getLoc();
4469  Parser.Lex(); // Eat left bracket token.
4470
4471  const AsmToken &BaseRegTok = Parser.getTok();
4472  int BaseRegNum = tryParseRegister();
4473  if (BaseRegNum == -1)
4474    return Error(BaseRegTok.getLoc(), "register expected");
4475
4476  // The next token must either be a comma, a colon or a closing bracket.
4477  const AsmToken &Tok = Parser.getTok();
4478  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4479      !Tok.is(AsmToken::RBrac))
4480    return Error(Tok.getLoc(), "malformed memory operand");
4481
4482  if (Tok.is(AsmToken::RBrac)) {
4483    E = Tok.getEndLoc();
4484    Parser.Lex(); // Eat right bracket token.
4485
4486    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4487                                             ARM_AM::no_shift, 0, 0, false,
4488                                             S, E));
4489
4490    // If there's a pre-indexing writeback marker, '!', just add it as a token
4491    // operand. It's rather odd, but syntactically valid.
4492    if (Parser.getTok().is(AsmToken::Exclaim)) {
4493      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4494      Parser.Lex(); // Eat the '!'.
4495    }
4496
4497    return false;
4498  }
4499
4500  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4501         "Lost colon or comma in memory operand?!");
4502  if (Tok.is(AsmToken::Comma)) {
4503    Parser.Lex(); // Eat the comma.
4504  }
4505
4506  // If we have a ':', it's an alignment specifier.
4507  if (Parser.getTok().is(AsmToken::Colon)) {
4508    Parser.Lex(); // Eat the ':'.
4509    E = Parser.getTok().getLoc();
4510    SMLoc AlignmentLoc = Tok.getLoc();
4511
4512    const MCExpr *Expr;
4513    if (getParser().parseExpression(Expr))
4514      return true;
4515
4516    // The expression has to be a constant. Memory references with relocations
4517    // don't come through here, as they use the <label> forms of the relevant
4518    // instructions.
4519    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4520    if (!CE)
4521      return Error(E, "constant expression expected");
4522
4523    unsigned Align = 0;
4524    switch (CE->getValue()) {
4525    default:
4526      return Error(E,
4527                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4528    case 16:  Align = 2; break;
4529    case 32:  Align = 4; break;
4530    case 64:  Align = 8; break;
4531    case 128: Align = 16; break;
4532    case 256: Align = 32; break;
4533    }
4534
4535    // Now we should have the closing ']'
4536    if (Parser.getTok().isNot(AsmToken::RBrac))
4537      return Error(Parser.getTok().getLoc(), "']' expected");
4538    E = Parser.getTok().getEndLoc();
4539    Parser.Lex(); // Eat right bracket token.
4540
4541    // Don't worry about range checking the value here. That's handled by
4542    // the is*() predicates.
4543    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4544                                             ARM_AM::no_shift, 0, Align,
4545                                             false, S, E, AlignmentLoc));
4546
4547    // If there's a pre-indexing writeback marker, '!', just add it as a token
4548    // operand.
4549    if (Parser.getTok().is(AsmToken::Exclaim)) {
4550      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4551      Parser.Lex(); // Eat the '!'.
4552    }
4553
4554    return false;
4555  }
4556
4557  // If we have a '#', it's an immediate offset, else assume it's a register
4558  // offset. Be friendly and also accept a plain integer (without a leading
4559  // hash) for gas compatibility.
4560  if (Parser.getTok().is(AsmToken::Hash) ||
4561      Parser.getTok().is(AsmToken::Dollar) ||
4562      Parser.getTok().is(AsmToken::Integer)) {
4563    if (Parser.getTok().isNot(AsmToken::Integer))
4564      Parser.Lex(); // Eat '#' or '$'.
4565    E = Parser.getTok().getLoc();
4566
4567    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4568    const MCExpr *Offset;
4569    if (getParser().parseExpression(Offset))
4570      return true;
4571
4572    // The expression has to be a constant. Memory references with relocations
4573    // don't come through here, as they use the <label> forms of the relevant
4574    // instructions.
4575    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4576    if (!CE)
4577      return Error(E, "constant expression expected");
4578
4579    // If the constant was #-0, represent it as INT32_MIN.
4580    int32_t Val = CE->getValue();
4581    if (isNegative && Val == 0)
4582      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4583
4584    // Now we should have the closing ']'
4585    if (Parser.getTok().isNot(AsmToken::RBrac))
4586      return Error(Parser.getTok().getLoc(), "']' expected");
4587    E = Parser.getTok().getEndLoc();
4588    Parser.Lex(); // Eat right bracket token.
4589
4590    // Don't worry about range checking the value here. That's handled by
4591    // the is*() predicates.
4592    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4593                                             ARM_AM::no_shift, 0, 0,
4594                                             false, S, E));
4595
4596    // If there's a pre-indexing writeback marker, '!', just add it as a token
4597    // operand.
4598    if (Parser.getTok().is(AsmToken::Exclaim)) {
4599      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4600      Parser.Lex(); // Eat the '!'.
4601    }
4602
4603    return false;
4604  }
4605
4606  // The register offset is optionally preceded by a '+' or '-'
4607  bool isNegative = false;
4608  if (Parser.getTok().is(AsmToken::Minus)) {
4609    isNegative = true;
4610    Parser.Lex(); // Eat the '-'.
4611  } else if (Parser.getTok().is(AsmToken::Plus)) {
4612    // Nothing to do.
4613    Parser.Lex(); // Eat the '+'.
4614  }
4615
4616  E = Parser.getTok().getLoc();
4617  int OffsetRegNum = tryParseRegister();
4618  if (OffsetRegNum == -1)
4619    return Error(E, "register expected");
4620
4621  // If there's a shift operator, handle it.
4622  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4623  unsigned ShiftImm = 0;
4624  if (Parser.getTok().is(AsmToken::Comma)) {
4625    Parser.Lex(); // Eat the ','.
4626    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4627      return true;
4628  }
4629
4630  // Now we should have the closing ']'
4631  if (Parser.getTok().isNot(AsmToken::RBrac))
4632    return Error(Parser.getTok().getLoc(), "']' expected");
4633  E = Parser.getTok().getEndLoc();
4634  Parser.Lex(); // Eat right bracket token.
4635
4636  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
4637                                           ShiftType, ShiftImm, 0, isNegative,
4638                                           S, E));
4639
4640  // If there's a pre-indexing writeback marker, '!', just add it as a token
4641  // operand.
4642  if (Parser.getTok().is(AsmToken::Exclaim)) {
4643    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4644    Parser.Lex(); // Eat the '!'.
4645  }
4646
4647  return false;
4648}
4649
4650/// parseMemRegOffsetShift - Parse one of these two shift specifiers:
4651///   ( lsl | lsr | asr | ror ) , # shift_amount
4652///   rrx
4653/// Return false if the shift is parsed successfully, true otherwise.
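/// For example (illustrative), the "lsl #2" in "ldr r0, [r1, r2, lsl #2]" or
/// the "rrx" in "ldr r0, [r1, r2, rrx]".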
4654bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4655                                          unsigned &Amount) {
4656  SMLoc Loc = Parser.getTok().getLoc();
4657  const AsmToken &Tok = Parser.getTok();
4658  if (Tok.isNot(AsmToken::Identifier))
4659    return true;
4660  StringRef ShiftName = Tok.getString();
4661  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4662      ShiftName == "asl" || ShiftName == "ASL")
4663    St = ARM_AM::lsl;
4664  else if (ShiftName == "lsr" || ShiftName == "LSR")
4665    St = ARM_AM::lsr;
4666  else if (ShiftName == "asr" || ShiftName == "ASR")
4667    St = ARM_AM::asr;
4668  else if (ShiftName == "ror" || ShiftName == "ROR")
4669    St = ARM_AM::ror;
4670  else if (ShiftName == "rrx" || ShiftName == "RRX")
4671    St = ARM_AM::rrx;
4672  else
4673    return Error(Loc, "illegal shift operator");
4674  Parser.Lex(); // Eat shift type token.
4675
4676  // rrx stands alone.
4677  Amount = 0;
4678  if (St != ARM_AM::rrx) {
4679    Loc = Parser.getTok().getLoc();
4680    // A '#' and a shift amount.
4681    const AsmToken &HashTok = Parser.getTok();
4682    if (HashTok.isNot(AsmToken::Hash) &&
4683        HashTok.isNot(AsmToken::Dollar))
4684      return Error(HashTok.getLoc(), "'#' expected");
4685    Parser.Lex(); // Eat hash token.
4686
4687    const MCExpr *Expr;
4688    if (getParser().parseExpression(Expr))
4689      return true;
4690    // Range check the immediate.
4691    // lsl, ror: 0 <= imm <= 31
4692    // lsr, asr: 0 <= imm <= 32
4693    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4694    if (!CE)
4695      return Error(Loc, "shift amount must be an immediate");
4696    int64_t Imm = CE->getValue();
4697    if (Imm < 0 ||
4698        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4699        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4700      return Error(Loc, "immediate shift value out of range");
4701    // If <ShiftTy> #0, canonicalize it to lsl #0, i.e. no shift.
4702    if (Imm == 0)
4703      St = ARM_AM::lsl;
4704    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4705    if (Imm == 32)
4706      Imm = 0;
4707    Amount = Imm;
4708  }
4709
4710  return false;
4711}
4712
4713/// parseFPImm - A floating point immediate expression operand.
4714ARMAsmParser::OperandMatchResultTy
4715ARMAsmParser::parseFPImm(OperandVector &Operands) {
4716  // Anything that can accept a floating point constant as an operand
4717  // needs to go through here, as the regular parseExpression is
4718  // integer only.
4719  //
4720  // This routine still creates a generic Immediate operand, containing
4721  // a bitcast of the 64-bit floating point value. The various operands
4722  // that accept floats can check whether the value is valid for them
4723  // via the standard is*() predicates.
4724
4725  SMLoc S = Parser.getTok().getLoc();
4726
4727  if (Parser.getTok().isNot(AsmToken::Hash) &&
4728      Parser.getTok().isNot(AsmToken::Dollar))
4729    return MatchOperand_NoMatch;
4730
4731  // Disambiguate the VMOV forms that can accept an FP immediate.
4732  // vmov.f32 <sreg>, #imm
4733  // vmov.f64 <dreg>, #imm
4734  // vmov.f32 <dreg>, #imm  @ vector f32x2
4735  // vmov.f32 <qreg>, #imm  @ vector f32x4
4736  //
4737  // There are also the NEON VMOV instructions which expect an
4738  // integer constant. Make sure we don't try to parse an FPImm
4739  // for these:
4740  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
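  //
  // Illustrative examples of what is accepted here (assumed typical syntax):
  //   vmov.f32 s0, #1.0        @ FP immediate
  //   vmov.f64 d0, #-2.0       @ FP immediate
  //   fconsts  s0, #112        @ raw encoded 8-bit value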
4741  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
4742  bool isVmovf = TyOp.isToken() &&
4743                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
4744  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
4745  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
4746                                         Mnemonic.getToken() == "fconsts");
4747  if (!(isVmovf || isFconst))
4748    return MatchOperand_NoMatch;
4749
4750  Parser.Lex(); // Eat '#' or '$'.
4751
4752  // Handle negation, as that still comes through as a separate token.
4753  bool isNegative = false;
4754  if (Parser.getTok().is(AsmToken::Minus)) {
4755    isNegative = true;
4756    Parser.Lex();
4757  }
4758  const AsmToken &Tok = Parser.getTok();
4759  SMLoc Loc = Tok.getLoc();
4760  if (Tok.is(AsmToken::Real) && isVmovf) {
4761    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4762    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4763    // If we had a '-' in front, toggle the sign bit.
4764    IntVal ^= (uint64_t)isNegative << 31;
4765    Parser.Lex(); // Eat the token.
4766    Operands.push_back(ARMOperand::CreateImm(
4767          MCConstantExpr::Create(IntVal, getContext()),
4768          S, Parser.getTok().getLoc()));
4769    return MatchOperand_Success;
4770  }
4771  // Also handle plain integers. Instructions which allow floating point
4772  // immediates also allow a raw encoded 8-bit value.
4773  if (Tok.is(AsmToken::Integer) && isFconst) {
4774    int64_t Val = Tok.getIntVal();
4775    Parser.Lex(); // Eat the token.
4776    if (Val > 255 || Val < 0) {
4777      Error(Loc, "encoded floating point value out of range");
4778      return MatchOperand_ParseFail;
4779    }
4780    float RealVal = ARM_AM::getFPImmFloat(Val);
4781    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
4782
4783    Operands.push_back(ARMOperand::CreateImm(
4784        MCConstantExpr::Create(Val, getContext()), S,
4785        Parser.getTok().getLoc()));
4786    return MatchOperand_Success;
4787  }
4788
4789  Error(Loc, "invalid floating point immediate");
4790  return MatchOperand_ParseFail;
4791}
4792
4793/// Parse an ARM instruction operand.  For now this parses the operand regardless
4794/// of the mnemonic.
4795bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
4796  SMLoc S, E;
4797
4798  // Check if the current operand has a custom associated parser, if so, try to
4799  // custom parse the operand, or fallback to the general approach.
4800  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4801  if (ResTy == MatchOperand_Success)
4802    return false;
4803  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4804  // there was a match, but an error occurred, in which case, just return that
4805  // the operand parsing failed.
4806  if (ResTy == MatchOperand_ParseFail)
4807    return true;
4808
4809  switch (getLexer().getKind()) {
4810  default:
4811    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4812    return true;
4813  case AsmToken::Identifier: {
4814    // If we've seen a branch mnemonic, the next operand must be a label.  This
4815    // is true even if the label is a register name.  So "b r1" means branch to
4816    // label "r1".
4817    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4818    if (!ExpectLabel) {
4819      if (!tryParseRegisterWithWriteBack(Operands))
4820        return false;
4821      int Res = tryParseShiftRegister(Operands);
4822      if (Res == 0) // success
4823        return false;
4824      else if (Res == -1) // irrecoverable error
4825        return true;
4826      // If this is VMRS, check for the apsr_nzcv operand.
4827      if (Mnemonic == "vmrs" &&
4828          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4829        S = Parser.getTok().getLoc();
4830        Parser.Lex();
4831        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4832        return false;
4833      }
4834    }
4835
4836    // Fall through for the Identifier case that is not a register or a
4837    // special name.
4838  }
4839  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4840  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4841  case AsmToken::String:  // quoted label names.
4842  case AsmToken::Dot: {   // . as a branch target
4843    // This was not a register so parse other operands that start with an
4844    // identifier (like labels) as expressions and create them as immediates.
4845    const MCExpr *IdVal;
4846    S = Parser.getTok().getLoc();
4847    if (getParser().parseExpression(IdVal))
4848      return true;
4849    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4850    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4851    return false;
4852  }
4853  case AsmToken::LBrac:
4854    return parseMemory(Operands);
4855  case AsmToken::LCurly:
4856    return parseRegisterList(Operands);
4857  case AsmToken::Dollar:
4858  case AsmToken::Hash: {
4859    // #42 -> immediate.
4860    S = Parser.getTok().getLoc();
4861    Parser.Lex();
4862
4863    if (Parser.getTok().isNot(AsmToken::Colon)) {
4864      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4865      const MCExpr *ImmVal;
4866      if (getParser().parseExpression(ImmVal))
4867        return true;
4868      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4869      if (CE) {
4870        int32_t Val = CE->getValue();
4871        if (isNegative && Val == 0)
4872          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4873      }
4874      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4875      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4876
4877      // There can be a trailing '!' on operands that we want as a separate
4878      // '!' Token operand. Handle that here. For example, the compatibility
4879      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4880      if (Parser.getTok().is(AsmToken::Exclaim)) {
4881        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4882                                                   Parser.getTok().getLoc()));
4883        Parser.Lex(); // Eat exclaim token
4884      }
4885      return false;
4886    }
4887    // With a ':' after the '#', it's just like a plain ':'.
4888    // FALLTHROUGH
4889  }
4890  case AsmToken::Colon: {
4891    // ":lower16:" and ":upper16:" expression prefixes
4892    // FIXME: Check it's an expression prefix,
4893    // e.g. (FOO - :lower16:BAR) isn't legal.
4894    ARMMCExpr::VariantKind RefKind;
4895    if (parsePrefix(RefKind))
4896      return true;
4897
4898    const MCExpr *SubExprVal;
4899    if (getParser().parseExpression(SubExprVal))
4900      return true;
4901
4902    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4903                                              getContext());
4904    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4905    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4906    return false;
4907  }
4908  case AsmToken::Equal: {
4909    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4910      return Error(Parser.getTok().getLoc(), "unexpected token in operand");
4911
4912    Parser.Lex(); // Eat '='
4913    const MCExpr *SubExprVal;
4914    if (getParser().parseExpression(SubExprVal))
4915      return true;
4916    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4917
4918    const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
4919    Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
4920    return false;
4921  }
4922  }
4923}
4924
4925// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4926//  :lower16: and :upper16:.
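// For example (illustrative; 'sym' is an arbitrary symbol):
//   movw r0, #:lower16:sym
//   movt r0, #:upper16:sym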
4927bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4928  RefKind = ARMMCExpr::VK_ARM_None;
4929
4930  // consume an optional '#' (GNU compatibility)
4931  if (getLexer().is(AsmToken::Hash))
4932    Parser.Lex();
4933
4934  // :lower16: and :upper16: modifiers
4935  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4936  Parser.Lex(); // Eat ':'
4937
4938  if (getLexer().isNot(AsmToken::Identifier)) {
4939    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4940    return true;
4941  }
4942
4943  StringRef IDVal = Parser.getTok().getIdentifier();
4944  if (IDVal == "lower16") {
4945    RefKind = ARMMCExpr::VK_ARM_LO16;
4946  } else if (IDVal == "upper16") {
4947    RefKind = ARMMCExpr::VK_ARM_HI16;
4948  } else {
4949    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4950    return true;
4951  }
4952  Parser.Lex();
4953
4954  if (getLexer().isNot(AsmToken::Colon)) {
4955    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4956    return true;
4957  }
4958  Parser.Lex(); // Eat the last ':'
4959  return false;
4960}
4961
4962/// \brief Given a mnemonic, split out possible predication code and carry
4963/// setting letters to form a canonical mnemonic and flags.
4964//
4965// FIXME: Would be nice to autogen this.
4966// FIXME: This is a bit of a maze of special cases.
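//
// For example (illustrative), "addseq" splits into the canonical mnemonic "add"
// with the carry-setting flag set and an EQ predication code, and "cpsie"
// splits into "cps" with ProcessorIMod set to ARM_PROC::IE.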
4967StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4968                                      unsigned &PredicationCode,
4969                                      bool &CarrySetting,
4970                                      unsigned &ProcessorIMod,
4971                                      StringRef &ITMask) {
4972  PredicationCode = ARMCC::AL;
4973  CarrySetting = false;
4974  ProcessorIMod = 0;
4975
4976  // Ignore some mnemonics we know aren't predicated forms.
4977  //
4978  // FIXME: Would be nice to autogen this.
4979  if ((Mnemonic == "movs" && isThumb()) ||
4980      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4981      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4982      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4983      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4984      Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
4985      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4986      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4987      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4988      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
4989      Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
4990      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
4991      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
4992    return Mnemonic;
4993
4994  // First, split out any predication code. Ignore mnemonics we know aren't
4995  // predicated but do have a carry-set and so weren't caught above.
4996  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4997      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4998      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4999      Mnemonic != "sbcs" && Mnemonic != "rscs") {
5000    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
5001      .Case("eq", ARMCC::EQ)
5002      .Case("ne", ARMCC::NE)
5003      .Case("hs", ARMCC::HS)
5004      .Case("cs", ARMCC::HS)
5005      .Case("lo", ARMCC::LO)
5006      .Case("cc", ARMCC::LO)
5007      .Case("mi", ARMCC::MI)
5008      .Case("pl", ARMCC::PL)
5009      .Case("vs", ARMCC::VS)
5010      .Case("vc", ARMCC::VC)
5011      .Case("hi", ARMCC::HI)
5012      .Case("ls", ARMCC::LS)
5013      .Case("ge", ARMCC::GE)
5014      .Case("lt", ARMCC::LT)
5015      .Case("gt", ARMCC::GT)
5016      .Case("le", ARMCC::LE)
5017      .Case("al", ARMCC::AL)
5018      .Default(~0U);
5019    if (CC != ~0U) {
5020      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5021      PredicationCode = CC;
5022    }
5023  }
5024
5025  // Next, determine if we have a carry setting bit. We explicitly ignore all
5026  // the instructions we know end in 's'.
5027  if (Mnemonic.endswith("s") &&
5028      !(Mnemonic == "cps" || Mnemonic == "mls" ||
5029        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5030        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5031        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5032        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5033        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5034        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5035        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5036        Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5037        (Mnemonic == "movs" && isThumb()))) {
5038    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5039    CarrySetting = true;
5040  }
5041
5042  // The "cps" instruction can have a interrupt mode operand which is glued into
5043  // the mnemonic. Check if this is the case, split it and parse the imod op
5044  if (Mnemonic.startswith("cps")) {
5045    // Split out any imod code.
5046    unsigned IMod =
5047      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5048      .Case("ie", ARM_PROC::IE)
5049      .Case("id", ARM_PROC::ID)
5050      .Default(~0U);
5051    if (IMod != ~0U) {
5052      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5053      ProcessorIMod = IMod;
5054    }
5055  }
5056
5057  // The "it" instruction has the condition mask on the end of the mnemonic.
5058  if (Mnemonic.startswith("it")) {
5059    ITMask = Mnemonic.slice(2, Mnemonic.size());
5060    Mnemonic = Mnemonic.slice(0, 2);
5061  }
5062
5063  return Mnemonic;
5064}
5065
5066/// \brief Given a canonical mnemonic, determine if the instruction ever allows
5067/// inclusion of carry set or predication code operands.
5068//
5069// FIXME: It would be nice to autogen this.
5070void ARMAsmParser::
5071getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5072                     bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
5073  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5074      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5075      Mnemonic == "add" || Mnemonic == "adc" ||
5076      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
5077      Mnemonic == "orr" || Mnemonic == "mvn" ||
5078      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
5079      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
5080      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5081      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
5082                      Mnemonic == "mla" || Mnemonic == "smlal" ||
5083                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
5084    CanAcceptCarrySet = true;
5085  } else
5086    CanAcceptCarrySet = false;
5087
5088  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5089      Mnemonic == "cps" ||  Mnemonic == "it" ||  Mnemonic == "cbz" ||
5090      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5091      Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5092      Mnemonic.startswith("vsel") ||
5093      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
5094      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
5095      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
5096      Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
5097      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5098      (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
5099    // These mnemonics are never predicable
5100    CanAcceptPredicationCode = false;
5101  } else if (!isThumb()) {
5102    // Some instructions are only predicable in Thumb mode
5103    CanAcceptPredicationCode
5104      = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5105        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5106        Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
5107        Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
5108        Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
5109        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
5110        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
5111  } else if (isThumbOne()) {
5112    if (hasV6MOps())
5113      CanAcceptPredicationCode = Mnemonic != "movs";
5114    else
5115      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5116  } else
5117    CanAcceptPredicationCode = true;
5118}
5119
5120bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5121                                          OperandVector &Operands) {
5122  // FIXME: This is all horribly hacky. We really need a better way to deal
5123  // with optional operands like this in the matcher table.
5124
5125  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5126  // another does not. Specifically, the MOVW instruction does not. So we
5127  // special case it here and remove the defaulted (non-setting) cc_out
5128  // operand if that's the instruction we're trying to match.
5129  //
5130  // We do this as post-processing of the explicit operands rather than just
5131  // conditionally adding the cc_out in the first place because we need
5132  // to check the type of the parsed immediate operand.
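  //
  // For example (illustrative), "mov r0, #0xabcd" can only be MOVW (no cc_out),
  // whereas "mov r0, #0x1200" is a valid modified-immediate MOV and keeps the
  // cc_out operand.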
5133  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5134      !static_cast<ARMOperand &>(*Operands[4]).isARMSOImm() &&
5135      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5136      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5137    return true;
5138
5139  // Register-register 'add' for thumb does not have a cc_out operand
5140  // when there are only two register operands.
5141  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5142      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5143      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5144      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5145    return true;
5146  // Register-register 'add' for thumb does not have a cc_out operand
5147  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5148  // have to check the immediate range here since Thumb2 has a variant
5149  // that can handle a different range and has a cc_out operand.
5150  if (((isThumb() && Mnemonic == "add") ||
5151       (isThumbTwo() && Mnemonic == "sub")) &&
5152      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5153      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5154      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5155      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5156      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5157       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5158    return true;
5159  // For Thumb2, add/sub immediate does not have a cc_out operand for the
5160  // imm0_4095 variant. That's the least-preferred variant when
5161  // selecting via the generic "add" mnemonic, so to know that we
5162  // should remove the cc_out operand, we have to explicitly check that
5163  // it's not one of the other variants. Ugh.
5164  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5165      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5166      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5167      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5168    // Nest conditions rather than one big 'if' statement for readability.
5169    //
5170    // If both registers are low, we're in an IT block, and the immediate is
5171    // in range, we should use encoding T1 instead, which has a cc_out.
5172    if (inITBlock() &&
5173        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5174        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5175        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5176      return false;
5177    // Check against T3. If the second register is the PC, this is an
5178    // alternate form of ADR, which uses encoding T4, so check for that too.
5179    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5180        static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5181      return false;
5182
5183    // Otherwise, we use encoding T4, which does not have a cc_out
5184    // operand.
5185    return true;
5186  }
5187
5188  // The thumb2 multiply instruction doesn't have a CCOut register, so
5189  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5190  // use the 16-bit encoding or not.
5191  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5192      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5193      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5194      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5195      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5196      // If the registers aren't low regs, the destination reg isn't the
5197      // same as one of the source regs, or the cc_out operand is zero
5198      // outside of an IT block, we have to use the 32-bit encoding, so
5199      // remove the cc_out operand.
5200      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5201       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5202       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5203       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5204                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5205                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5206                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
5207    return true;
5208
5209  // Also check the 'mul' syntax variant that doesn't specify an explicit
5210  // destination register.
5211  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5212      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5213      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5214      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5215      // If the registers aren't low regs or the cc_out operand is zero
5216      // outside of an IT block, we have to use the 32-bit encoding, so
5217      // remove the cc_out operand.
5218      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5219       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5220       !inITBlock()))
5221    return true;
5222
5223
5224
5225  // Register-register 'add/sub' for thumb does not have a cc_out operand
5226  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5227  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5228  // right, this will result in better diagnostics (which operand is off)
5229  // anyway.
5230  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5231      (Operands.size() == 5 || Operands.size() == 6) &&
5232      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5233      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5234      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5235      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5236       (Operands.size() == 6 &&
5237        static_cast<ARMOperand &>(*Operands[5]).isImm())))
5238    return true;
5239
5240  return false;
5241}
5242
5243bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5244                                              OperandVector &Operands) {
5245  // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
5246  unsigned RegIdx = 3;
5247  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5248      static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32") {
5249    if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5250        static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32")
5251      RegIdx = 4;
5252
5253    if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5254        (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5255             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5256         ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5257             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5258      return true;
5259  }
5260  return false;
5261}
5262
5263static bool isDataTypeToken(StringRef Tok) {
5264  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5265    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5266    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5267    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5268    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5269    Tok == ".f" || Tok == ".d";
5270}
5271
5272// FIXME: This bit should probably be handled via an explicit match class
5273// in the .td files that matches the suffix instead of having it be
5274// a literal string token the way it is now.
5275static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5276  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5277}
5278static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5279                                 unsigned VariantID);
5280
5281static bool RequiresVFPRegListValidation(StringRef Inst,
5282                                         bool &AcceptSinglePrecisionOnly,
5283                                         bool &AcceptDoublePrecisionOnly) {
5284  if (Inst.size() < 7)
5285    return false;
5286
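  // Illustrative examples (legacy VFP mnemonics): "fldmias sp!, {s0-s3}" must
  // use a single-precision register list, while "fstmdbd r0!, {d0-d3}" must use
  // a double-precision register list.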
5287  if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5288    StringRef AddressingMode = Inst.substr(4, 2);
5289    if (AddressingMode == "ia" || AddressingMode == "db" ||
5290        AddressingMode == "ea" || AddressingMode == "fd") {
5291      AcceptSinglePrecisionOnly = Inst[6] == 's';
5292      AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5293      return true;
5294    }
5295  }
5296
5297  return false;
5298}
5299
5300/// Parse an arm instruction mnemonic followed by its operands.
5301bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5302                                    SMLoc NameLoc, OperandVector &Operands) {
5303  // FIXME: Can this be done via tablegen in some fashion?
5304  bool RequireVFPRegisterListCheck;
5305  bool AcceptSinglePrecisionOnly;
5306  bool AcceptDoublePrecisionOnly;
5307  RequireVFPRegisterListCheck =
5308    RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5309                                 AcceptDoublePrecisionOnly);
5310
5311  // Apply mnemonic aliases before doing anything else, as the destination
5312  // mnemonic may include suffixes and we want to handle them normally.
5313  // The generic tblgen'erated code does this later, at the start of
5314  // MatchInstructionImpl(), but that's too late for aliases that include
5315  // any sort of suffix.
5316  unsigned AvailableFeatures = getAvailableFeatures();
5317  unsigned AssemblerDialect = getParser().getAssemblerDialect();
5318  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5319
5320  // First check for the ARM-specific .req directive.
5321  if (Parser.getTok().is(AsmToken::Identifier) &&
5322      Parser.getTok().getIdentifier() == ".req") {
5323    parseDirectiveReq(Name, NameLoc);
5324    // We always return 'error' for this, as we're done with this
5325    // statement and don't need to match the instruction.
5326    return true;
5327  }
5328
5329  // Create the leading tokens for the mnemonic, split by '.' characters.
5330  size_t Start = 0, Next = Name.find('.');
5331  StringRef Mnemonic = Name.slice(Start, Next);
5332
5333  // Split out the predication code and carry setting flag from the mnemonic.
5334  unsigned PredicationCode;
5335  unsigned ProcessorIMod;
5336  bool CarrySetting;
5337  StringRef ITMask;
5338  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5339                           ProcessorIMod, ITMask);
5340
5341  // In Thumb1, only the branch (B) instruction can be predicated.
5342  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5343    Parser.eatToEndOfStatement();
5344    return Error(NameLoc, "conditional execution not supported in Thumb1");
5345  }
5346
5347  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5348
5349  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5350  // is the mask as it will be for the IT encoding if the conditional
5351  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5352  // where the conditional bit0 is zero, the instruction post-processing
5353  // will adjust the mask accordingly.
5354  if (Mnemonic == "it") {
5355    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5356    if (ITMask.size() > 3) {
5357      Parser.eatToEndOfStatement();
5358      return Error(Loc, "too many conditions on IT instruction");
5359    }
5360    unsigned Mask = 8;
5361    for (unsigned i = ITMask.size(); i != 0; --i) {
5362      char pos = ITMask[i - 1];
5363      if (pos != 't' && pos != 'e') {
5364        Parser.eatToEndOfStatement();
5365        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5366      }
5367      Mask >>= 1;
5368      if (ITMask[i - 1] == 't')
5369        Mask |= 8;
5370    }
5371    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5372  }
5373
5374  // FIXME: This is all a pretty gross hack. We should automatically handle
5375  // optional operands like this via tblgen.
5376
5377  // Next, add the CCOut and ConditionCode operands, if needed.
5378  //
5379  // For mnemonics which can ever incorporate a carry setting bit or predication
5380  // code, our matching model involves us always generating CCOut and
5381  // ConditionCode operands to match the mnemonic "as written" and then we let
5382  // the matcher deal with finding the right instruction or generating an
5383  // appropriate error.
5384  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5385  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5386
5387  // If we had a carry-set on an instruction that can't do that, issue an
5388  // error.
5389  if (!CanAcceptCarrySet && CarrySetting) {
5390    Parser.eatToEndOfStatement();
5391    return Error(NameLoc, "instruction '" + Mnemonic +
5392                 "' can not set flags, but 's' suffix specified");
5393  }
5394  // If we had a predication code on an instruction that can't do that, issue an
5395  // error.
5396  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5397    Parser.eatToEndOfStatement();
5398    return Error(NameLoc, "instruction '" + Mnemonic +
5399                 "' is not predicable, but condition code specified");
5400  }
5401
5402  // Add the carry setting operand, if necessary.
5403  if (CanAcceptCarrySet) {
5404    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5405    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5406                                               Loc));
5407  }
5408
5409  // Add the predication code operand, if necessary.
5410  if (CanAcceptPredicationCode) {
5411    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5412                                      CarrySetting);
5413    Operands.push_back(ARMOperand::CreateCondCode(
5414                         ARMCC::CondCodes(PredicationCode), Loc));
5415  }
5416
5417  // Add the processor imod operand, if necessary.
5418  if (ProcessorIMod) {
5419    Operands.push_back(ARMOperand::CreateImm(
5420          MCConstantExpr::Create(ProcessorIMod, getContext()),
5421                                 NameLoc, NameLoc));
5422  }
5423
5424  // Add the remaining tokens in the mnemonic.
5425  while (Next != StringRef::npos) {
5426    Start = Next;
5427    Next = Name.find('.', Start + 1);
5428    StringRef ExtraToken = Name.slice(Start, Next);
5429
5430    // Some NEON instructions have an optional datatype suffix that is
5431    // completely ignored. Check for that.
5432    if (isDataTypeToken(ExtraToken) &&
5433        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5434      continue;
5435
5436    // For ARM mode, generate an error if the .n qualifier is used.
5437    if (ExtraToken == ".n" && !isThumb()) {
5438      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5439      Parser.eatToEndOfStatement();
5440      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5441                   "arm mode");
5442    }
5443
5444    // The .n qualifier is always discarded as that is what the tables
5445    // and matcher expect.  In ARM mode the .w qualifier has no effect,
5446    // so discard it to avoid errors that can be caused by the matcher.
5447    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5448      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5449      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5450    }
5451  }
5452
5453  // Read the remaining operands.
5454  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5455    // Read the first operand.
5456    if (parseOperand(Operands, Mnemonic)) {
5457      Parser.eatToEndOfStatement();
5458      return true;
5459    }
5460
5461    while (getLexer().is(AsmToken::Comma)) {
5462      Parser.Lex();  // Eat the comma.
5463
5464      // Parse and remember the operand.
5465      if (parseOperand(Operands, Mnemonic)) {
5466        Parser.eatToEndOfStatement();
5467        return true;
5468      }
5469    }
5470  }
5471
5472  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5473    SMLoc Loc = getLexer().getLoc();
5474    Parser.eatToEndOfStatement();
5475    return Error(Loc, "unexpected token in argument list");
5476  }
5477
5478  Parser.Lex(); // Consume the EndOfStatement
5479
5480  if (RequireVFPRegisterListCheck) {
5481    ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
5482    if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
5483      return Error(Op.getStartLoc(),
5484                   "VFP/Neon single precision register expected");
5485    if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
5486      return Error(Op.getStartLoc(),
5487                   "VFP/Neon double precision register expected");
5488  }
5489
5490  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5491  // do and don't have a cc_out optional-def operand. With some spot-checks
5492  // of the operand list, we can figure out which variant we're trying to
5493  // parse and adjust accordingly before actually matching. We shouldn't ever
5494  // try to remove a cc_out operand that was explicitly set on the
5495  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5496  // table-driven matcher doesn't fit well with the ARM instruction set.
5497  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
5498    Operands.erase(Operands.begin() + 1);
5499
5500  // Some instructions have the same mnemonic, but don't always
5501  // have a predicate. Distinguish them here and delete the
5502  // predicate if needed.
5503  if (shouldOmitPredicateOperand(Mnemonic, Operands))
5504    Operands.erase(Operands.begin() + 1);
5505
5506  // ARM mode 'blx' need special handling, as the register operand version
5507  // is predicable, but the label operand version is not. So, we can't rely
5508  // on the Mnemonic based checking to correctly figure out when to put
5509  // a k_CondCode operand in the list. If we're trying to match the label
5510  // version, remove the k_CondCode operand here.
5511  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5512      static_cast<ARMOperand &>(*Operands[2]).isImm())
5513    Operands.erase(Operands.begin() + 1);
5514
5515  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5516  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5517  // a single GPRPair reg operand is used in the .td file to replace the two
5518  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5519  // expressed as a GPRPair, so we have to manually merge them.
5520  // FIXME: We would really like to be able to tablegen'erate this.
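  //
  // For example (illustrative), the r0/r1 operands of "ldrexd r0, r1, [r2]" are
  // merged into a single GPRPair operand here, while "ldrexd r1, r2, [r3]" is
  // rejected because the first register is odd.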
5521  if (!isThumb() && Operands.size() > 4 &&
5522      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5523       Mnemonic == "stlexd")) {
5524    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5525    unsigned Idx = isLoad ? 2 : 3;
5526    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
5527    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
5528
5529    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5530    // Adjust only if Op1 and Op2 are GPRs.
5531    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
5532        MRC.contains(Op2.getReg())) {
5533      unsigned Reg1 = Op1.getReg();
5534      unsigned Reg2 = Op2.getReg();
5535      unsigned Rt = MRI->getEncodingValue(Reg1);
5536      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5537
5538      // Rt2 must be Rt + 1 and Rt must be even.
5539      if (Rt + 1 != Rt2 || (Rt & 1)) {
5540        Error(Op2.getStartLoc(), isLoad
5541                                     ? "destination operands must be sequential"
5542                                     : "source operands must be sequential");
5543        return true;
5544      }
5545      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5546          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5547      Operands[Idx] =
5548          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
5549      Operands.erase(Operands.begin() + Idx + 1);
5550    }
5551  }
5552
5553  // GNU Assembler extension (compatibility)
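  // For example (illustrative), gas-style "ldrd r0, [r4]" is accepted as
  // shorthand for "ldrd r0, r1, [r4]"; the implied odd register of the pair is
  // inserted here.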
5554  if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
5555    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5556    ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5557    if (Op3.isMem()) {
5558      assert(Op2.isReg() && "expected register argument");
5559
5560      unsigned SuperReg = MRI->getMatchingSuperReg(
5561          Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
5562
5563      assert(SuperReg && "expected register pair");
5564
5565      unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
5566
5567      Operands.insert(
5568          Operands.begin() + 3,
5569          ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
5570    }
5571  }
5572
5573  // FIXME: As said above, this is all a pretty gross hack.  This instruction
5574  // does not fit with other "subs" and tblgen.
5575  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5576  // so the Mnemonic is the original name "subs" and delete the predicate
5577  // operand so it will match the table entry.
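  // For example (illustrative), the exception-return form "subs pc, lr, #4"
  // takes this path.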
5578  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5579      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5580      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
5581      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5582      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
5583      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5584    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
5585    Operands.erase(Operands.begin() + 1);
5586  }
5587  return false;
5588}
5589
5590// Validate context-sensitive operand constraints.
5591
5592// Return 'true' if the register list contains non-low GPR registers (other than
5593// the permitted HiReg), 'false' otherwise. If Reg is in the register list, set
5594// 'containsReg' to true.
5595static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5596                                 unsigned HiReg, bool &containsReg) {
5597  containsReg = false;
5598  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5599    unsigned OpReg = Inst.getOperand(i).getReg();
5600    if (OpReg == Reg)
5601      containsReg = true;
5602    // Anything other than a low register isn't legal here.
5603    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5604      return true;
5605  }
5606  return false;
5607}
5608
5609// Check if the specified register is in the register list of the inst,
5610// starting at the indicated operand number.
5611static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5612  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5613    unsigned OpReg = Inst.getOperand(i).getReg();
5614    if (OpReg == Reg)
5615      return true;
5616  }
5617  return false;
5618}
5619
5620// Return true if instruction has the interesting property of being
5621// allowed in IT blocks, but not being predicable.
5622static bool instIsBreakpoint(const MCInst &Inst) {
5623    return Inst.getOpcode() == ARM::tBKPT ||
5624           Inst.getOpcode() == ARM::BKPT ||
5625           Inst.getOpcode() == ARM::tHLT ||
5626           Inst.getOpcode() == ARM::HLT;
5627
5628}
5629
5630// FIXME: We would really like to be able to tablegen'erate this.
5631bool ARMAsmParser::validateInstruction(MCInst &Inst,
5632                                       const OperandVector &Operands) {
5633  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5634  SMLoc Loc = Operands[0]->getStartLoc();
5635
5636  // Check the IT block state first.
5637  // NOTE: BKPT and HLT instructions have the interesting property of being
5638  // allowed in IT blocks, but not being predicable. They just always execute.
5639  if (inITBlock() && !instIsBreakpoint(Inst)) {
5640    unsigned Bit = 1;
5641    if (ITState.FirstCond)
5642      ITState.FirstCond = false;
5643    else
5644      Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
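    // As used here, a set bit in ITState.Mask means "this slot uses the IT
    // block's condition" and a clear bit means the opposite condition; the
    // shift picks out the bit for the current position in the block, and the
    // first instruction always uses the block condition.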
5645    // The instruction must be predicable.
5646    if (!MCID.isPredicable())
5647      return Error(Loc, "instructions in IT block must be predicable");
5648    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5649    unsigned ITCond = Bit ? ITState.Cond :
5650      ARMCC::getOppositeCondition(ITState.Cond);
5651    if (Cond != ITCond) {
5652      // Find the condition code Operand to get its SMLoc information.
5653      SMLoc CondLoc;
5654      for (unsigned I = 1; I < Operands.size(); ++I)
5655        if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
5656          CondLoc = Operands[I]->getStartLoc();
5657      return Error(CondLoc, "incorrect condition in IT block; got '" +
5658                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5659                   "', but expected '" +
5660                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5661    }
5662  // Check for non-'al' condition codes outside of the IT block.
5663  } else if (isThumbTwo() && MCID.isPredicable() &&
5664             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5665             ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
5666             Inst.getOpcode() != ARM::t2Bcc)
5667    return Error(Loc, "predicated instructions must be in IT block");
5668
5669  const unsigned Opcode = Inst.getOpcode();
5670  switch (Opcode) {
5671  case ARM::LDRD:
5672  case ARM::LDRD_PRE:
5673  case ARM::LDRD_POST: {
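    // The ARM-mode LDRD encodings require an even-numbered Rt (other than LR)
    // with Rt2 == Rt + 1, e.g. "ldrd r0, r1, [r2]" is accepted while
    // "ldrd r1, r2, [r3]" is rejected by the checks below.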
5674    const unsigned RtReg = Inst.getOperand(0).getReg();
5675
5676    // Rt can't be R14.
5677    if (RtReg == ARM::LR)
5678      return Error(Operands[3]->getStartLoc(),
5679                   "Rt can't be R14");
5680
5681    const unsigned Rt = MRI->getEncodingValue(RtReg);
5682    // Rt must be even-numbered.
5683    if ((Rt & 1) == 1)
5684      return Error(Operands[3]->getStartLoc(),
5685                   "Rt must be even-numbered");
5686
5687    // Rt2 must be Rt + 1.
5688    const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5689    if (Rt2 != Rt + 1)
5690      return Error(Operands[3]->getStartLoc(),
5691                   "destination operands must be sequential");
5692
5693    if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
5694      const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
5695      // For addressing modes with writeback, the base register needs to be
5696      // different from the destination registers.
5697      if (Rn == Rt || Rn == Rt2)
5698        return Error(Operands[3]->getStartLoc(),
5699                     "base register needs to be different from destination "
5700                     "registers");
5701    }
5702
5703    return false;
5704  }
5705  case ARM::t2LDRDi8:
5706  case ARM::t2LDRD_PRE:
5707  case ARM::t2LDRD_POST: {
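    // The Thumb2 LDRD encoding has no even/odd pairing requirement, so unlike
    // the ARM-mode case above only identical destinations (e.g.
    // "ldrd r0, r0, [r1]") are rejected here.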
5708    // Rt2 must be different from Rt.
5709    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5710    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5711    if (Rt2 == Rt)
5712      return Error(Operands[3]->getStartLoc(),
5713                   "destination operands can't be identical");
5714    return false;
5715  }
5716  case ARM::STRD: {
5717    // Rt2 must be Rt + 1.
5718    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5719    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5720    if (Rt2 != Rt + 1)
5721      return Error(Operands[3]->getStartLoc(),
5722                   "source operands must be sequential");
5723    return false;
5724  }
5725  case ARM::STRD_PRE:
5726  case ARM::STRD_POST: {
5727    // Rt2 must be Rt + 1.
5728    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5729    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5730    if (Rt2 != Rt + 1)
5731      return Error(Operands[3]->getStartLoc(),
5732                   "source operands must be sequential");
5733    return false;
5734  }
5735  case ARM::SBFX:
5736  case ARM::UBFX: {
5737    // Width must be in range [1, 32-lsb].
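    // e.g. "ubfx r0, r1, #24, #8" is the widest field starting at bit 24;
    // "ubfx r0, r1, #24, #10" is rejected because lsb + width exceeds 32.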
5738    unsigned LSB = Inst.getOperand(2).getImm();
5739    unsigned Widthm1 = Inst.getOperand(3).getImm();
5740    if (Widthm1 >= 32 - LSB)
5741      return Error(Operands[5]->getStartLoc(),
5742                   "bitfield width must be in range [1,32-lsb]");
5743    return false;
5744  }
5745  // Notionally handles ARM::tLDMIA_UPD too.
5746  case ARM::tLDMIA: {
5747    // If we're parsing Thumb2, the .w variant is available and handles
5748    // most cases that are normally illegal for a Thumb1 LDM instruction.
5749    // We'll make the transformation in processInstruction() if necessary.
5750    //
5751    // Thumb LDM instructions are writeback iff the base register is not
5752    // in the register list.
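    // e.g. "ldm r0, {r0, r1}" is fine as written (r0 is reloaded, so no
    // writeback), but "ldm r0, {r1, r2}" must be spelled "ldm r0!, {r1, r2}"
    // in Thumb1.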
5753    unsigned Rn = Inst.getOperand(0).getReg();
5754    bool HasWritebackToken =
5755        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5756         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
5757    bool ListContainsBase;
5758    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
5759      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
5760                   "registers must be in range r0-r7");
5761    // If we should have writeback, then there should be a '!' token.
5762    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
5763      return Error(Operands[2]->getStartLoc(),
5764                   "writeback operator '!' expected");
5765    // If we should not have writeback, there must not be a '!'. This is
5766    // true even for the 32-bit wide encodings.
5767    if (ListContainsBase && HasWritebackToken)
5768      return Error(Operands[3]->getStartLoc(),
5769                   "writeback operator '!' not allowed when base register "
5770                   "in register list");
5771
5772    break;
5773  }
5774  case ARM::LDMIA_UPD:
5775  case ARM::LDMDB_UPD:
5776  case ARM::LDMIB_UPD:
5777  case ARM::LDMDA_UPD:
5778    // ARM variants loading and updating the same register are only officially
5779    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
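    // e.g. "ldmia r3!, {r2, r3}" is diagnosed below since r3 would be both
    // written back and loaded.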
5780    if (!hasV7Ops())
5781      break;
5782    // Fallthrough
5783  case ARM::t2LDMIA_UPD:
5784  case ARM::t2LDMDB_UPD:
5785  case ARM::t2STMIA_UPD:
5786  case ARM::t2STMDB_UPD: {
5787    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5788      return Error(Operands.back()->getStartLoc(),
5789                   "writeback register not allowed in register list");
5790    break;
5791  }
5792  case ARM::sysLDMIA_UPD:
5793  case ARM::sysLDMDA_UPD:
5794  case ARM::sysLDMDB_UPD:
5795  case ARM::sysLDMIB_UPD:
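    // System ("user registers") LDM with writeback is only meant for the
    // exception-return form, i.e. when PC is in the list, as in
    // "ldm r0!, {r1-r3, pc}^".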
5796    if (!listContainsReg(Inst, 3, ARM::PC))
5797      return Error(Operands[4]->getStartLoc(),
5798                   "writeback register only allowed on system LDM "
5799                   "if PC in register-list");
5800    break;
5801  case ARM::sysSTMIA_UPD:
5802  case ARM::sysSTMDA_UPD:
5803  case ARM::sysSTMDB_UPD:
5804  case ARM::sysSTMIB_UPD:
5805    return Error(Operands[2]->getStartLoc(),
5806                 "system STM cannot have writeback register");
5807  case ARM::tMUL: {
5808    // The second source operand must be the same register as the destination
5809    // operand.
5810    //
5811    // In this case, we must directly check the parsed operands because the
5812    // cvtThumbMultiply() function is written in such a way that it guarantees
5813    // this first statement is always true for the new Inst.  Essentially, the
5814    // destination is unconditionally copied into the second source operand
5815    // without checking to see if it matches what we actually parsed.
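    // e.g. "muls r0, r1, r0" and "muls r0, r0, r1" are accepted, but
    // "muls r0, r1, r2" is not: the 16-bit MUL destination must match one of
    // the sources.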
5816    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
5817                                 ((ARMOperand &)*Operands[5]).getReg()) &&
5818        (((ARMOperand &)*Operands[3]).getReg() !=
5819         ((ARMOperand &)*Operands[4]).getReg())) {
5820      return Error(Operands[3]->getStartLoc(),
5821                   "destination register must match source register");
5822    }
5823    break;
5824  }
5825  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5826  // so only issue a diagnostic for Thumb1. The instructions will be
5827  // switched to the t2 encodings in processInstruction() if necessary.
5828  case ARM::tPOP: {
5829    bool ListContainsBase;
5830    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
5831        !isThumbTwo())
5832      return Error(Operands[2]->getStartLoc(),
5833                   "registers must be in range r0-r7 or pc");
5834    break;
5835  }
5836  case ARM::tPUSH: {
5837    bool ListContainsBase;
5838    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
5839        !isThumbTwo())
5840      return Error(Operands[2]->getStartLoc(),
5841                   "registers must be in range r0-r7 or lr");
5842    break;
5843  }
5844  case ARM::tSTMIA_UPD: {
5845    bool ListContainsBase, InvalidLowList;
5846    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
5847                                          0, ListContainsBase);
5848    if (InvalidLowList && !isThumbTwo())
5849      return Error(Operands[4]->getStartLoc(),
5850                   "registers must be in range r0-r7");
5851
5852    // This would be converted to a 32-bit stm, but that's not valid if the
5853    // writeback register is in the list.
5854    if (InvalidLowList && ListContainsBase)
5855      return Error(Operands[4]->getStartLoc(),
5856                   "writeback operator '!' not allowed when base register "
5857                   "in register list");
5858    break;
5859  }
5860  case ARM::tADDrSP: {
5861    // If the non-SP source operand and the destination operand are not the
5862    // same, we need thumb2 (for the wide encoding), or we have an error.
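    // e.g. in Thumb1 "add r0, sp, r0" is encodable but "add r0, sp, r1" is
    // not.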
5863    if (!isThumbTwo() &&
5864        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5865      return Error(Operands[4]->getStartLoc(),
5866                   "source register must be the same as destination");
5867    }
5868    break;
5869  }
5870  // Final range checking for Thumb unconditional branch instructions.
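  // (Roughly: the 16-bit unconditional branch reaches about +/-2KB and the
  // 32-bit one about +/-16MB, per the isSignedOffset<> arguments below.)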
5871  case ARM::tB:
5872    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
5873      return Error(Operands[2]->getStartLoc(), "branch target out of range");
5874    break;
5875  case ARM::t2B: {
5876    int Op = (Operands[2]->isImm()) ? 2 : 3;
5877    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<24, 1>())
5878      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
5879    break;
5880  }
5881  // Final range checking for Thumb conditional branch instructions.
5882  case ARM::tBcc:
5883    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
5884      return Error(Operands[2]->getStartLoc(), "branch target out of range");
5885    break;
5886  case ARM::t2Bcc: {
5887    int Op = (Operands[2]->isImm()) ? 2 : 3;
5888    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
5889      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
5890    break;
5891  }
5892  case ARM::MOVi16:
5893  case ARM::t2MOVi16:
5894  case ARM::t2MOVTi16:
5895    {
5896    // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
5897    // especially when we turn it into a movw and the expression <symbol> does
5898    // not have a :lower16: or :upper16: as part of the expression.  We don't
5899    // want the behavior of silently truncating, which can be unexpected and
5900    // lead to bugs that are difficult to find since this is an easy mistake
5901    // to make.
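    // The accepted forms look like "movw r0, #:lower16:sym" and
    // "movt r0, #:upper16:sym".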
5902    int i = (Operands[3]->isImm()) ? 3 : 4;
5903    ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
5904    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5905    if (CE) break;
5906    const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
5907    if (!E) break;
5908    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
5909    if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
5910                       ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
5911      return Error(
5912          Op.getStartLoc(),
5913          "immediate expression for mov requires :lower16: or :upper16");
5914    break;
5915  }
5916  }
5917
5918  return false;
5919}
5920
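// Map a VST assembly pseudo-opcode ("..._Asm_...") to the real instruction
// opcode, and report the register-list stride in D registers through Spacing:
// 1 for consecutive lists such as {d0, d1, ...}, 2 for even-spaced lists such
// as {d0, d2, ...}.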
5921static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5922  switch(Opc) {
5923  default: llvm_unreachable("unexpected opcode!");
5924  // VST1LN
5925  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5926  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5927  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5928  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5929  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5930  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5931  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5932  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5933  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5934
5935  // VST2LN
5936  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5937  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5938  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5939  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5940  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5941
5942  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5943  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5944  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5945  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5946  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5947
5948  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5949  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5950  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5951  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5952  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5953
5954  // VST3LN
5955  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5956  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5957  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5958  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5959  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5960  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5961  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5962  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5963  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5964  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5965  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5966  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5967  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5968  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5969  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5970
5971  // VST3
5972  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5973  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5974  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5975  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5976  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5977  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5978  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5979  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5980  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5981  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5982  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5983  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5984  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5985  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5986  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5987  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5988  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5989  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5990
5991  // VST4LN
5992  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5993  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5994  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5995  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5996  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5997  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5998  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5999  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6000  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6001  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6002  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
6003  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6004  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6005  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6006  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6007
6008  // VST4
6009  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6010  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6011  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6012  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6013  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6014  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6015  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6016  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6017  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6018  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6019  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6020  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6021  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
6022  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6023  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6024  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
6025  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6026  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6027  }
6028}
6029
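// Same mapping for the VLD pseudo-opcodes; Spacing again gives the D-register
// stride of the list (1 = consecutive, 2 = every other D register).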
6030static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6031  switch(Opc) {
6032  default: llvm_unreachable("unexpected opcode!");
6033  // VLD1LN
6034  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6035  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6036  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6037  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6038  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6039  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6040  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
6041  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6042  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6043
6044  // VLD2LN
6045  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6046  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6047  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6048  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6049  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6050  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6051  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6052  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6053  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6054  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6055  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
6056  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6057  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6058  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6059  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6060
6061  // VLD3DUP
6062  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6063  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6064  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6065  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6066  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6067  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6068  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6069  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6070  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6071  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6072  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6073  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6074  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
6075  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6076  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6077  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6078  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6079  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6080
6081  // VLD3LN
6082  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6083  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6084  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6085  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6086  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6087  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6088  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6089  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6090  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6091  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6092  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
6093  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6094  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6095  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6096  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6097
6098  // VLD3
6099  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6100  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6101  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6102  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6103  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6104  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6105  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6106  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6107  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6108  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6109  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6110  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6111  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
6112  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6113  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6114  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
6115  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6116  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6117
6118  // VLD4LN
6119  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6120  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6121  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6122  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6123  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6124  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6125  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6126  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6127  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6128  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6129  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
6130  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6131  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6132  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6133  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6134
6135  // VLD4DUP
6136  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6137  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6138  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6139  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6140  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6141  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6142  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6143  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6144  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6145  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6146  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6147  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6148  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
6149  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6150  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6151  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6152  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6153  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6154
6155  // VLD4
6156  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6157  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6158  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6159  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6160  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6161  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6162  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6163  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6164  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6165  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6166  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6167  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6168  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
6169  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6170  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6171  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
6172  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6173  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6174  }
6175}
6176
6177bool ARMAsmParser::processInstruction(MCInst &Inst,
6178                                      const OperandVector &Operands,
6179                                      MCStreamer &Out) {
6180  switch (Inst.getOpcode()) {
6181  // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
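  // i.e. a bare "ldrt r0, [r1]" effectively behaves like "ldrt r0, [r1], #0":
  // the real *_POST_IMM opcode expects an offset register slot and an
  // immediate, so zeroes are filled in below.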
6182  case ARM::LDRT_POST:
6183  case ARM::LDRBT_POST: {
6184    const unsigned Opcode =
6185      (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6186                                           : ARM::LDRBT_POST_IMM;
6187    MCInst TmpInst;
6188    TmpInst.setOpcode(Opcode);
6189    TmpInst.addOperand(Inst.getOperand(0));
6190    TmpInst.addOperand(Inst.getOperand(1));
6191    TmpInst.addOperand(Inst.getOperand(1));
6192    TmpInst.addOperand(MCOperand::CreateReg(0));
6193    TmpInst.addOperand(MCOperand::CreateImm(0));
6194    TmpInst.addOperand(Inst.getOperand(2));
6195    TmpInst.addOperand(Inst.getOperand(3));
6196    Inst = TmpInst;
6197    return true;
6198  }
6199  // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6200  case ARM::STRT_POST:
6201  case ARM::STRBT_POST: {
6202    const unsigned Opcode =
6203      (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6204                                           : ARM::STRBT_POST_IMM;
6205    MCInst TmpInst;
6206    TmpInst.setOpcode(Opcode);
6207    TmpInst.addOperand(Inst.getOperand(1));
6208    TmpInst.addOperand(Inst.getOperand(0));
6209    TmpInst.addOperand(Inst.getOperand(1));
6210    TmpInst.addOperand(MCOperand::CreateReg(0));
6211    TmpInst.addOperand(MCOperand::CreateImm(0));
6212    TmpInst.addOperand(Inst.getOperand(2));
6213    TmpInst.addOperand(Inst.getOperand(3));
6214    Inst = TmpInst;
6215    return true;
6216  }
6217  // Alias for alternate form of 'ADR Rd, #imm' instruction.
6218  case ARM::ADDri: {
6219    if (Inst.getOperand(1).getReg() != ARM::PC ||
6220        Inst.getOperand(5).getReg() != 0 ||
6221        !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6222      return false;
6223    MCInst TmpInst;
6224    TmpInst.setOpcode(ARM::ADR);
6225    TmpInst.addOperand(Inst.getOperand(0));
6226    if (Inst.getOperand(2).isImm()) {
6227      TmpInst.addOperand(Inst.getOperand(2));
6228    } else {
6229      // Turn PC-relative expression into absolute expression.
6230      // Reading PC provides the start of the current instruction + 8 and
6231      // the transform to adr is biased by that.
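      // The fixup expression built below is (. + 8) + <expr>, i.e. the address
      // the equivalent "add Rd, pc, #<expr>" would have produced.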
6232      MCSymbol *Dot = getContext().CreateTempSymbol();
6233      Out.EmitLabel(Dot);
6234      const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6235      const MCExpr *InstPC = MCSymbolRefExpr::Create(Dot,
6236                                                     MCSymbolRefExpr::VK_None,
6237                                                     getContext());
6238      const MCExpr *Const8 = MCConstantExpr::Create(8, getContext());
6239      const MCExpr *ReadPC = MCBinaryExpr::CreateAdd(InstPC, Const8,
6240                                                     getContext());
6241      const MCExpr *FixupAddr = MCBinaryExpr::CreateAdd(ReadPC, OpExpr,
6242                                                        getContext());
6243      TmpInst.addOperand(MCOperand::CreateExpr(FixupAddr));
6244    }
6245    TmpInst.addOperand(Inst.getOperand(3));
6246    TmpInst.addOperand(Inst.getOperand(4));
6247    Inst = TmpInst;
6248    return true;
6249  }
6250  // Aliases for alternate PC+imm syntax of LDR instructions.
6251  case ARM::t2LDRpcrel:
6252    // Select the narrow version if the immediate will fit.
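    // A positive offset of at most 255 with no explicit ".w" suffix selects
    // the 16-bit tLDRpci encoding; anything else keeps the 32-bit form.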
6253    if (Inst.getOperand(1).getImm() > 0 &&
6254        Inst.getOperand(1).getImm() <= 0xff &&
6255        !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
6256          static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
6257      Inst.setOpcode(ARM::tLDRpci);
6258    else
6259      Inst.setOpcode(ARM::t2LDRpci);
6260    return true;
6261  case ARM::t2LDRBpcrel:
6262    Inst.setOpcode(ARM::t2LDRBpci);
6263    return true;
6264  case ARM::t2LDRHpcrel:
6265    Inst.setOpcode(ARM::t2LDRHpci);
6266    return true;
6267  case ARM::t2LDRSBpcrel:
6268    Inst.setOpcode(ARM::t2LDRSBpci);
6269    return true;
6270  case ARM::t2LDRSHpcrel:
6271    Inst.setOpcode(ARM::t2LDRSHpci);
6272    return true;
6273  // Handle NEON VST complex aliases.
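  // The *_Asm_* pseudo-opcodes keep the operand order as written in assembly;
  // each case below rewrites them into the real instruction's operand order
  // and expands the register list using the stride returned in Spacing.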
6274  case ARM::VST1LNdWB_register_Asm_8:
6275  case ARM::VST1LNdWB_register_Asm_16:
6276  case ARM::VST1LNdWB_register_Asm_32: {
6277    MCInst TmpInst;
6278    // Shuffle the operands around so the lane index operand is in the
6279    // right place.
6280    unsigned Spacing;
6281    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6282    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6283    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6284    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6285    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6286    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6287    TmpInst.addOperand(Inst.getOperand(1)); // lane
6288    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6289    TmpInst.addOperand(Inst.getOperand(6));
6290    Inst = TmpInst;
6291    return true;
6292  }
6293
6294  case ARM::VST2LNdWB_register_Asm_8:
6295  case ARM::VST2LNdWB_register_Asm_16:
6296  case ARM::VST2LNdWB_register_Asm_32:
6297  case ARM::VST2LNqWB_register_Asm_16:
6298  case ARM::VST2LNqWB_register_Asm_32: {
6299    MCInst TmpInst;
6300    // Shuffle the operands around so the lane index operand is in the
6301    // right place.
6302    unsigned Spacing;
6303    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6304    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6305    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6306    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6307    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6308    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6309    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6310                                            Spacing));
6311    TmpInst.addOperand(Inst.getOperand(1)); // lane
6312    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6313    TmpInst.addOperand(Inst.getOperand(6));
6314    Inst = TmpInst;
6315    return true;
6316  }
6317
6318  case ARM::VST3LNdWB_register_Asm_8:
6319  case ARM::VST3LNdWB_register_Asm_16:
6320  case ARM::VST3LNdWB_register_Asm_32:
6321  case ARM::VST3LNqWB_register_Asm_16:
6322  case ARM::VST3LNqWB_register_Asm_32: {
6323    MCInst TmpInst;
6324    // Shuffle the operands around so the lane index operand is in the
6325    // right place.
6326    unsigned Spacing;
6327    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6328    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6329    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6330    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6331    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6332    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6333    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6334                                            Spacing));
6335    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6336                                            Spacing * 2));
6337    TmpInst.addOperand(Inst.getOperand(1)); // lane
6338    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6339    TmpInst.addOperand(Inst.getOperand(6));
6340    Inst = TmpInst;
6341    return true;
6342  }
6343
6344  case ARM::VST4LNdWB_register_Asm_8:
6345  case ARM::VST4LNdWB_register_Asm_16:
6346  case ARM::VST4LNdWB_register_Asm_32:
6347  case ARM::VST4LNqWB_register_Asm_16:
6348  case ARM::VST4LNqWB_register_Asm_32: {
6349    MCInst TmpInst;
6350    // Shuffle the operands around so the lane index operand is in the
6351    // right place.
6352    unsigned Spacing;
6353    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6354    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6355    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6356    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6357    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6358    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6359    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6360                                            Spacing));
6361    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6362                                            Spacing * 2));
6363    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6364                                            Spacing * 3));
6365    TmpInst.addOperand(Inst.getOperand(1)); // lane
6366    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6367    TmpInst.addOperand(Inst.getOperand(6));
6368    Inst = TmpInst;
6369    return true;
6370  }
6371
6372  case ARM::VST1LNdWB_fixed_Asm_8:
6373  case ARM::VST1LNdWB_fixed_Asm_16:
6374  case ARM::VST1LNdWB_fixed_Asm_32: {
6375    MCInst TmpInst;
6376    // Shuffle the operands around so the lane index operand is in the
6377    // right place.
6378    unsigned Spacing;
6379    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6380    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6381    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6382    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6383    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6384    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6385    TmpInst.addOperand(Inst.getOperand(1)); // lane
6386    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6387    TmpInst.addOperand(Inst.getOperand(5));
6388    Inst = TmpInst;
6389    return true;
6390  }
6391
6392  case ARM::VST2LNdWB_fixed_Asm_8:
6393  case ARM::VST2LNdWB_fixed_Asm_16:
6394  case ARM::VST2LNdWB_fixed_Asm_32:
6395  case ARM::VST2LNqWB_fixed_Asm_16:
6396  case ARM::VST2LNqWB_fixed_Asm_32: {
6397    MCInst TmpInst;
6398    // Shuffle the operands around so the lane index operand is in the
6399    // right place.
6400    unsigned Spacing;
6401    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6402    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6403    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6404    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6405    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6406    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6407    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6408                                            Spacing));
6409    TmpInst.addOperand(Inst.getOperand(1)); // lane
6410    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6411    TmpInst.addOperand(Inst.getOperand(5));
6412    Inst = TmpInst;
6413    return true;
6414  }
6415
6416  case ARM::VST3LNdWB_fixed_Asm_8:
6417  case ARM::VST3LNdWB_fixed_Asm_16:
6418  case ARM::VST3LNdWB_fixed_Asm_32:
6419  case ARM::VST3LNqWB_fixed_Asm_16:
6420  case ARM::VST3LNqWB_fixed_Asm_32: {
6421    MCInst TmpInst;
6422    // Shuffle the operands around so the lane index operand is in the
6423    // right place.
6424    unsigned Spacing;
6425    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6426    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6427    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6428    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6429    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6430    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6431    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6432                                            Spacing));
6433    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6434                                            Spacing * 2));
6435    TmpInst.addOperand(Inst.getOperand(1)); // lane
6436    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6437    TmpInst.addOperand(Inst.getOperand(5));
6438    Inst = TmpInst;
6439    return true;
6440  }
6441
6442  case ARM::VST4LNdWB_fixed_Asm_8:
6443  case ARM::VST4LNdWB_fixed_Asm_16:
6444  case ARM::VST4LNdWB_fixed_Asm_32:
6445  case ARM::VST4LNqWB_fixed_Asm_16:
6446  case ARM::VST4LNqWB_fixed_Asm_32: {
6447    MCInst TmpInst;
6448    // Shuffle the operands around so the lane index operand is in the
6449    // right place.
6450    unsigned Spacing;
6451    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6452    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6453    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6454    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6455    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6456    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6457    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6458                                            Spacing));
6459    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6460                                            Spacing * 2));
6461    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6462                                            Spacing * 3));
6463    TmpInst.addOperand(Inst.getOperand(1)); // lane
6464    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6465    TmpInst.addOperand(Inst.getOperand(5));
6466    Inst = TmpInst;
6467    return true;
6468  }
6469
6470  case ARM::VST1LNdAsm_8:
6471  case ARM::VST1LNdAsm_16:
6472  case ARM::VST1LNdAsm_32: {
6473    MCInst TmpInst;
6474    // Shuffle the operands around so the lane index operand is in the
6475    // right place.
6476    unsigned Spacing;
6477    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6478    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6479    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6480    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6481    TmpInst.addOperand(Inst.getOperand(1)); // lane
6482    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6483    TmpInst.addOperand(Inst.getOperand(5));
6484    Inst = TmpInst;
6485    return true;
6486  }
6487
6488  case ARM::VST2LNdAsm_8:
6489  case ARM::VST2LNdAsm_16:
6490  case ARM::VST2LNdAsm_32:
6491  case ARM::VST2LNqAsm_16:
6492  case ARM::VST2LNqAsm_32: {
6493    MCInst TmpInst;
6494    // Shuffle the operands around so the lane index operand is in the
6495    // right place.
6496    unsigned Spacing;
6497    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6498    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6499    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6500    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6501    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6502                                            Spacing));
6503    TmpInst.addOperand(Inst.getOperand(1)); // lane
6504    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6505    TmpInst.addOperand(Inst.getOperand(5));
6506    Inst = TmpInst;
6507    return true;
6508  }
6509
6510  case ARM::VST3LNdAsm_8:
6511  case ARM::VST3LNdAsm_16:
6512  case ARM::VST3LNdAsm_32:
6513  case ARM::VST3LNqAsm_16:
6514  case ARM::VST3LNqAsm_32: {
6515    MCInst TmpInst;
6516    // Shuffle the operands around so the lane index operand is in the
6517    // right place.
6518    unsigned Spacing;
6519    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6520    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6521    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6522    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6523    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6524                                            Spacing));
6525    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6526                                            Spacing * 2));
6527    TmpInst.addOperand(Inst.getOperand(1)); // lane
6528    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6529    TmpInst.addOperand(Inst.getOperand(5));
6530    Inst = TmpInst;
6531    return true;
6532  }
6533
6534  case ARM::VST4LNdAsm_8:
6535  case ARM::VST4LNdAsm_16:
6536  case ARM::VST4LNdAsm_32:
6537  case ARM::VST4LNqAsm_16:
6538  case ARM::VST4LNqAsm_32: {
6539    MCInst TmpInst;
6540    // Shuffle the operands around so the lane index operand is in the
6541    // right place.
6542    unsigned Spacing;
6543    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6544    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6545    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6546    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6547    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6548                                            Spacing));
6549    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6550                                            Spacing * 2));
6551    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6552                                            Spacing * 3));
6553    TmpInst.addOperand(Inst.getOperand(1)); // lane
6554    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6555    TmpInst.addOperand(Inst.getOperand(5));
6556    Inst = TmpInst;
6557    return true;
6558  }
6559
6560  // Handle NEON VLD complex aliases.
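  // Loads are reshuffled the same way as the stores above, with one addition:
  // the real VLD*LN opcodes tie the loaded registers to an input list, so Vd
  // (and its strided neighbours) are repeated as the tied source operands.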
6561  case ARM::VLD1LNdWB_register_Asm_8:
6562  case ARM::VLD1LNdWB_register_Asm_16:
6563  case ARM::VLD1LNdWB_register_Asm_32: {
6564    MCInst TmpInst;
6565    // Shuffle the operands around so the lane index operand is in the
6566    // right place.
6567    unsigned Spacing;
6568    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6569    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6570    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6571    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6572    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6573    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6574    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6575    TmpInst.addOperand(Inst.getOperand(1)); // lane
6576    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6577    TmpInst.addOperand(Inst.getOperand(6));
6578    Inst = TmpInst;
6579    return true;
6580  }
6581
6582  case ARM::VLD2LNdWB_register_Asm_8:
6583  case ARM::VLD2LNdWB_register_Asm_16:
6584  case ARM::VLD2LNdWB_register_Asm_32:
6585  case ARM::VLD2LNqWB_register_Asm_16:
6586  case ARM::VLD2LNqWB_register_Asm_32: {
6587    MCInst TmpInst;
6588    // Shuffle the operands around so the lane index operand is in the
6589    // right place.
6590    unsigned Spacing;
6591    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6592    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6593    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6594                                            Spacing));
6595    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6596    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6597    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6598    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6599    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6600    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6601                                            Spacing));
6602    TmpInst.addOperand(Inst.getOperand(1)); // lane
6603    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6604    TmpInst.addOperand(Inst.getOperand(6));
6605    Inst = TmpInst;
6606    return true;
6607  }
6608
6609  case ARM::VLD3LNdWB_register_Asm_8:
6610  case ARM::VLD3LNdWB_register_Asm_16:
6611  case ARM::VLD3LNdWB_register_Asm_32:
6612  case ARM::VLD3LNqWB_register_Asm_16:
6613  case ARM::VLD3LNqWB_register_Asm_32: {
6614    MCInst TmpInst;
6615    // Shuffle the operands around so the lane index operand is in the
6616    // right place.
6617    unsigned Spacing;
6618    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6619    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6620    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6621                                            Spacing));
6622    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6623                                            Spacing * 2));
6624    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6625    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6626    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6627    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6628    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6630                                            Spacing));
6631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6632                                            Spacing * 2));
6633    TmpInst.addOperand(Inst.getOperand(1)); // lane
6634    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6635    TmpInst.addOperand(Inst.getOperand(6));
6636    Inst = TmpInst;
6637    return true;
6638  }
6639
6640  case ARM::VLD4LNdWB_register_Asm_8:
6641  case ARM::VLD4LNdWB_register_Asm_16:
6642  case ARM::VLD4LNdWB_register_Asm_32:
6643  case ARM::VLD4LNqWB_register_Asm_16:
6644  case ARM::VLD4LNqWB_register_Asm_32: {
6645    MCInst TmpInst;
6646    // Shuffle the operands around so the lane index operand is in the
6647    // right place.
6648    unsigned Spacing;
6649    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6650    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6651    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6652                                            Spacing));
6653    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6654                                            Spacing * 2));
6655    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6656                                            Spacing * 3));
6657    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6658    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6659    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6660    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6661    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6662    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6663                                            Spacing));
6664    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6665                                            Spacing * 2));
6666    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6667                                            Spacing * 3));
6668    TmpInst.addOperand(Inst.getOperand(1)); // lane
6669    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6670    TmpInst.addOperand(Inst.getOperand(6));
6671    Inst = TmpInst;
6672    return true;
6673  }
6674
6675  case ARM::VLD1LNdWB_fixed_Asm_8:
6676  case ARM::VLD1LNdWB_fixed_Asm_16:
6677  case ARM::VLD1LNdWB_fixed_Asm_32: {
6678    MCInst TmpInst;
6679    // Shuffle the operands around so the lane index operand is in the
6680    // right place.
6681    unsigned Spacing;
6682    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6683    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6684    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6685    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6686    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6687    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6688    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6689    TmpInst.addOperand(Inst.getOperand(1)); // lane
6690    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6691    TmpInst.addOperand(Inst.getOperand(5));
6692    Inst = TmpInst;
6693    return true;
6694  }
6695
6696  case ARM::VLD2LNdWB_fixed_Asm_8:
6697  case ARM::VLD2LNdWB_fixed_Asm_16:
6698  case ARM::VLD2LNdWB_fixed_Asm_32:
6699  case ARM::VLD2LNqWB_fixed_Asm_16:
6700  case ARM::VLD2LNqWB_fixed_Asm_32: {
6701    MCInst TmpInst;
6702    // Shuffle the operands around so the lane index operand is in the
6703    // right place.
6704    unsigned Spacing;
6705    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6706    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6707    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6708                                            Spacing));
6709    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6710    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6711    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6712    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6713    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6714    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6715                                            Spacing));
6716    TmpInst.addOperand(Inst.getOperand(1)); // lane
6717    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6718    TmpInst.addOperand(Inst.getOperand(5));
6719    Inst = TmpInst;
6720    return true;
6721  }
6722
6723  case ARM::VLD3LNdWB_fixed_Asm_8:
6724  case ARM::VLD3LNdWB_fixed_Asm_16:
6725  case ARM::VLD3LNdWB_fixed_Asm_32:
6726  case ARM::VLD3LNqWB_fixed_Asm_16:
6727  case ARM::VLD3LNqWB_fixed_Asm_32: {
6728    MCInst TmpInst;
6729    // Shuffle the operands around so the lane index operand is in the
6730    // right place.
6731    unsigned Spacing;
6732    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6733    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6734    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6735                                            Spacing));
6736    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6737                                            Spacing * 2));
6738    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6739    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6740    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6741    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6742    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6743    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6744                                            Spacing));
6745    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6746                                            Spacing * 2));
6747    TmpInst.addOperand(Inst.getOperand(1)); // lane
6748    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6749    TmpInst.addOperand(Inst.getOperand(5));
6750    Inst = TmpInst;
6751    return true;
6752  }
6753
6754  case ARM::VLD4LNdWB_fixed_Asm_8:
6755  case ARM::VLD4LNdWB_fixed_Asm_16:
6756  case ARM::VLD4LNdWB_fixed_Asm_32:
6757  case ARM::VLD4LNqWB_fixed_Asm_16:
6758  case ARM::VLD4LNqWB_fixed_Asm_32: {
6759    MCInst TmpInst;
6760    // Shuffle the operands around so the lane index operand is in the
6761    // right place.
6762    unsigned Spacing;
6763    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6764    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6765    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6766                                            Spacing));
6767    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6768                                            Spacing * 2));
6769    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6770                                            Spacing * 3));
6771    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6772    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6773    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6774    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6775    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6776    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6777                                            Spacing));
6778    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6779                                            Spacing * 2));
6780    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6781                                            Spacing * 3));
6782    TmpInst.addOperand(Inst.getOperand(1)); // lane
6783    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6784    TmpInst.addOperand(Inst.getOperand(5));
6785    Inst = TmpInst;
6786    return true;
6787  }
6788
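  // Single-lane loads without writeback; illustrative source forms (not from
  // the original) are 'vld1.8 {d0[2]}, [r0]' and 'vld2.16 {d0[1], d1[1]}, [r0]'.
  // The *_Asm pseudos carry the lane index as operand 1; the expansions below
  // move it after the tied source registers where the real opcodes expect it.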
6789  case ARM::VLD1LNdAsm_8:
6790  case ARM::VLD1LNdAsm_16:
6791  case ARM::VLD1LNdAsm_32: {
6792    MCInst TmpInst;
6793    // Shuffle the operands around so the lane index operand is in the
6794    // right place.
6795    unsigned Spacing;
6796    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6797    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6798    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6799    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6800    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6801    TmpInst.addOperand(Inst.getOperand(1)); // lane
6802    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6803    TmpInst.addOperand(Inst.getOperand(5));
6804    Inst = TmpInst;
6805    return true;
6806  }
6807
6808  case ARM::VLD2LNdAsm_8:
6809  case ARM::VLD2LNdAsm_16:
6810  case ARM::VLD2LNdAsm_32:
6811  case ARM::VLD2LNqAsm_16:
6812  case ARM::VLD2LNqAsm_32: {
6813    MCInst TmpInst;
6814    // Shuffle the operands around so the lane index operand is in the
6815    // right place.
6816    unsigned Spacing;
6817    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6818    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6819    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6820                                            Spacing));
6821    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6822    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6823    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6824    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6825                                            Spacing));
6826    TmpInst.addOperand(Inst.getOperand(1)); // lane
6827    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6828    TmpInst.addOperand(Inst.getOperand(5));
6829    Inst = TmpInst;
6830    return true;
6831  }
6832
6833  case ARM::VLD3LNdAsm_8:
6834  case ARM::VLD3LNdAsm_16:
6835  case ARM::VLD3LNdAsm_32:
6836  case ARM::VLD3LNqAsm_16:
6837  case ARM::VLD3LNqAsm_32: {
6838    MCInst TmpInst;
6839    // Shuffle the operands around so the lane index operand is in the
6840    // right place.
6841    unsigned Spacing;
6842    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6843    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6844    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6845                                            Spacing));
6846    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6847                                            Spacing * 2));
6848    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6849    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6850    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6851    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6852                                            Spacing));
6853    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6854                                            Spacing * 2));
6855    TmpInst.addOperand(Inst.getOperand(1)); // lane
6856    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6857    TmpInst.addOperand(Inst.getOperand(5));
6858    Inst = TmpInst;
6859    return true;
6860  }
6861
6862  case ARM::VLD4LNdAsm_8:
6863  case ARM::VLD4LNdAsm_16:
6864  case ARM::VLD4LNdAsm_32:
6865  case ARM::VLD4LNqAsm_16:
6866  case ARM::VLD4LNqAsm_32: {
6867    MCInst TmpInst;
6868    // Shuffle the operands around so the lane index operand is in the
6869    // right place.
6870    unsigned Spacing;
6871    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6872    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6873    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6874                                            Spacing));
6875    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6876                                            Spacing * 2));
6877    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6878                                            Spacing * 3));
6879    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6880    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6881    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6882    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6883                                            Spacing));
6884    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6885                                            Spacing * 2));
6886    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6887                                            Spacing * 3));
6888    TmpInst.addOperand(Inst.getOperand(1)); // lane
6889    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6890    TmpInst.addOperand(Inst.getOperand(5));
6891    Inst = TmpInst;
6892    return true;
6893  }
6894
6895  // VLD3DUP single 3-element structure to all lanes instructions.
6896  case ARM::VLD3DUPdAsm_8:
6897  case ARM::VLD3DUPdAsm_16:
6898  case ARM::VLD3DUPdAsm_32:
6899  case ARM::VLD3DUPqAsm_8:
6900  case ARM::VLD3DUPqAsm_16:
6901  case ARM::VLD3DUPqAsm_32: {
6902    MCInst TmpInst;
6903    unsigned Spacing;
6904    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6905    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6906    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6907                                            Spacing));
6908    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6909                                            Spacing * 2));
6910    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6911    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6912    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6913    TmpInst.addOperand(Inst.getOperand(4));
6914    Inst = TmpInst;
6915    return true;
6916  }
6917
6918  case ARM::VLD3DUPdWB_fixed_Asm_8:
6919  case ARM::VLD3DUPdWB_fixed_Asm_16:
6920  case ARM::VLD3DUPdWB_fixed_Asm_32:
6921  case ARM::VLD3DUPqWB_fixed_Asm_8:
6922  case ARM::VLD3DUPqWB_fixed_Asm_16:
6923  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6924    MCInst TmpInst;
6925    unsigned Spacing;
6926    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6927    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6928    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6929                                            Spacing));
6930    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6931                                            Spacing * 2));
6932    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6933    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6934    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6935    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6936    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6937    TmpInst.addOperand(Inst.getOperand(4));
6938    Inst = TmpInst;
6939    return true;
6940  }
6941
6942  case ARM::VLD3DUPdWB_register_Asm_8:
6943  case ARM::VLD3DUPdWB_register_Asm_16:
6944  case ARM::VLD3DUPdWB_register_Asm_32:
6945  case ARM::VLD3DUPqWB_register_Asm_8:
6946  case ARM::VLD3DUPqWB_register_Asm_16:
6947  case ARM::VLD3DUPqWB_register_Asm_32: {
6948    MCInst TmpInst;
6949    unsigned Spacing;
6950    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6951    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6952    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6953                                            Spacing));
6954    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6955                                            Spacing * 2));
6956    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6957    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6958    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6959    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6960    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6961    TmpInst.addOperand(Inst.getOperand(5));
6962    Inst = TmpInst;
6963    return true;
6964  }
6965
6966  // VLD3 multiple 3-element structure instructions.
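  // e.g. (illustrative) 'vld3.8 {d0, d1, d2}, [r0]'. The writeback variants
  // below also add the tied Rn_wb operand plus either a zero Rm (fixed form,
  // '[rn]!') or the parsed Rm (register form, '[rn], rm').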
6967  case ARM::VLD3dAsm_8:
6968  case ARM::VLD3dAsm_16:
6969  case ARM::VLD3dAsm_32:
6970  case ARM::VLD3qAsm_8:
6971  case ARM::VLD3qAsm_16:
6972  case ARM::VLD3qAsm_32: {
6973    MCInst TmpInst;
6974    unsigned Spacing;
6975    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6976    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6977    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6978                                            Spacing));
6979    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6980                                            Spacing * 2));
6981    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6982    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6983    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6984    TmpInst.addOperand(Inst.getOperand(4));
6985    Inst = TmpInst;
6986    return true;
6987  }
6988
6989  case ARM::VLD3dWB_fixed_Asm_8:
6990  case ARM::VLD3dWB_fixed_Asm_16:
6991  case ARM::VLD3dWB_fixed_Asm_32:
6992  case ARM::VLD3qWB_fixed_Asm_8:
6993  case ARM::VLD3qWB_fixed_Asm_16:
6994  case ARM::VLD3qWB_fixed_Asm_32: {
6995    MCInst TmpInst;
6996    unsigned Spacing;
6997    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6998    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6999    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7000                                            Spacing));
7001    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7002                                            Spacing * 2));
7003    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7004    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7005    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7006    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7007    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7008    TmpInst.addOperand(Inst.getOperand(4));
7009    Inst = TmpInst;
7010    return true;
7011  }
7012
7013  case ARM::VLD3dWB_register_Asm_8:
7014  case ARM::VLD3dWB_register_Asm_16:
7015  case ARM::VLD3dWB_register_Asm_32:
7016  case ARM::VLD3qWB_register_Asm_8:
7017  case ARM::VLD3qWB_register_Asm_16:
7018  case ARM::VLD3qWB_register_Asm_32: {
7019    MCInst TmpInst;
7020    unsigned Spacing;
7021    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7022    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7023    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7024                                            Spacing));
7025    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7026                                            Spacing * 2));
7027    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7028    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7029    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7030    TmpInst.addOperand(Inst.getOperand(3)); // Rm
7031    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7032    TmpInst.addOperand(Inst.getOperand(5));
7033    Inst = TmpInst;
7034    return true;
7035  }
7036
7037  // VLD4DUP single 3-element structure to all lanes instructions.
7038  case ARM::VLD4DUPdAsm_8:
7039  case ARM::VLD4DUPdAsm_16:
7040  case ARM::VLD4DUPdAsm_32:
7041  case ARM::VLD4DUPqAsm_8:
7042  case ARM::VLD4DUPqAsm_16:
7043  case ARM::VLD4DUPqAsm_32: {
7044    MCInst TmpInst;
7045    unsigned Spacing;
7046    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7047    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7049                                            Spacing));
7050    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7051                                            Spacing * 2));
7052    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7053                                            Spacing * 3));
7054    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7055    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7056    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7057    TmpInst.addOperand(Inst.getOperand(4));
7058    Inst = TmpInst;
7059    return true;
7060  }
7061
7062  case ARM::VLD4DUPdWB_fixed_Asm_8:
7063  case ARM::VLD4DUPdWB_fixed_Asm_16:
7064  case ARM::VLD4DUPdWB_fixed_Asm_32:
7065  case ARM::VLD4DUPqWB_fixed_Asm_8:
7066  case ARM::VLD4DUPqWB_fixed_Asm_16:
7067  case ARM::VLD4DUPqWB_fixed_Asm_32: {
7068    MCInst TmpInst;
7069    unsigned Spacing;
7070    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7071    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7072    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7073                                            Spacing));
7074    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7075                                            Spacing * 2));
7076    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7077                                            Spacing * 3));
7078    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7079    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7080    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7081    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7082    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7083    TmpInst.addOperand(Inst.getOperand(4));
7084    Inst = TmpInst;
7085    return true;
7086  }
7087
7088  case ARM::VLD4DUPdWB_register_Asm_8:
7089  case ARM::VLD4DUPdWB_register_Asm_16:
7090  case ARM::VLD4DUPdWB_register_Asm_32:
7091  case ARM::VLD4DUPqWB_register_Asm_8:
7092  case ARM::VLD4DUPqWB_register_Asm_16:
7093  case ARM::VLD4DUPqWB_register_Asm_32: {
7094    MCInst TmpInst;
7095    unsigned Spacing;
7096    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7097    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7098    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7099                                            Spacing));
7100    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7101                                            Spacing * 2));
7102    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7103                                            Spacing * 3));
7104    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7105    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7106    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7107    TmpInst.addOperand(Inst.getOperand(3)); // Rm
7108    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7109    TmpInst.addOperand(Inst.getOperand(5));
7110    Inst = TmpInst;
7111    return true;
7112  }
7113
7114  // VLD4 multiple 4-element structure instructions.
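  // e.g. (illustrative) 'vld4.16 {d0, d1, d2, d3}, [r0]'.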
7115  case ARM::VLD4dAsm_8:
7116  case ARM::VLD4dAsm_16:
7117  case ARM::VLD4dAsm_32:
7118  case ARM::VLD4qAsm_8:
7119  case ARM::VLD4qAsm_16:
7120  case ARM::VLD4qAsm_32: {
7121    MCInst TmpInst;
7122    unsigned Spacing;
7123    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7124    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7125    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7126                                            Spacing));
7127    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7128                                            Spacing * 2));
7129    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7130                                            Spacing * 3));
7131    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7132    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7133    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7134    TmpInst.addOperand(Inst.getOperand(4));
7135    Inst = TmpInst;
7136    return true;
7137  }
7138
7139  case ARM::VLD4dWB_fixed_Asm_8:
7140  case ARM::VLD4dWB_fixed_Asm_16:
7141  case ARM::VLD4dWB_fixed_Asm_32:
7142  case ARM::VLD4qWB_fixed_Asm_8:
7143  case ARM::VLD4qWB_fixed_Asm_16:
7144  case ARM::VLD4qWB_fixed_Asm_32: {
7145    MCInst TmpInst;
7146    unsigned Spacing;
7147    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7148    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7149    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7150                                            Spacing));
7151    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7152                                            Spacing * 2));
7153    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7154                                            Spacing * 3));
7155    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7156    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7157    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7158    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7159    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7160    TmpInst.addOperand(Inst.getOperand(4));
7161    Inst = TmpInst;
7162    return true;
7163  }
7164
7165  case ARM::VLD4dWB_register_Asm_8:
7166  case ARM::VLD4dWB_register_Asm_16:
7167  case ARM::VLD4dWB_register_Asm_32:
7168  case ARM::VLD4qWB_register_Asm_8:
7169  case ARM::VLD4qWB_register_Asm_16:
7170  case ARM::VLD4qWB_register_Asm_32: {
7171    MCInst TmpInst;
7172    unsigned Spacing;
7173    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7174    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7175    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7176                                            Spacing));
7177    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7178                                            Spacing * 2));
7179    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7180                                            Spacing * 3));
7181    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7182    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7183    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7184    TmpInst.addOperand(Inst.getOperand(3)); // Rm
7185    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7186    TmpInst.addOperand(Inst.getOperand(5));
7187    Inst = TmpInst;
7188    return true;
7189  }
7190
7191  // VST3 multiple 3-element structure instructions.
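  // e.g. (illustrative) 'vst3.8 {d0, d1, d2}, [r0]'. For stores the real
  // opcodes take Rn/alignment (and any writeback operands) before the source
  // register list, hence the operand order below.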
7192  case ARM::VST3dAsm_8:
7193  case ARM::VST3dAsm_16:
7194  case ARM::VST3dAsm_32:
7195  case ARM::VST3qAsm_8:
7196  case ARM::VST3qAsm_16:
7197  case ARM::VST3qAsm_32: {
7198    MCInst TmpInst;
7199    unsigned Spacing;
7200    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7201    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7202    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7203    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7204    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7205                                            Spacing));
7206    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7207                                            Spacing * 2));
7208    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7209    TmpInst.addOperand(Inst.getOperand(4));
7210    Inst = TmpInst;
7211    return true;
7212  }
7213
7214  case ARM::VST3dWB_fixed_Asm_8:
7215  case ARM::VST3dWB_fixed_Asm_16:
7216  case ARM::VST3dWB_fixed_Asm_32:
7217  case ARM::VST3qWB_fixed_Asm_8:
7218  case ARM::VST3qWB_fixed_Asm_16:
7219  case ARM::VST3qWB_fixed_Asm_32: {
7220    MCInst TmpInst;
7221    unsigned Spacing;
7222    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7223    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7224    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7225    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7226    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7227    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7228    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7229                                            Spacing));
7230    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7231                                            Spacing * 2));
7232    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7233    TmpInst.addOperand(Inst.getOperand(4));
7234    Inst = TmpInst;
7235    return true;
7236  }
7237
7238  case ARM::VST3dWB_register_Asm_8:
7239  case ARM::VST3dWB_register_Asm_16:
7240  case ARM::VST3dWB_register_Asm_32:
7241  case ARM::VST3qWB_register_Asm_8:
7242  case ARM::VST3qWB_register_Asm_16:
7243  case ARM::VST3qWB_register_Asm_32: {
7244    MCInst TmpInst;
7245    unsigned Spacing;
7246    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7247    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7248    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7249    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7250    TmpInst.addOperand(Inst.getOperand(3)); // Rm
7251    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7252    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7253                                            Spacing));
7254    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7255                                            Spacing * 2));
7256    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7257    TmpInst.addOperand(Inst.getOperand(5));
7258    Inst = TmpInst;
7259    return true;
7260  }
7261
7262  // VST4 multiple 4-element structure instructions.
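  // e.g. (illustrative) 'vst4.8 {d0, d1, d2, d3}, [r0]', with '[r0]!' or
  // '[r0], r2' for the writeback forms handled below.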
7263  case ARM::VST4dAsm_8:
7264  case ARM::VST4dAsm_16:
7265  case ARM::VST4dAsm_32:
7266  case ARM::VST4qAsm_8:
7267  case ARM::VST4qAsm_16:
7268  case ARM::VST4qAsm_32: {
7269    MCInst TmpInst;
7270    unsigned Spacing;
7271    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7272    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7273    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7274    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7275    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7276                                            Spacing));
7277    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7278                                            Spacing * 2));
7279    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7280                                            Spacing * 3));
7281    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7282    TmpInst.addOperand(Inst.getOperand(4));
7283    Inst = TmpInst;
7284    return true;
7285  }
7286
7287  case ARM::VST4dWB_fixed_Asm_8:
7288  case ARM::VST4dWB_fixed_Asm_16:
7289  case ARM::VST4dWB_fixed_Asm_32:
7290  case ARM::VST4qWB_fixed_Asm_8:
7291  case ARM::VST4qWB_fixed_Asm_16:
7292  case ARM::VST4qWB_fixed_Asm_32: {
7293    MCInst TmpInst;
7294    unsigned Spacing;
7295    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7296    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7297    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7298    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7299    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7300    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7301    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7302                                            Spacing));
7303    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7304                                            Spacing * 2));
7305    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7306                                            Spacing * 3));
7307    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7308    TmpInst.addOperand(Inst.getOperand(4));
7309    Inst = TmpInst;
7310    return true;
7311  }
7312
7313  case ARM::VST4dWB_register_Asm_8:
7314  case ARM::VST4dWB_register_Asm_16:
7315  case ARM::VST4dWB_register_Asm_32:
7316  case ARM::VST4qWB_register_Asm_8:
7317  case ARM::VST4qWB_register_Asm_16:
7318  case ARM::VST4qWB_register_Asm_32: {
7319    MCInst TmpInst;
7320    unsigned Spacing;
7321    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7322    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7323    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7324    TmpInst.addOperand(Inst.getOperand(2)); // alignment
7325    TmpInst.addOperand(Inst.getOperand(3)); // Rm
7326    TmpInst.addOperand(Inst.getOperand(0)); // Vd
7327    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7328                                            Spacing));
7329    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7330                                            Spacing * 2));
7331    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7332                                            Spacing * 3));
7333    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7334    TmpInst.addOperand(Inst.getOperand(5));
7335    Inst = TmpInst;
7336    return true;
7337  }
7338
7339  // Handle encoding choice for the shift-immediate instructions.
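  // A 16-bit encoding is used when the registers are low, the destination
  // matches the first source, no '.w' qualifier was given, and the
  // flag-setting behaviour matches the IT state; e.g. (illustrative)
  // 'lsls r2, r2, #3' outside an IT block can become tLSLri.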
7340  case ARM::t2LSLri:
7341  case ARM::t2LSRri:
7342  case ARM::t2ASRri: {
7343    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7344        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7345        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
7346        !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7347          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
7348      unsigned NewOpc;
7349      switch (Inst.getOpcode()) {
7350      default: llvm_unreachable("unexpected opcode");
7351      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
7352      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
7353      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
7354      }
7355      // The Thumb1 operands aren't in the same order. Awesome, eh?
7356      MCInst TmpInst;
7357      TmpInst.setOpcode(NewOpc);
7358      TmpInst.addOperand(Inst.getOperand(0));
7359      TmpInst.addOperand(Inst.getOperand(5));
7360      TmpInst.addOperand(Inst.getOperand(1));
7361      TmpInst.addOperand(Inst.getOperand(2));
7362      TmpInst.addOperand(Inst.getOperand(3));
7363      TmpInst.addOperand(Inst.getOperand(4));
7364      Inst = TmpInst;
7365      return true;
7366    }
7367    return false;
7368  }
7369
7370  // Handle the Thumb2 mode MOV complex aliases.
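  // These cover forms such as 'mov r0, r1, lsl r2' and 'mov r0, r1, lsl #4'
  // (illustrative), which expand to the corresponding shift instructions,
  // using a 16-bit encoding when the registers and IT/flag state allow it.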
7371  case ARM::t2MOVsr:
7372  case ARM::t2MOVSsr: {
7373    // Which instruction to expand to depends on the CCOut operand and,
7374    // when the register operands are low registers, on whether we're
7375    // in an IT block.
7376    bool isNarrow = false;
7377    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7378        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7379        isARMLowRegister(Inst.getOperand(2).getReg()) &&
7380        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7381        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
7382      isNarrow = true;
7383    MCInst TmpInst;
7384    unsigned newOpc;
7385    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
7386    default: llvm_unreachable("unexpected opcode!");
7387    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
7388    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
7389    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
7390    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
7391    }
7392    TmpInst.setOpcode(newOpc);
7393    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7394    if (isNarrow)
7395      TmpInst.addOperand(MCOperand::CreateReg(
7396          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7397    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7398    TmpInst.addOperand(Inst.getOperand(2)); // Rm
7399    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7400    TmpInst.addOperand(Inst.getOperand(5));
7401    if (!isNarrow)
7402      TmpInst.addOperand(MCOperand::CreateReg(
7403          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7404    Inst = TmpInst;
7405    return true;
7406  }
7407  case ARM::t2MOVsi:
7408  case ARM::t2MOVSsi: {
7409    // Which instruction to expand to depends on the CCOut operand and,
7410    // when the register operands are low registers, on whether we're
7411    // in an IT block.
7412    bool isNarrow = false;
7413    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7414        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7415        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7416      isNarrow = true;
7417    MCInst TmpInst;
7418    unsigned newOpc;
7419    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7420    default: llvm_unreachable("unexpected opcode!");
7421    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7422    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7423    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7424    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7425    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7426    }
7427    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7428    if (Amount == 32) Amount = 0;
7429    TmpInst.setOpcode(newOpc);
7430    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7431    if (isNarrow)
7432      TmpInst.addOperand(MCOperand::CreateReg(
7433          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7434    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7435    if (newOpc != ARM::t2RRX)
7436      TmpInst.addOperand(MCOperand::CreateImm(Amount));
7437    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7438    TmpInst.addOperand(Inst.getOperand(4));
7439    if (!isNarrow)
7440      TmpInst.addOperand(MCOperand::CreateReg(
7441          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7442    Inst = TmpInst;
7443    return true;
7444  }
7445  // Handle the ARM mode MOV complex aliases.
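  // e.g. (illustrative) 'asr r0, r1, r2' becomes 'mov r0, r1, asr r2'
  // (MOVsr), and an immediate shift by zero degenerates to a plain MOVr.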
7446  case ARM::ASRr:
7447  case ARM::LSRr:
7448  case ARM::LSLr:
7449  case ARM::RORr: {
7450    ARM_AM::ShiftOpc ShiftTy;
7451    switch(Inst.getOpcode()) {
7452    default: llvm_unreachable("unexpected opcode!");
7453    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7454    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7455    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7456    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7457    }
7458    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7459    MCInst TmpInst;
7460    TmpInst.setOpcode(ARM::MOVsr);
7461    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7462    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7463    TmpInst.addOperand(Inst.getOperand(2)); // Rm
7464    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7465    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7466    TmpInst.addOperand(Inst.getOperand(4));
7467    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7468    Inst = TmpInst;
7469    return true;
7470  }
7471  case ARM::ASRi:
7472  case ARM::LSRi:
7473  case ARM::LSLi:
7474  case ARM::RORi: {
7475    ARM_AM::ShiftOpc ShiftTy;
7476    switch(Inst.getOpcode()) {
7477    default: llvm_unreachable("unexpected opcode!");
7478    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
7479    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
7480    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
7481    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7482    }
7483    // A shift by zero is a plain MOVr, not a MOVsi.
7484    unsigned Amt = Inst.getOperand(2).getImm();
7485    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7486    // A shift by 32 should be encoded as 0 when permitted
7487    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7488      Amt = 0;
7489    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7490    MCInst TmpInst;
7491    TmpInst.setOpcode(Opc);
7492    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7493    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7494    if (Opc == ARM::MOVsi)
7495      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7496    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7497    TmpInst.addOperand(Inst.getOperand(4));
7498    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7499    Inst = TmpInst;
7500    return true;
7501  }
7502  case ARM::RRXi: {
7503    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7504    MCInst TmpInst;
7505    TmpInst.setOpcode(ARM::MOVsi);
7506    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7507    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7508    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7509    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7510    TmpInst.addOperand(Inst.getOperand(3));
7511    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7512    Inst = TmpInst;
7513    return true;
7514  }
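  // Single-register LDM/STM with writeback become post/pre-indexed loads and
  // stores per the ARM ARM; e.g. (illustrative) 'pop {r3}' is emitted as
  // 'ldr r3, [sp], #4' and 'push {r3}' as 'str r3, [sp, #-4]!'.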
7515  case ARM::t2LDMIA_UPD: {
7516    // If this is a load of a single register, then we should use
7517    // a post-indexed LDR instruction instead, per the ARM ARM.
7518    if (Inst.getNumOperands() != 5)
7519      return false;
7520    MCInst TmpInst;
7521    TmpInst.setOpcode(ARM::t2LDR_POST);
7522    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7523    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7524    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7525    TmpInst.addOperand(MCOperand::CreateImm(4));
7526    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7527    TmpInst.addOperand(Inst.getOperand(3));
7528    Inst = TmpInst;
7529    return true;
7530  }
7531  case ARM::t2STMDB_UPD: {
7532    // If this is a store of a single register, then we should use
7533    // a pre-indexed STR instruction instead, per the ARM ARM.
7534    if (Inst.getNumOperands() != 5)
7535      return false;
7536    MCInst TmpInst;
7537    TmpInst.setOpcode(ARM::t2STR_PRE);
7538    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7539    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7540    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7541    TmpInst.addOperand(MCOperand::CreateImm(-4));
7542    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7543    TmpInst.addOperand(Inst.getOperand(3));
7544    Inst = TmpInst;
7545    return true;
7546  }
7547  case ARM::LDMIA_UPD:
7548    // If this is a load of a single register via a 'pop', then we should use
7549    // a post-indexed LDR instruction instead, per the ARM ARM.
7550    if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
7551        Inst.getNumOperands() == 5) {
7552      MCInst TmpInst;
7553      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7554      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7555      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7556      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7557      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7558      TmpInst.addOperand(MCOperand::CreateImm(4));
7559      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7560      TmpInst.addOperand(Inst.getOperand(3));
7561      Inst = TmpInst;
7562      return true;
7563    }
7564    break;
7565  case ARM::STMDB_UPD:
7566    // If this is a store of a single register via a 'push', then we should use
7567    // a pre-indexed STR instruction instead, per the ARM ARM.
7568    if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
7569        Inst.getNumOperands() == 5) {
7570      MCInst TmpInst;
7571      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7572      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7573      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7574      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7575      TmpInst.addOperand(MCOperand::CreateImm(-4));
7576      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7577      TmpInst.addOperand(Inst.getOperand(3));
7578      Inst = TmpInst;
7579    }
7580    break;
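  // When the matcher picks the 12-bit T4 encoding for a plain 'add'/'sub'
  // (not 'addw'/'subw') and the immediate also fits the T3 modified-immediate
  // encoding (e.g. #255 fits both), the T3 form is preferred below.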
7581  case ARM::t2ADDri12:
7582    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7583    // mnemonic was used (not "addw"), encoding T3 is preferred.
7584    if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
7585        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7586      break;
7587    Inst.setOpcode(ARM::t2ADDri);
7588    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7589    break;
7590  case ARM::t2SUBri12:
7591    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7592    // mnemonic was used (not "subw"), encoding T3 is preferred.
7593    if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
7594        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7595      break;
7596    Inst.setOpcode(ARM::t2SUBri);
7597    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7598    break;
7599  case ARM::tADDi8:
7600    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7601    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7602    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7603    // to encoding T1 if <Rd> is omitted."
7604    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7605      Inst.setOpcode(ARM::tADDi3);
7606      return true;
7607    }
7608    break;
7609  case ARM::tSUBi8:
7610    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7611    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7612    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7613    // to encoding T1 if <Rd> is omitted."
7614    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7615      Inst.setOpcode(ARM::tSUBi3);
7616      return true;
7617    }
7618    break;
7619  case ARM::t2ADDri:
7620  case ARM::t2SUBri: {
7621    // If the destination and first source operand are the same, and
7622    // the flags are compatible with the current IT status, use encoding T2
7623    // instead of T3. For compatibility with the system 'as'. Make sure the
7624    // wide encoding wasn't explicit.
7625    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7626        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7627        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7628        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7629         (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7630        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7631         static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
7632      break;
7633    MCInst TmpInst;
7634    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7635                      ARM::tADDi8 : ARM::tSUBi8);
7636    TmpInst.addOperand(Inst.getOperand(0));
7637    TmpInst.addOperand(Inst.getOperand(5));
7638    TmpInst.addOperand(Inst.getOperand(0));
7639    TmpInst.addOperand(Inst.getOperand(2));
7640    TmpInst.addOperand(Inst.getOperand(3));
7641    TmpInst.addOperand(Inst.getOperand(4));
7642    Inst = TmpInst;
7643    return true;
7644  }
7645  case ARM::t2ADDrr: {
7646    // If the destination and first source operand are the same, and
7647    // there's no setting of the flags, use encoding T2 instead of T3.
7648    // Note that this is only for ADD, not SUB. This mirrors the system
7649    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7650    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7651        Inst.getOperand(5).getReg() != 0 ||
7652        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7653         static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
7654      break;
7655    MCInst TmpInst;
7656    TmpInst.setOpcode(ARM::tADDhirr);
7657    TmpInst.addOperand(Inst.getOperand(0));
7658    TmpInst.addOperand(Inst.getOperand(0));
7659    TmpInst.addOperand(Inst.getOperand(2));
7660    TmpInst.addOperand(Inst.getOperand(3));
7661    TmpInst.addOperand(Inst.getOperand(4));
7662    Inst = TmpInst;
7663    return true;
7664  }
7665  case ARM::tADDrSP: {
7666    // If the non-SP source operand and the destination operand are not the
7667    // same, we need to use the 32-bit encoding if it's available.
7668    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7669      Inst.setOpcode(ARM::t2ADDrr);
7670      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7671      return true;
7672    }
7673    break;
7674  }
7675  case ARM::tB:
7676    // A Thumb conditional branch outside of an IT block is a tBcc.
7677    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7678      Inst.setOpcode(ARM::tBcc);
7679      return true;
7680    }
7681    break;
7682  case ARM::t2B:
7683    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7684    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7685      Inst.setOpcode(ARM::t2Bcc);
7686      return true;
7687    }
7688    break;
7689  case ARM::t2Bcc:
7690    // If the conditional is AL or we're in an IT block, we really want t2B.
7691    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7692      Inst.setOpcode(ARM::t2B);
7693      return true;
7694    }
7695    break;
7696  case ARM::tBcc:
7697    // If the conditional is AL, we really want tB.
7698    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7699      Inst.setOpcode(ARM::tB);
7700      return true;
7701    }
7702    break;
7703  case ARM::tLDMIA: {
7704    // If the register list contains any high registers, or if the writeback
7705    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7706    // instead if we're in Thumb2. Otherwise, this should have generated
7707    // an error in validateInstruction().
7708    unsigned Rn = Inst.getOperand(0).getReg();
7709    bool hasWritebackToken =
7710        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7711         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7712    bool listContainsBase;
7713    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7714        (!listContainsBase && !hasWritebackToken) ||
7715        (listContainsBase && hasWritebackToken)) {
7716      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7717      assert (isThumbTwo());
7718      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7719      // If we're switching to the updating version, we need to insert
7720      // the writeback tied operand.
7721      if (hasWritebackToken)
7722        Inst.insert(Inst.begin(),
7723                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7724      return true;
7725    }
7726    break;
7727  }
7728  case ARM::tSTMIA_UPD: {
7729    // If the register list contains any high registers, we need to use
7730    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7731    // should have generated an error in validateInstruction().
7732    unsigned Rn = Inst.getOperand(0).getReg();
7733    bool listContainsBase;
7734    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7735      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7736      assert (isThumbTwo());
7737      Inst.setOpcode(ARM::t2STMIA_UPD);
7738      return true;
7739    }
7740    break;
7741  }
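  // e.g. (illustrative) 'pop {r4, r8}' contains a high register, so it is
  // rewritten as t2LDMIA_UPD with SP inserted as the tied base register.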
7742  case ARM::tPOP: {
7743    bool listContainsBase;
7744    // If the register list contains any high registers, we need to use
7745    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7746    // should have generated an error in validateInstruction().
7747    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7748      return false;
7749    assert (isThumbTwo());
7750    Inst.setOpcode(ARM::t2LDMIA_UPD);
7751    // Add the base register and writeback operands.
7752    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7753    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7754    return true;
7755  }
7756  case ARM::tPUSH: {
7757    bool listContainsBase;
7758    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7759      return false;
7760    assert (isThumbTwo());
7761    Inst.setOpcode(ARM::t2STMDB_UPD);
7762    // Add the base register and writeback operands.
7763    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7764    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7765    return true;
7766  }
7767  case ARM::t2MOVi: {
7768    // If we can use the 16-bit encoding and the user didn't explicitly
7769    // request the 32-bit variant, transform it here.
7770    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7771        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7772        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7773          Inst.getOperand(4).getReg() == ARM::CPSR) ||
7774         (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7775        (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
7776         static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
7777      // The operands aren't in the same order for tMOVi8...
7778      MCInst TmpInst;
7779      TmpInst.setOpcode(ARM::tMOVi8);
7780      TmpInst.addOperand(Inst.getOperand(0));
7781      TmpInst.addOperand(Inst.getOperand(4));
7782      TmpInst.addOperand(Inst.getOperand(1));
7783      TmpInst.addOperand(Inst.getOperand(2));
7784      TmpInst.addOperand(Inst.getOperand(3));
7785      Inst = TmpInst;
7786      return true;
7787    }
7788    break;
7789  }
7790  case ARM::t2MOVr: {
7791    // If we can use the 16-bit encoding and the user didn't explicitly
7792    // request the 32-bit variant, transform it here.
7793    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7794        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7795        Inst.getOperand(2).getImm() == ARMCC::AL &&
7796        Inst.getOperand(4).getReg() == ARM::CPSR &&
7797        (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
7798         static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
7799      // The operands aren't the same for tMOV[S]r... (no cc_out)
7800      MCInst TmpInst;
7801      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7802      TmpInst.addOperand(Inst.getOperand(0));
7803      TmpInst.addOperand(Inst.getOperand(1));
7804      TmpInst.addOperand(Inst.getOperand(2));
7805      TmpInst.addOperand(Inst.getOperand(3));
7806      Inst = TmpInst;
7807      return true;
7808    }
7809    break;
7810  }
7811  case ARM::t2SXTH:
7812  case ARM::t2SXTB:
7813  case ARM::t2UXTH:
7814  case ARM::t2UXTB: {
7815    // If we can use the 16-bit encoding and the user didn't explicitly
7816    // request the 32-bit variant, transform it here.
7817    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7818        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7819        Inst.getOperand(2).getImm() == 0 &&
7820        (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
7821         static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
7822      unsigned NewOpc;
7823      switch (Inst.getOpcode()) {
7824      default: llvm_unreachable("Illegal opcode!");
7825      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7826      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7827      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7828      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7829      }
7830      // The operands aren't the same for thumb1 (no rotate operand).
7831      MCInst TmpInst;
7832      TmpInst.setOpcode(NewOpc);
7833      TmpInst.addOperand(Inst.getOperand(0));
7834      TmpInst.addOperand(Inst.getOperand(1));
7835      TmpInst.addOperand(Inst.getOperand(3));
7836      TmpInst.addOperand(Inst.getOperand(4));
7837      Inst = TmpInst;
7838      return true;
7839    }
7840    break;
7841  }
7842  case ARM::MOVsi: {
7843    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7844    // rrx shifts and asr/lsr of #32 are encoded with an amount of 0
7845    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7846      return false;
7847    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7848      // Shifting by zero is accepted as a vanilla 'MOVr'
7849      MCInst TmpInst;
7850      TmpInst.setOpcode(ARM::MOVr);
7851      TmpInst.addOperand(Inst.getOperand(0));
7852      TmpInst.addOperand(Inst.getOperand(1));
7853      TmpInst.addOperand(Inst.getOperand(3));
7854      TmpInst.addOperand(Inst.getOperand(4));
7855      TmpInst.addOperand(Inst.getOperand(5));
7856      Inst = TmpInst;
7857      return true;
7858    }
7859    return false;
7860  }
7861  case ARM::ANDrsi:
7862  case ARM::ORRrsi:
7863  case ARM::EORrsi:
7864  case ARM::BICrsi:
7865  case ARM::SUBrsi:
7866  case ARM::ADDrsi: {
7867    unsigned newOpc;
7868    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7869    if (SOpc == ARM_AM::rrx) return false;
7870    switch (Inst.getOpcode()) {
7871    default: llvm_unreachable("unexpected opcode!");
7872    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7873    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7874    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7875    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7876    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7877    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7878    }
7879    // If the shift is by zero, use the non-shifted instruction definition.
7880    // The exception is for right shifts, where 0 == 32
7881    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7882        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7883      MCInst TmpInst;
7884      TmpInst.setOpcode(newOpc);
7885      TmpInst.addOperand(Inst.getOperand(0));
7886      TmpInst.addOperand(Inst.getOperand(1));
7887      TmpInst.addOperand(Inst.getOperand(2));
7888      TmpInst.addOperand(Inst.getOperand(4));
7889      TmpInst.addOperand(Inst.getOperand(5));
7890      TmpInst.addOperand(Inst.getOperand(6));
7891      Inst = TmpInst;
7892      return true;
7893    }
7894    return false;
7895  }
7896  case ARM::ITasm:
7897  case ARM::t2IT: {
7898    // In the IT instruction encoding, a mask bit for one of the later
7899    // conditions means 't' when it equals the low bit of the condition
7900    // code. The parser always builds the mask as if 1 implies 't', so
7901    // XOR toggle the bits if the low bit of the condition code is zero.
7902    MCOperand &MO = Inst.getOperand(1);
7903    unsigned Mask = MO.getImm();
7904    unsigned OrigMask = Mask;
7905    unsigned TZ = countTrailingZeros(Mask);
7906    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7907      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7908      Mask ^= (0xE << TZ) & 0xF;
7909    }
7910    MO.setImm(Mask);
7911
7912    // Set up the IT block state according to the IT instruction we just
7913    // matched.
7914    assert(!inITBlock() && "nested IT blocks?!");
7915    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7916    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7917    ITState.CurPosition = 0;
7918    ITState.FirstCond = true;
7919    break;
7920  }
7921  case ARM::t2LSLrr:
7922  case ARM::t2LSRrr:
7923  case ARM::t2ASRrr:
7924  case ARM::t2SBCrr:
7925  case ARM::t2RORrr:
7926  case ARM::t2BICrr:
7927  {
7928    // Assemblers should use the narrow encodings of these instructions when permissible.
7929    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7930         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7931        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7932        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7933         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7934        (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
7935         !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
7936             ".w"))) {
7937      unsigned NewOpc;
7938      switch (Inst.getOpcode()) {
7939        default: llvm_unreachable("unexpected opcode");
7940        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7941        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7942        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7943        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7944        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7945        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7946      }
7947      MCInst TmpInst;
7948      TmpInst.setOpcode(NewOpc);
7949      TmpInst.addOperand(Inst.getOperand(0));
7950      TmpInst.addOperand(Inst.getOperand(5));
7951      TmpInst.addOperand(Inst.getOperand(1));
7952      TmpInst.addOperand(Inst.getOperand(2));
7953      TmpInst.addOperand(Inst.getOperand(3));
7954      TmpInst.addOperand(Inst.getOperand(4));
7955      Inst = TmpInst;
7956      return true;
7957    }
7958    return false;
7959  }
7960  case ARM::t2ANDrr:
7961  case ARM::t2EORrr:
7962  case ARM::t2ADCrr:
7963  case ARM::t2ORRrr:
7964  {
7965    // Assemblers should use the narrow encodings of these instructions when permissible.
7966    // These instructions are special in that they are commutable, so shorter encodings
7967    // are available more often.
7968    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7969         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7970        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7971         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7972        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7973         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7974        (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
7975         !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
7976             ".w"))) {
7977      unsigned NewOpc;
7978      switch (Inst.getOpcode()) {
7979        default: llvm_unreachable("unexpected opcode");
7980        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7981        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7982        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7983        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7984      }
7985      MCInst TmpInst;
7986      TmpInst.setOpcode(NewOpc);
7987      TmpInst.addOperand(Inst.getOperand(0));
7988      TmpInst.addOperand(Inst.getOperand(5));
7989      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7990        TmpInst.addOperand(Inst.getOperand(1));
7991        TmpInst.addOperand(Inst.getOperand(2));
7992      } else {
7993        TmpInst.addOperand(Inst.getOperand(2));
7994        TmpInst.addOperand(Inst.getOperand(1));
7995      }
7996      TmpInst.addOperand(Inst.getOperand(3));
7997      TmpInst.addOperand(Inst.getOperand(4));
7998      Inst = TmpInst;
7999      return true;
8000    }
8001    return false;
8002  }
8003  }
8004  return false;
8005}
8006
8007unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8008  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8009  // suffix depending on whether they're in an IT block or not.
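  // e.g. (illustrative) the 16-bit add is written 'adds r1, r2, r3' outside
  // an IT block but 'add r1, r2, r3' inside one; the mismatched form is
  // rejected here.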
8010  unsigned Opc = Inst.getOpcode();
8011  const MCInstrDesc &MCID = MII.get(Opc);
8012  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8013    assert(MCID.hasOptionalDef() &&
8014           "optionally flag setting instruction missing optional def operand");
8015    assert(MCID.NumOperands == Inst.getNumOperands() &&
8016           "operand count mismatch!");
8017    // Find the optional-def operand (cc_out).
8018    unsigned OpNo;
8019    for (OpNo = 0;
8020         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
8021         ++OpNo)
8022      ;
8023    // If we're parsing Thumb1, reject it completely.
8024    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8025      return Match_MnemonicFail;
8026    // If we're parsing Thumb2, which form is legal depends on whether we're
8027    // in an IT block.
8028    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8029        !inITBlock())
8030      return Match_RequiresITBlock;
8031    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8032        inITBlock())
8033      return Match_RequiresNotITBlock;
8034  }
8035  // Some Thumb1 encodings that support high registers only allow both
8036  // registers to be from r0-r7 when assembling for Thumb2.
8037  else if (Opc == ARM::tADDhirr && isThumbOne() &&
8038           isARMLowRegister(Inst.getOperand(1).getReg()) &&
8039           isARMLowRegister(Inst.getOperand(2).getReg()))
8040    return Match_RequiresThumb2;
8041  // Others only require ARMv6 or later.
8042  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
8043           isARMLowRegister(Inst.getOperand(0).getReg()) &&
8044           isARMLowRegister(Inst.getOperand(1).getReg()))
8045    return Match_RequiresV6;
8046  return Match_Success;
8047}
8048
8049namespace llvm {
8050template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
8051  return true; // In an assembly source, no need to second-guess
8052}
8053}
8054
8055static const char *getSubtargetFeatureName(unsigned Val);
8056bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
8057                                           OperandVector &Operands,
8058                                           MCStreamer &Out, unsigned &ErrorInfo,
8059                                           bool MatchingInlineAsm) {
8060  MCInst Inst;
8061  unsigned MatchResult;
8062
8063  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
8064                                     MatchingInlineAsm);
8065  switch (MatchResult) {
8066  default: break;
8067  case Match_Success:
8068    // Context sensitive operand constraints aren't handled by the matcher,
8069    // so check them here.
8070    if (validateInstruction(Inst, Operands)) {
8071      // Still progress the IT block, otherwise one wrong condition causes
8072      // nasty cascading errors.
8073      forwardITPosition();
8074      return true;
8075    }
8076
8077    { // processInstruction() updates the inITBlock state, so save it away.
8078      bool wasInITBlock = inITBlock();
8079
8080      // Some instructions need post-processing to, for example, tweak which
8081      // encoding is selected. Loop on it while changes happen so the
8082      // individual transformations can chain off each other. E.g.,
8083      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
8084      while (processInstruction(Inst, Operands, Out))
8085        ;
8086
8087      // Only after the instruction is fully processed can we validate it.
8088      if (wasInITBlock && hasV8Ops() && isThumb() &&
8089          !isV8EligibleForIT(&Inst)) {
8090        Warning(IDLoc, "deprecated instruction in IT block");
8091      }
8092    }
8093
8094    // Only move forward at the very end so that everything in validate
8095    // and process gets a consistent answer about whether we're in an IT
8096    // block.
8097    forwardITPosition();
8098
8099    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
8100    // doesn't actually encode.
8101    if (Inst.getOpcode() == ARM::ITasm)
8102      return false;
8103
8104    Inst.setLoc(IDLoc);
8105    Out.EmitInstruction(Inst, STI);
8106    return false;
8107  case Match_MissingFeature: {
8108    assert(ErrorInfo && "Unknown missing feature!");
8109    // Special case the error message for the very common case where only
8110    // a single subtarget feature is missing (e.g. Thumb vs. ARM).
8111    std::string Msg = "instruction requires:";
8112    unsigned Mask = 1;
8113    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
8114      if (ErrorInfo & Mask) {
8115        Msg += " ";
8116        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
8117      }
8118      Mask <<= 1;
8119    }
8120    return Error(IDLoc, Msg);
8121  }
8122  case Match_InvalidOperand: {
8123    SMLoc ErrorLoc = IDLoc;
8124    if (ErrorInfo != ~0U) {
8125      if (ErrorInfo >= Operands.size())
8126        return Error(IDLoc, "too few operands for instruction");
8127
8128      ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8129      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8130    }
8131
8132    return Error(ErrorLoc, "invalid operand for instruction");
8133  }
8134  case Match_MnemonicFail:
8135    return Error(IDLoc, "invalid instruction",
8136                 ((ARMOperand &)*Operands[0]).getLocRange());
8137  case Match_RequiresNotITBlock:
8138    return Error(IDLoc, "flag setting instruction only valid outside IT block");
8139  case Match_RequiresITBlock:
8140    return Error(IDLoc, "instruction only valid inside IT block");
8141  case Match_RequiresV6:
8142    return Error(IDLoc, "instruction variant requires ARMv6 or later");
8143  case Match_RequiresThumb2:
8144    return Error(IDLoc, "instruction variant requires Thumb2");
8145  case Match_ImmRange0_15: {
8146    SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8147    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8148    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
8149  }
8150  case Match_ImmRange0_239: {
8151    SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8152    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8153    return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
8154  }
8155  case Match_AlignedMemoryRequiresNone:
8156  case Match_DupAlignedMemoryRequiresNone:
8157  case Match_AlignedMemoryRequires16:
8158  case Match_DupAlignedMemoryRequires16:
8159  case Match_AlignedMemoryRequires32:
8160  case Match_DupAlignedMemoryRequires32:
8161  case Match_AlignedMemoryRequires64:
8162  case Match_DupAlignedMemoryRequires64:
8163  case Match_AlignedMemoryRequires64or128:
8164  case Match_DupAlignedMemoryRequires64or128:
8165  case Match_AlignedMemoryRequires64or128or256:
8166  {
8167    SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
8168    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8169    switch (MatchResult) {
8170      default:
8171        llvm_unreachable("Missing Match_Aligned type");
8172      case Match_AlignedMemoryRequiresNone:
8173      case Match_DupAlignedMemoryRequiresNone:
8174        return Error(ErrorLoc, "alignment must be omitted");
8175      case Match_AlignedMemoryRequires16:
8176      case Match_DupAlignedMemoryRequires16:
8177        return Error(ErrorLoc, "alignment must be 16 or omitted");
8178      case Match_AlignedMemoryRequires32:
8179      case Match_DupAlignedMemoryRequires32:
8180        return Error(ErrorLoc, "alignment must be 32 or omitted");
8181      case Match_AlignedMemoryRequires64:
8182      case Match_DupAlignedMemoryRequires64:
8183        return Error(ErrorLoc, "alignment must be 64 or omitted");
8184      case Match_AlignedMemoryRequires64or128:
8185      case Match_DupAlignedMemoryRequires64or128:
8186        return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
8187      case Match_AlignedMemoryRequires64or128or256:
8188        return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
8189    }
8190  }
8191  }
8192
8193  llvm_unreachable("Implement any new match types added!");
8194}
8195
8196/// ParseDirective parses the ARM-specific directives.
8197bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
8198  const MCObjectFileInfo::Environment Format =
8199    getContext().getObjectFileInfo()->getObjectFileType();
8200  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8201
8202  StringRef IDVal = DirectiveID.getIdentifier();
8203  if (IDVal == ".word")
8204    return parseLiteralValues(4, DirectiveID.getLoc());
8205  else if (IDVal == ".short" || IDVal == ".hword")
8206    return parseLiteralValues(2, DirectiveID.getLoc());
8207  else if (IDVal == ".thumb")
8208    return parseDirectiveThumb(DirectiveID.getLoc());
8209  else if (IDVal == ".arm")
8210    return parseDirectiveARM(DirectiveID.getLoc());
8211  else if (IDVal == ".thumb_func")
8212    return parseDirectiveThumbFunc(DirectiveID.getLoc());
8213  else if (IDVal == ".code")
8214    return parseDirectiveCode(DirectiveID.getLoc());
8215  else if (IDVal == ".syntax")
8216    return parseDirectiveSyntax(DirectiveID.getLoc());
8217  else if (IDVal == ".unreq")
8218    return parseDirectiveUnreq(DirectiveID.getLoc());
8219  else if (IDVal == ".fnend")
8220    return parseDirectiveFnEnd(DirectiveID.getLoc());
8221  else if (IDVal == ".cantunwind")
8222    return parseDirectiveCantUnwind(DirectiveID.getLoc());
8223  else if (IDVal == ".personality")
8224    return parseDirectivePersonality(DirectiveID.getLoc());
8225  else if (IDVal == ".handlerdata")
8226    return parseDirectiveHandlerData(DirectiveID.getLoc());
8227  else if (IDVal == ".setfp")
8228    return parseDirectiveSetFP(DirectiveID.getLoc());
8229  else if (IDVal == ".pad")
8230    return parseDirectivePad(DirectiveID.getLoc());
8231  else if (IDVal == ".save")
8232    return parseDirectiveRegSave(DirectiveID.getLoc(), false);
8233  else if (IDVal == ".vsave")
8234    return parseDirectiveRegSave(DirectiveID.getLoc(), true);
8235  else if (IDVal == ".ltorg" || IDVal == ".pool")
8236    return parseDirectiveLtorg(DirectiveID.getLoc());
8237  else if (IDVal == ".even")
8238    return parseDirectiveEven(DirectiveID.getLoc());
8239  else if (IDVal == ".personalityindex")
8240    return parseDirectivePersonalityIndex(DirectiveID.getLoc());
8241  else if (IDVal == ".unwind_raw")
8242    return parseDirectiveUnwindRaw(DirectiveID.getLoc());
8243  else if (IDVal == ".movsp")
8244    return parseDirectiveMovSP(DirectiveID.getLoc());
8245  else if (IDVal == ".arch_extension")
8246    return parseDirectiveArchExtension(DirectiveID.getLoc());
8247  else if (IDVal == ".align")
8248    return parseDirectiveAlign(DirectiveID.getLoc());
8249  else if (IDVal == ".thumb_set")
8250    return parseDirectiveThumbSet(DirectiveID.getLoc());
8251
8252  if (!IsMachO) {
8253    if (IDVal == ".arch")
8254      return parseDirectiveArch(DirectiveID.getLoc());
8255    else if (IDVal == ".cpu")
8256      return parseDirectiveCPU(DirectiveID.getLoc());
8257    else if (IDVal == ".eabi_attribute")
8258      return parseDirectiveEabiAttr(DirectiveID.getLoc());
8259    else if (IDVal == ".fpu")
8260      return parseDirectiveFPU(DirectiveID.getLoc());
8261    else if (IDVal == ".fnstart")
8262      return parseDirectiveFnStart(DirectiveID.getLoc());
8263    else if (IDVal == ".inst")
8264      return parseDirectiveInst(DirectiveID.getLoc());
8265    else if (IDVal == ".inst.n")
8266      return parseDirectiveInst(DirectiveID.getLoc(), 'n');
8267    else if (IDVal == ".inst.w")
8268      return parseDirectiveInst(DirectiveID.getLoc(), 'w');
8269    else if (IDVal == ".object_arch")
8270      return parseDirectiveObjectArch(DirectiveID.getLoc());
8271    else if (IDVal == ".tlsdescseq")
8272      return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
8273  }
8274
8275  return true;
8276}
8277
8278/// parseLiteralValues
8279///  ::= .hword expression [, expression]*
8280///  ::= .short expression [, expression]*
8281///  ::= .word expression [, expression]*
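/// For illustration (the values below are arbitrary, not from the source),
/// the directive emits each comma-separated expression at the given size:
///   .word  0x12345678, 0xcafef00d   @ two 4-byte values
///   .short 0x1234                   @ one 2-byte value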
8282bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
8283  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8284    for (;;) {
8285      const MCExpr *Value;
8286      if (getParser().parseExpression(Value)) {
8287        Parser.eatToEndOfStatement();
8288        return false;
8289      }
8290
8291      getParser().getStreamer().EmitValue(Value, Size);
8292
8293      if (getLexer().is(AsmToken::EndOfStatement))
8294        break;
8295
8296      // FIXME: Improve diagnostic.
8297      if (getLexer().isNot(AsmToken::Comma)) {
8298        Error(L, "unexpected token in directive");
8299        return false;
8300      }
8301      Parser.Lex();
8302    }
8303  }
8304
8305  Parser.Lex();
8306  return false;
8307}
8308
8309/// parseDirectiveThumb
8310///  ::= .thumb
8311bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
8312  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8313    Error(L, "unexpected token in directive");
8314    return false;
8315  }
8316  Parser.Lex();
8317
8318  if (!hasThumb()) {
8319    Error(L, "target does not support Thumb mode");
8320    return false;
8321  }
8322
8323  if (!isThumb())
8324    SwitchMode();
8325
8326  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8327  return false;
8328}
8329
8330/// parseDirectiveARM
8331///  ::= .arm
8332bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
8333  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8334    Error(L, "unexpected token in directive");
8335    return false;
8336  }
8337  Parser.Lex();
8338
8339  if (!hasARM()) {
8340    Error(L, "target does not support ARM mode");
8341    return false;
8342  }
8343
8344  if (isThumb())
8345    SwitchMode();
8346
8347  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8348  return false;
8349}
8350
8351void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
8352  if (NextSymbolIsThumb) {
8353    getParser().getStreamer().EmitThumbFunc(Symbol);
8354    NextSymbolIsThumb = false;
8355  }
8356}
8357
8358/// parseDirectiveThumbFunc
8359///  ::= .thumb_func symbol_name
8360bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
8361  const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8362  bool isMachO = MAI->hasSubsectionsViaSymbols();
8363
8364  // Darwin asm optionally allows a function name after the .thumb_func
8365  // directive; ELF doesn't.
8366  if (isMachO) {
8367    const AsmToken &Tok = Parser.getTok();
8368    if (Tok.isNot(AsmToken::EndOfStatement)) {
8369      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
8370        Error(L, "unexpected token in .thumb_func directive");
8371        return false;
8372      }
8373
8374      MCSymbol *Func =
8375          getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
8376      getParser().getStreamer().EmitThumbFunc(Func);
8377      Parser.Lex(); // Consume the identifier token.
8378      return false;
8379    }
8380  }
8381
8382  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8383    Error(L, "unexpected token in directive");
8384    return false;
8385  }
8386
8387  NextSymbolIsThumb = true;
8388  return false;
8389}
8390
8391/// parseDirectiveSyntax
8392///  ::= .syntax unified | divided
8393bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
8394  const AsmToken &Tok = Parser.getTok();
8395  if (Tok.isNot(AsmToken::Identifier)) {
8396    Error(L, "unexpected token in .syntax directive");
8397    return false;
8398  }
8399
8400  StringRef Mode = Tok.getString();
8401  if (Mode == "unified" || Mode == "UNIFIED") {
8402    Parser.Lex();
8403  } else if (Mode == "divided" || Mode == "DIVIDED") {
8404    Error(L, "'.syntax divided' arm asssembly not supported");
8405    return false;
8406  } else {
8407    Error(L, "unrecognized syntax mode in .syntax directive");
8408    return false;
8409  }
8410
8411  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8412    Error(Parser.getTok().getLoc(), "unexpected token in directive");
8413    return false;
8414  }
8415  Parser.Lex();
8416
8417  // TODO tell the MC streamer the mode
8418  // getParser().getStreamer().Emit???();
8419  return false;
8420}
8421
8422/// parseDirectiveCode
8423///  ::= .code 16 | 32
8424bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
8425  const AsmToken &Tok = Parser.getTok();
8426  if (Tok.isNot(AsmToken::Integer)) {
8427    Error(L, "unexpected token in .code directive");
8428    return false;
8429  }
8430  int64_t Val = Parser.getTok().getIntVal();
8431  if (Val != 16 && Val != 32) {
8432    Error(L, "invalid operand to .code directive");
8433    return false;
8434  }
8435  Parser.Lex();
8436
8437  if (getLexer().isNot(AsmToken::EndOfStatement)) {
8438    Error(Parser.getTok().getLoc(), "unexpected token in directive");
8439    return false;
8440  }
8441  Parser.Lex();
8442
8443  if (Val == 16) {
8444    if (!hasThumb()) {
8445      Error(L, "target does not support Thumb mode");
8446      return false;
8447    }
8448
8449    if (!isThumb())
8450      SwitchMode();
8451    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8452  } else {
8453    if (!hasARM()) {
8454      Error(L, "target does not support ARM mode");
8455      return false;
8456    }
8457
8458    if (isThumb())
8459      SwitchMode();
8460    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8461  }
8462
8463  return false;
8464}
8465
8466/// parseDirectiveReq
8467///  ::= name .req registername
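/// Illustrative example (the register choice is arbitrary):
///   acc .req r4          @ 'acc' becomes an alias for r4
///   add  acc, acc, #1    @ usable wherever a register is expected
///   .unreq acc           @ removed again via .unreq (see below)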
8468bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
8469  Parser.Lex(); // Eat the '.req' token.
8470  unsigned Reg;
8471  SMLoc SRegLoc, ERegLoc;
8472  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
8473    Parser.eatToEndOfStatement();
8474    Error(SRegLoc, "register name expected");
8475    return false;
8476  }
8477
8478  // Shouldn't be anything else.
8479  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8480    Parser.eatToEndOfStatement();
8481    Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
8482    return false;
8483  }
8484
8485  Parser.Lex(); // Consume the EndOfStatement
8486
8487  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) {
8488    Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
8489    return false;
8490  }
8491
8492  return false;
8493}
8494
8495/// parseDirectiveUnreq
8496///  ::= .unreq registername
8497bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
8498  if (Parser.getTok().isNot(AsmToken::Identifier)) {
8499    Parser.eatToEndOfStatement();
8500    Error(L, "unexpected input in .unreq directive.");
8501    return false;
8502  }
8503  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
8504  Parser.Lex(); // Eat the identifier.
8505  return false;
8506}
8507
8508/// parseDirectiveArch
8509///  ::= .arch token
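/// Illustrative example (accepted names and aliases come from
/// MCTargetDesc/ARMArchName.def):
///   .arch armv7-a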
8510bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
8511  StringRef Arch = getParser().parseStringToEndOfStatement().trim();
8512
8513  unsigned ID = StringSwitch<unsigned>(Arch)
8514#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
8515    .Case(NAME, ARM::ID)
8516#define ARM_ARCH_ALIAS(NAME, ID) \
8517    .Case(NAME, ARM::ID)
8518#include "MCTargetDesc/ARMArchName.def"
8519    .Default(ARM::INVALID_ARCH);
8520
8521  if (ID == ARM::INVALID_ARCH) {
8522    Error(L, "Unknown arch name");
8523    return false;
8524  }
8525
8526  getTargetStreamer().emitArch(ID);
8527  return false;
8528}
8529
8530/// parseDirectiveEabiAttr
8531///  ::= .eabi_attribute int, int [, "str"]
8532///  ::= .eabi_attribute Tag_name, int [, "str"]
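/// Illustrative examples (values are for demonstration only; the named form
/// is accepted when ARMBuildAttrs::AttrTypeFromString recognises the name):
///   .eabi_attribute 6, 10                      @ numeric tag, numeric value
///   .eabi_attribute Tag_CPU_name, "cortex-a8"  @ named tag, string value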
8533bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
8534  int64_t Tag;
8535  SMLoc TagLoc;
8536  TagLoc = Parser.getTok().getLoc();
8537  if (Parser.getTok().is(AsmToken::Identifier)) {
8538    StringRef Name = Parser.getTok().getIdentifier();
8539    Tag = ARMBuildAttrs::AttrTypeFromString(Name);
8540    if (Tag == -1) {
8541      Error(TagLoc, "attribute name not recognised: " + Name);
8542      Parser.eatToEndOfStatement();
8543      return false;
8544    }
8545    Parser.Lex();
8546  } else {
8547    const MCExpr *AttrExpr;
8548
8549    TagLoc = Parser.getTok().getLoc();
8550    if (Parser.parseExpression(AttrExpr)) {
8551      Parser.eatToEndOfStatement();
8552      return false;
8553    }
8554
8555    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
8556    if (!CE) {
8557      Error(TagLoc, "expected numeric constant");
8558      Parser.eatToEndOfStatement();
8559      return false;
8560    }
8561
8562    Tag = CE->getValue();
8563  }
8564
8565  if (Parser.getTok().isNot(AsmToken::Comma)) {
8566    Error(Parser.getTok().getLoc(), "comma expected");
8567    Parser.eatToEndOfStatement();
8568    return false;
8569  }
8570  Parser.Lex(); // skip comma
8571
8572  StringRef StringValue = "";
8573  bool IsStringValue = false;
8574
8575  int64_t IntegerValue = 0;
8576  bool IsIntegerValue = false;
8577
8578  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
8579    IsStringValue = true;
8580  else if (Tag == ARMBuildAttrs::compatibility) {
8581    IsStringValue = true;
8582    IsIntegerValue = true;
8583  } else if (Tag < 32 || Tag % 2 == 0)
8584    IsIntegerValue = true;
8585  else if (Tag % 2 == 1)
8586    IsStringValue = true;
8587  else
8588    llvm_unreachable("invalid tag type");
8589
8590  if (IsIntegerValue) {
8591    const MCExpr *ValueExpr;
8592    SMLoc ValueExprLoc = Parser.getTok().getLoc();
8593    if (Parser.parseExpression(ValueExpr)) {
8594      Parser.eatToEndOfStatement();
8595      return false;
8596    }
8597
8598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
8599    if (!CE) {
8600      Error(ValueExprLoc, "expected numeric constant");
8601      Parser.eatToEndOfStatement();
8602      return false;
8603    }
8604
8605    IntegerValue = CE->getValue();
8606  }
8607
8608  if (Tag == ARMBuildAttrs::compatibility) {
8609    if (Parser.getTok().isNot(AsmToken::Comma))
8610      IsStringValue = false;
8611    else
8612      Parser.Lex();
8613  }
8614
8615  if (IsStringValue) {
8616    if (Parser.getTok().isNot(AsmToken::String)) {
8617      Error(Parser.getTok().getLoc(), "bad string constant");
8618      Parser.eatToEndOfStatement();
8619      return false;
8620    }
8621
8622    StringValue = Parser.getTok().getStringContents();
8623    Parser.Lex();
8624  }
8625
8626  if (IsIntegerValue && IsStringValue) {
8627    assert(Tag == ARMBuildAttrs::compatibility);
8628    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
8629  } else if (IsIntegerValue)
8630    getTargetStreamer().emitAttribute(Tag, IntegerValue);
8631  else if (IsStringValue)
8632    getTargetStreamer().emitTextAttribute(Tag, StringValue);
8633  return false;
8634}
8635
8636/// parseDirectiveCPU
8637///  ::= .cpu str
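/// Illustrative example: ".cpu cortex-a8" switches the subtarget to the named
/// CPU and records it in the CPU_name build attribute.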
8638bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
8639  StringRef CPU = getParser().parseStringToEndOfStatement().trim();
8640  getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
8641
8642  if (!STI.isCPUStringValid(CPU)) {
8643    Error(L, "Unknown CPU name");
8644    return false;
8645  }
8646
8647  // FIXME: This switches the CPU features globally, therefore it might
8648  // happen that code you would not expect to assemble will. For details
8649  // see: http://llvm.org/bugs/show_bug.cgi?id=20757
8650  STI.InitMCProcessorInfo(CPU, "");
8651  STI.InitCPUSchedModel(CPU);
8652  unsigned FB = ComputeAvailableFeatures(STI.getFeatureBits());
8653  setAvailableFeatures(FB);
8654
8655  return false;
8656}
8657
8658// FIXME: This is duplicated in getARMFPUFeatures() in
8659// tools/clang/lib/Driver/Tools.cpp
8660static const struct {
8661  const unsigned Fpu;
8662  const uint64_t Enabled;
8663  const uint64_t Disabled;
8664} Fpus[] = {
8665      {ARM::VFP, ARM::FeatureVFP2, ARM::FeatureNEON},
8666      {ARM::VFPV2, ARM::FeatureVFP2, ARM::FeatureNEON},
8667      {ARM::VFPV3, ARM::FeatureVFP3, ARM::FeatureNEON},
8668      {ARM::VFPV3_D16, ARM::FeatureVFP3 | ARM::FeatureD16, ARM::FeatureNEON},
8669      {ARM::VFPV4, ARM::FeatureVFP4, ARM::FeatureNEON},
8670      {ARM::VFPV4_D16, ARM::FeatureVFP4 | ARM::FeatureD16, ARM::FeatureNEON},
8671      {ARM::FP_ARMV8, ARM::FeatureFPARMv8,
8672       ARM::FeatureNEON | ARM::FeatureCrypto},
8673      {ARM::NEON, ARM::FeatureNEON, 0},
8674      {ARM::NEON_VFPV4, ARM::FeatureVFP4 | ARM::FeatureNEON, 0},
8675      {ARM::NEON_FP_ARMV8, ARM::FeatureFPARMv8 | ARM::FeatureNEON,
8676       ARM::FeatureCrypto},
8677      {ARM::CRYPTO_NEON_FP_ARMV8,
8678       ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto, 0},
8679      {ARM::SOFTVFP, 0, 0},
8680};
8681
8682/// parseDirectiveFPU
8683///  ::= .fpu str
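/// Illustrative example (accepted names come from ARMFPUName.def):
///   .fpu neon-vfpv4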
8684bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
8685  StringRef FPU = getParser().parseStringToEndOfStatement().trim();
8686
8687  unsigned ID = StringSwitch<unsigned>(FPU)
8688#define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
8689#include "ARMFPUName.def"
8690    .Default(ARM::INVALID_FPU);
8691
8692  if (ID == ARM::INVALID_FPU) {
8693    Error(L, "Unknown FPU name");
8694    return false;
8695  }
8696
8697  for (const auto &Fpu : Fpus) {
8698    if (Fpu.Fpu != ID)
8699      continue;
8700
8701    // Need to toggle features that should be on but are off and that
8702    // should be off but are on.
8703    unsigned Toggle = (Fpu.Enabled & ~STI.getFeatureBits()) |
8704                      (Fpu.Disabled & STI.getFeatureBits());
8705    setAvailableFeatures(ComputeAvailableFeatures(STI.ToggleFeature(Toggle)));
8706    break;
8707  }
8708
8709  getTargetStreamer().emitFPU(ID);
8710  return false;
8711}
8712
8713/// parseDirectiveFnStart
8714///  ::= .fnstart
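/// A sketch of a typical EHABI unwind block, showing the directive ordering
/// that the parsers below enforce (illustrative only):
///   .fnstart
///   .save  {r4, r11, lr}
///   .setfp r11, sp, #8
///   @ ... function body ...
///   .fnend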
8715bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
8716  if (UC.hasFnStart()) {
8717    Error(L, ".fnstart starts before the end of previous one");
8718    UC.emitFnStartLocNotes();
8719    return false;
8720  }
8721
8722  // Reset the unwind directives parser state
8723  UC.reset();
8724
8725  getTargetStreamer().emitFnStart();
8726
8727  UC.recordFnStart(L);
8728  return false;
8729}
8730
8731/// parseDirectiveFnEnd
8732///  ::= .fnend
8733bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
8734  // Check the ordering of unwind directives
8735  if (!UC.hasFnStart()) {
8736    Error(L, ".fnstart must precede .fnend directive");
8737    return false;
8738  }
8739
8740  // Reset the unwind directives parser state
8741  getTargetStreamer().emitFnEnd();
8742
8743  UC.reset();
8744  return false;
8745}
8746
8747/// parseDirectiveCantUnwind
8748///  ::= .cantunwind
8749bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
8750  UC.recordCantUnwind(L);
8751
8752  // Check the ordering of unwind directives
8753  if (!UC.hasFnStart()) {
8754    Error(L, ".fnstart must precede .cantunwind directive");
8755    return false;
8756  }
8757  if (UC.hasHandlerData()) {
8758    Error(L, ".cantunwind can't be used with .handlerdata directive");
8759    UC.emitHandlerDataLocNotes();
8760    return false;
8761  }
8762  if (UC.hasPersonality()) {
8763    Error(L, ".cantunwind can't be used with .personality directive");
8764    UC.emitPersonalityLocNotes();
8765    return false;
8766  }
8767
8768  getTargetStreamer().emitCantUnwind();
8769  return false;
8770}
8771
8772/// parseDirectivePersonality
8773///  ::= .personality name
8774bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
8775  bool HasExistingPersonality = UC.hasPersonality();
8776
8777  UC.recordPersonality(L);
8778
8779  // Check the ordering of unwind directives
8780  if (!UC.hasFnStart()) {
8781    Error(L, ".fnstart must precede .personality directive");
8782    return false;
8783  }
8784  if (UC.cantUnwind()) {
8785    Error(L, ".personality can't be used with .cantunwind directive");
8786    UC.emitCantUnwindLocNotes();
8787    return false;
8788  }
8789  if (UC.hasHandlerData()) {
8790    Error(L, ".personality must precede .handlerdata directive");
8791    UC.emitHandlerDataLocNotes();
8792    return false;
8793  }
8794  if (HasExistingPersonality) {
8795    Parser.eatToEndOfStatement();
8796    Error(L, "multiple personality directives");
8797    UC.emitPersonalityLocNotes();
8798    return false;
8799  }
8800
8801  // Parse the name of the personality routine
8802  if (Parser.getTok().isNot(AsmToken::Identifier)) {
8803    Parser.eatToEndOfStatement();
8804    Error(L, "unexpected input in .personality directive.");
8805    return false;
8806  }
8807  StringRef Name(Parser.getTok().getIdentifier());
8808  Parser.Lex();
8809
8810  MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
8811  getTargetStreamer().emitPersonality(PR);
8812  return false;
8813}
8814
8815/// parseDirectiveHandlerData
8816///  ::= .handlerdata
8817bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
8818  UC.recordHandlerData(L);
8819
8820  // Check the ordering of unwind directives
8821  if (!UC.hasFnStart()) {
8822    Error(L, ".fnstart must precede .personality directive");
8823    return false;
8824  }
8825  if (UC.cantUnwind()) {
8826    Error(L, ".handlerdata can't be used with .cantunwind directive");
8827    UC.emitCantUnwindLocNotes();
8828    return false;
8829  }
8830
8831  getTargetStreamer().emitHandlerData();
8832  return false;
8833}
8834
8835/// parseDirectiveSetFP
8836///  ::= .setfp fpreg, spreg [, offset]
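/// Illustrative example: ".setfp r11, sp, #8" records for the unwinder that
/// the frame pointer r11 was set to sp plus 8 (e.g. by "add r11, sp, #8").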
8837bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
8838  // Check the ordering of unwind directives
8839  if (!UC.hasFnStart()) {
8840    Error(L, ".fnstart must precede .setfp directive");
8841    return false;
8842  }
8843  if (UC.hasHandlerData()) {
8844    Error(L, ".setfp must precede .handlerdata directive");
8845    return false;
8846  }
8847
8848  // Parse fpreg
8849  SMLoc FPRegLoc = Parser.getTok().getLoc();
8850  int FPReg = tryParseRegister();
8851  if (FPReg == -1) {
8852    Error(FPRegLoc, "frame pointer register expected");
8853    return false;
8854  }
8855
8856  // Consume comma
8857  if (Parser.getTok().isNot(AsmToken::Comma)) {
8858    Error(Parser.getTok().getLoc(), "comma expected");
8859    return false;
8860  }
8861  Parser.Lex(); // skip comma
8862
8863  // Parse spreg
8864  SMLoc SPRegLoc = Parser.getTok().getLoc();
8865  int SPReg = tryParseRegister();
8866  if (SPReg == -1) {
8867    Error(SPRegLoc, "stack pointer register expected");
8868    return false;
8869  }
8870
8871  if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
8872    Error(SPRegLoc, "register should be either $sp or the latest fp register");
8873    return false;
8874  }
8875
8876  // Update the frame pointer register
8877  UC.saveFPReg(FPReg);
8878
8879  // Parse offset
8880  int64_t Offset = 0;
8881  if (Parser.getTok().is(AsmToken::Comma)) {
8882    Parser.Lex(); // skip comma
8883
8884    if (Parser.getTok().isNot(AsmToken::Hash) &&
8885        Parser.getTok().isNot(AsmToken::Dollar)) {
8886      Error(Parser.getTok().getLoc(), "'#' expected");
8887      return false;
8888    }
8889    Parser.Lex(); // skip hash token.
8890
8891    const MCExpr *OffsetExpr;
8892    SMLoc ExLoc = Parser.getTok().getLoc();
8893    SMLoc EndLoc;
8894    if (getParser().parseExpression(OffsetExpr, EndLoc)) {
8895      Error(ExLoc, "malformed setfp offset");
8896      return false;
8897    }
8898    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8899    if (!CE) {
8900      Error(ExLoc, "setfp offset must be an immediate");
8901      return false;
8902    }
8903
8904    Offset = CE->getValue();
8905  }
8906
8907  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
8908                                static_cast<unsigned>(SPReg), Offset);
8909  return false;
8910}
8911
8912/// parseDirectivePad
8913///  ::= .pad offset
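/// Illustrative example: ".pad #16" records that the prologue allocated 16
/// bytes of stack (e.g. via "sub sp, sp, #16") for unwinding purposes.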
8914bool ARMAsmParser::parseDirectivePad(SMLoc L) {
8915  // Check the ordering of unwind directives
8916  if (!UC.hasFnStart()) {
8917    Error(L, ".fnstart must precede .pad directive");
8918    return false;
8919  }
8920  if (UC.hasHandlerData()) {
8921    Error(L, ".pad must precede .handlerdata directive");
8922    return false;
8923  }
8924
8925  // Parse the offset
8926  if (Parser.getTok().isNot(AsmToken::Hash) &&
8927      Parser.getTok().isNot(AsmToken::Dollar)) {
8928    Error(Parser.getTok().getLoc(), "'#' expected");
8929    return false;
8930  }
8931  Parser.Lex(); // skip hash token.
8932
8933  const MCExpr *OffsetExpr;
8934  SMLoc ExLoc = Parser.getTok().getLoc();
8935  SMLoc EndLoc;
8936  if (getParser().parseExpression(OffsetExpr, EndLoc)) {
8937    Error(ExLoc, "malformed pad offset");
8938    return false;
8939  }
8940  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8941  if (!CE) {
8942    Error(ExLoc, "pad offset must be an immediate");
8943    return false;
8944  }
8945
8946  getTargetStreamer().emitPad(CE->getValue());
8947  return false;
8948}
8949
8950/// parseDirectiveRegSave
8951///  ::= .save  { registers }
8952///  ::= .vsave { registers }
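/// Illustrative examples (the register lists are arbitrary):
///   .save  {r4-r7, lr}   @ core registers pushed by the prologue
///   .vsave {d8-d15}      @ VFP/NEON D registers pushed by the prologue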
8953bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
8954  // Check the ordering of unwind directives
8955  if (!UC.hasFnStart()) {
8956    Error(L, ".fnstart must precede .save or .vsave directives");
8957    return false;
8958  }
8959  if (UC.hasHandlerData()) {
8960    Error(L, ".save or .vsave must precede .handlerdata directive");
8961    return false;
8962  }
8963
8964  // RAII object to make sure parsed operands are deleted.
8965  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
8966
8967  // Parse the register list
8968  if (parseRegisterList(Operands))
8969    return false;
8970  ARMOperand &Op = (ARMOperand &)*Operands[0];
8971  if (!IsVector && !Op.isRegList()) {
8972    Error(L, ".save expects GPR registers");
8973    return false;
8974  }
8975  if (IsVector && !Op.isDPRRegList()) {
8976    Error(L, ".vsave expects DPR registers");
8977    return false;
8978  }
8979
8980  getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
8981  return false;
8982}
8983
8984/// parseDirectiveInst
8985///  ::= .inst opcode [, ...]
8986///  ::= .inst.n opcode [, ...]
8987///  ::= .inst.w opcode [, ...]
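/// Illustrative examples (the encodings are just sample values):
///   .inst   0xe1a00000   @ ARM: one 32-bit word ("mov r0, r0")
///   .inst.n 0x4770       @ Thumb: one 16-bit encoding ("bx lr")
///   .inst.w 0xf3af8000   @ Thumb: one 32-bit encoding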
8988bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
8989  int Width;
8990
8991  if (isThumb()) {
8992    switch (Suffix) {
8993    case 'n':
8994      Width = 2;
8995      break;
8996    case 'w':
8997      Width = 4;
8998      break;
8999    default:
9000      Parser.eatToEndOfStatement();
9001      Error(Loc, "cannot determine Thumb instruction size, "
9002                 "use inst.n/inst.w instead");
9003      return false;
9004    }
9005  } else {
9006    if (Suffix) {
9007      Parser.eatToEndOfStatement();
9008      Error(Loc, "width suffixes are invalid in ARM mode");
9009      return false;
9010    }
9011    Width = 4;
9012  }
9013
9014  if (getLexer().is(AsmToken::EndOfStatement)) {
9015    Parser.eatToEndOfStatement();
9016    Error(Loc, "expected expression following directive");
9017    return false;
9018  }
9019
9020  for (;;) {
9021    const MCExpr *Expr;
9022
9023    if (getParser().parseExpression(Expr)) {
9024      Error(Loc, "expected expression");
9025      return false;
9026    }
9027
9028    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
9029    if (!Value) {
9030      Error(Loc, "expected constant expression");
9031      return false;
9032    }
9033
9034    switch (Width) {
9035    case 2:
9036      if (Value->getValue() > 0xffff) {
9037        Error(Loc, "inst.n operand is too big, use inst.w instead");
9038        return false;
9039      }
9040      break;
9041    case 4:
9042      if (Value->getValue() > 0xffffffff) {
9043        Error(Loc,
9044              StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
9045        return false;
9046      }
9047      break;
9048    default:
9049      llvm_unreachable("only supported widths are 2 and 4");
9050    }
9051
9052    getTargetStreamer().emitInst(Value->getValue(), Suffix);
9053
9054    if (getLexer().is(AsmToken::EndOfStatement))
9055      break;
9056
9057    if (getLexer().isNot(AsmToken::Comma)) {
9058      Error(Loc, "unexpected token in directive");
9059      return false;
9060    }
9061
9062    Parser.Lex();
9063  }
9064
9065  Parser.Lex();
9066  return false;
9067}
9068
9069/// parseDirectiveLtorg
9070///  ::= .ltorg | .pool
9071bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9072  getTargetStreamer().emitCurrentConstantPool();
9073  return false;
9074}
9075
9076bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
9077  const MCSection *Section = getStreamer().getCurrentSection().first;
9078
9079  if (getLexer().isNot(AsmToken::EndOfStatement)) {
9080    TokError("unexpected token in directive");
9081    return false;
9082  }
9083
9084  if (!Section) {
9085    getStreamer().InitSections();
9086    Section = getStreamer().getCurrentSection().first;
9087  }
9088
9089  assert(Section && "must have section to emit alignment");
9090  if (Section->UseCodeAlign())
9091    getStreamer().EmitCodeAlignment(2);
9092  else
9093    getStreamer().EmitValueToAlignment(2);
9094
9095  return false;
9096}
9097
9098/// parseDirectivePersonalityIndex
9099///   ::= .personalityindex index
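/// Illustrative example: ".personalityindex 0" selects the AEABI compact
/// personality routine __aeabi_unwind_cpp_pr0; indices 0-2 are defined.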
9100bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
9101  bool HasExistingPersonality = UC.hasPersonality();
9102
9103  UC.recordPersonalityIndex(L);
9104
9105  if (!UC.hasFnStart()) {
9106    Parser.eatToEndOfStatement();
9107    Error(L, ".fnstart must precede .personalityindex directive");
9108    return false;
9109  }
9110  if (UC.cantUnwind()) {
9111    Parser.eatToEndOfStatement();
9112    Error(L, ".personalityindex cannot be used with .cantunwind");
9113    UC.emitCantUnwindLocNotes();
9114    return false;
9115  }
9116  if (UC.hasHandlerData()) {
9117    Parser.eatToEndOfStatement();
9118    Error(L, ".personalityindex must precede .handlerdata directive");
9119    UC.emitHandlerDataLocNotes();
9120    return false;
9121  }
9122  if (HasExistingPersonality) {
9123    Parser.eatToEndOfStatement();
9124    Error(L, "multiple personality directives");
9125    UC.emitPersonalityLocNotes();
9126    return false;
9127  }
9128
9129  const MCExpr *IndexExpression;
9130  SMLoc IndexLoc = Parser.getTok().getLoc();
9131  if (Parser.parseExpression(IndexExpression)) {
9132    Parser.eatToEndOfStatement();
9133    return false;
9134  }
9135
9136  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
9137  if (!CE) {
9138    Parser.eatToEndOfStatement();
9139    Error(IndexLoc, "index must be a constant number");
9140    return false;
9141  }
9142  if (CE->getValue() < 0 ||
9143      CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
9144    Parser.eatToEndOfStatement();
9145    Error(IndexLoc, "personality routine index should be in range [0-3]");
9146    return false;
9147  }
9148
9149  getTargetStreamer().emitPersonalityIndex(CE->getValue());
9150  return false;
9151}
9152
9153/// parseDirectiveUnwindRaw
9154///   ::= .unwind_raw offset, opcode [, opcode...]
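/// Illustrative example: ".unwind_raw 0, 0xb0" records a stack offset of 0
/// followed by the raw EHABI opcode byte 0xb0 (the "finish" opcode).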
9155bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
9156  if (!UC.hasFnStart()) {
9157    Parser.eatToEndOfStatement();
9158    Error(L, ".fnstart must precede .unwind_raw directives");
9159    return false;
9160  }
9161
9162  int64_t StackOffset;
9163
9164  const MCExpr *OffsetExpr;
9165  SMLoc OffsetLoc = getLexer().getLoc();
9166  if (getLexer().is(AsmToken::EndOfStatement) ||
9167      getParser().parseExpression(OffsetExpr)) {
9168    Error(OffsetLoc, "expected expression");
9169    Parser.eatToEndOfStatement();
9170    return false;
9171  }
9172
9173  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9174  if (!CE) {
9175    Error(OffsetLoc, "offset must be a constant");
9176    Parser.eatToEndOfStatement();
9177    return false;
9178  }
9179
9180  StackOffset = CE->getValue();
9181
9182  if (getLexer().isNot(AsmToken::Comma)) {
9183    Error(getLexer().getLoc(), "expected comma");
9184    Parser.eatToEndOfStatement();
9185    return false;
9186  }
9187  Parser.Lex();
9188
9189  SmallVector<uint8_t, 16> Opcodes;
9190  for (;;) {
9191    const MCExpr *OE;
9192
9193    SMLoc OpcodeLoc = getLexer().getLoc();
9194    if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
9195      Error(OpcodeLoc, "expected opcode expression");
9196      Parser.eatToEndOfStatement();
9197      return false;
9198    }
9199
9200    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
9201    if (!OC) {
9202      Error(OpcodeLoc, "opcode value must be a constant");
9203      Parser.eatToEndOfStatement();
9204      return false;
9205    }
9206
9207    const int64_t Opcode = OC->getValue();
9208    if (Opcode & ~0xff) {
9209      Error(OpcodeLoc, "invalid opcode");
9210      Parser.eatToEndOfStatement();
9211      return false;
9212    }
9213
9214    Opcodes.push_back(uint8_t(Opcode));
9215
9216    if (getLexer().is(AsmToken::EndOfStatement))
9217      break;
9218
9219    if (getLexer().isNot(AsmToken::Comma)) {
9220      Error(getLexer().getLoc(), "unexpected token in directive");
9221      Parser.eatToEndOfStatement();
9222      return false;
9223    }
9224
9225    Parser.Lex();
9226  }
9227
9228  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
9229
9230  Parser.Lex();
9231  return false;
9232}
9233
9234/// parseDirectiveTLSDescSeq
9235///   ::= .tlsdescseq tls-variable
9236bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
9237  if (getLexer().isNot(AsmToken::Identifier)) {
9238    TokError("expected variable after '.tlsdescseq' directive");
9239    Parser.eatToEndOfStatement();
9240    return false;
9241  }
9242
9243  const MCSymbolRefExpr *SRE =
9244    MCSymbolRefExpr::Create(Parser.getTok().getIdentifier(),
9245                            MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
9246  Lex();
9247
9248  if (getLexer().isNot(AsmToken::EndOfStatement)) {
9249    Error(Parser.getTok().getLoc(), "unexpected token");
9250    Parser.eatToEndOfStatement();
9251    return false;
9252  }
9253
9254  getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
9255  return false;
9256}
9257
9258/// parseDirectiveMovSP
9259///  ::= .movsp reg [, #offset]
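/// Illustrative example: ".movsp r7" tells the unwinder that r7 now holds the
/// stack pointer value (e.g. after "mov r7, sp").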
9260bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
9261  if (!UC.hasFnStart()) {
9262    Parser.eatToEndOfStatement();
9263    Error(L, ".fnstart must precede .movsp directives");
9264    return false;
9265  }
9266  if (UC.getFPReg() != ARM::SP) {
9267    Parser.eatToEndOfStatement();
9268    Error(L, "unexpected .movsp directive");
9269    return false;
9270  }
9271
9272  SMLoc SPRegLoc = Parser.getTok().getLoc();
9273  int SPReg = tryParseRegister();
9274  if (SPReg == -1) {
9275    Parser.eatToEndOfStatement();
9276    Error(SPRegLoc, "register expected");
9277    return false;
9278  }
9279
9280  if (SPReg == ARM::SP || SPReg == ARM::PC) {
9281    Parser.eatToEndOfStatement();
9282    Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
9283    return false;
9284  }
9285
9286  int64_t Offset = 0;
9287  if (Parser.getTok().is(AsmToken::Comma)) {
9288    Parser.Lex();
9289
9290    if (Parser.getTok().isNot(AsmToken::Hash)) {
9291      Error(Parser.getTok().getLoc(), "expected #constant");
9292      Parser.eatToEndOfStatement();
9293      return false;
9294    }
9295    Parser.Lex();
9296
9297    const MCExpr *OffsetExpr;
9298    SMLoc OffsetLoc = Parser.getTok().getLoc();
9299    if (Parser.parseExpression(OffsetExpr)) {
9300      Parser.eatToEndOfStatement();
9301      Error(OffsetLoc, "malformed offset expression");
9302      return false;
9303    }
9304
9305    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9306    if (!CE) {
9307      Parser.eatToEndOfStatement();
9308      Error(OffsetLoc, "offset must be an immediate constant");
9309      return false;
9310    }
9311
9312    Offset = CE->getValue();
9313  }
9314
9315  getTargetStreamer().emitMovSP(SPReg, Offset);
9316  UC.saveFPReg(SPReg);
9317
9318  return false;
9319}
9320
9321/// parseDirectiveObjectArch
9322///   ::= .object_arch name
9323bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
9324  if (getLexer().isNot(AsmToken::Identifier)) {
9325    Error(getLexer().getLoc(), "unexpected token");
9326    Parser.eatToEndOfStatement();
9327    return false;
9328  }
9329
9330  StringRef Arch = Parser.getTok().getString();
9331  SMLoc ArchLoc = Parser.getTok().getLoc();
9332  getLexer().Lex();
9333
9334  unsigned ID = StringSwitch<unsigned>(Arch)
9335#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
9336    .Case(NAME, ARM::ID)
9337#define ARM_ARCH_ALIAS(NAME, ID) \
9338    .Case(NAME, ARM::ID)
9339#include "MCTargetDesc/ARMArchName.def"
9340#undef ARM_ARCH_NAME
9341#undef ARM_ARCH_ALIAS
9342    .Default(ARM::INVALID_ARCH);
9343
9344  if (ID == ARM::INVALID_ARCH) {
9345    Error(ArchLoc, "unknown architecture '" + Arch + "'");
9346    Parser.eatToEndOfStatement();
9347    return false;
9348  }
9349
9350  getTargetStreamer().emitObjectArch(ID);
9351
9352  if (getLexer().isNot(AsmToken::EndOfStatement)) {
9353    Error(getLexer().getLoc(), "unexpected token");
9354    Parser.eatToEndOfStatement();
9355  }
9356
9357  return false;
9358}
9359
9360/// parseDirectiveAlign
9361///   ::= .align
9362bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
9363  // NOTE: if this is not the end of the statement, fall back to the
9364  // target-agnostic handling for this directive, which handles it correctly.
9365  if (getLexer().isNot(AsmToken::EndOfStatement))
9366    return true;
9367
9368  // For ARM, '.align' is specifically handled to mean 2**2-byte alignment.
9369  if (getStreamer().getCurrentSection().first->UseCodeAlign())
9370    getStreamer().EmitCodeAlignment(4, 0);
9371  else
9372    getStreamer().EmitValueToAlignment(4, 0, 1, 0);
9373
9374  return false;
9375}
9376
9377/// parseDirectiveThumbSet
9378///  ::= .thumb_set name, value
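/// Illustrative example (the symbol names are hypothetical):
///   .thumb_set alias_func, impl_func   @ like .set, but marks the alias Thumb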
9379bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
9380  StringRef Name;
9381  if (Parser.parseIdentifier(Name)) {
9382    TokError("expected identifier after '.thumb_set'");
9383    Parser.eatToEndOfStatement();
9384    return false;
9385  }
9386
9387  if (getLexer().isNot(AsmToken::Comma)) {
9388    TokError("expected comma after name '" + Name + "'");
9389    Parser.eatToEndOfStatement();
9390    return false;
9391  }
9392  Lex();
9393
9394  const MCExpr *Value;
9395  if (Parser.parseExpression(Value)) {
9396    TokError("missing expression");
9397    Parser.eatToEndOfStatement();
9398    return false;
9399  }
9400
9401  if (getLexer().isNot(AsmToken::EndOfStatement)) {
9402    TokError("unexpected token");
9403    Parser.eatToEndOfStatement();
9404    return false;
9405  }
9406  Lex();
9407
9408  MCSymbol *Alias = getContext().GetOrCreateSymbol(Name);
9409  getTargetStreamer().emitThumbSet(Alias, Value);
9410  return false;
9411}
9412
9413/// Force static initialization.
9414extern "C" void LLVMInitializeARMAsmParser() {
9415  RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget);
9416  RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget);
9417  RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget);
9418  RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget);
9419}
9420
9421#define GET_REGISTER_MATCHER
9422#define GET_SUBTARGET_FEATURE_NAME
9423#define GET_MATCHER_IMPLEMENTATION
9424#include "ARMGenAsmMatcher.inc"
9425
9426static const struct ExtMapEntry {
9427  const char *Extension;
9428  const unsigned ArchCheck;
9429  const uint64_t Features;
9430} Extensions[] = {
9431  { "crc", Feature_HasV8, ARM::FeatureCRC },
9432  { "crypto",  Feature_HasV8,
9433    ARM::FeatureCrypto | ARM::FeatureNEON | ARM::FeatureFPARMv8 },
9434  { "fp", Feature_HasV8, ARM::FeatureFPARMv8 },
9435  { "idiv", Feature_HasV7 | Feature_IsNotMClass,
9436    ARM::FeatureHWDiv | ARM::FeatureHWDivARM },
9437  // FIXME: iWMMXT not supported
9438  { "iwmmxt", Feature_None, 0 },
9439  // FIXME: iWMMXT2 not supported
9440  { "iwmmxt2", Feature_None, 0 },
9441  // FIXME: Maverick not supported
9442  { "maverick", Feature_None, 0 },
9443  { "mp", Feature_HasV7 | Feature_IsNotMClass, ARM::FeatureMP },
9444  // FIXME: ARMv6-m OS Extensions feature not checked
9445  { "os", Feature_None, 0 },
9446  // FIXME: Also available in ARMv6-K
9447  { "sec", Feature_HasV7, ARM::FeatureTrustZone },
9448  { "simd", Feature_HasV8, ARM::FeatureNEON | ARM::FeatureFPARMv8 },
9449  // FIXME: Only available in A-class, isel not predicated
9450  { "virt", Feature_HasV7, ARM::FeatureVirtualization },
9451  // FIXME: xscale not supported
9452  { "xscale", Feature_None, 0 },
9453};
9454
9455/// parseDirectiveArchExtension
9456///   ::= .arch_extension [no]feature
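/// Illustrative examples (names come from the Extensions table above):
///   .arch_extension crc     @ enable the CRC extension
///   .arch_extension nocrc   @ disable it again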
9457bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
9458  if (getLexer().isNot(AsmToken::Identifier)) {
9459    Error(getLexer().getLoc(), "unexpected token");
9460    Parser.eatToEndOfStatement();
9461    return false;
9462  }
9463
9464  StringRef Extension = Parser.getTok().getString();
9465  SMLoc ExtLoc = Parser.getTok().getLoc();
9466  getLexer().Lex();
9467
9468  bool EnableFeature = true;
9469  if (Extension.startswith_lower("no")) {
9470    EnableFeature = false;
9471    Extension = Extension.substr(2);
9472  }
9473
9474  for (unsigned EI = 0, EE = array_lengthof(Extensions); EI != EE; ++EI) {
9475    if (Extensions[EI].Extension != Extension)
9476      continue;
9477
9478    unsigned FB = getAvailableFeatures();
9479    if ((FB & Extensions[EI].ArchCheck) != Extensions[EI].ArchCheck) {
9480      Error(ExtLoc, "architectural extension '" + Extension + "' is not "
9481            "allowed for the current base architecture");
9482      return false;
9483    }
9484
9485    if (!Extensions[EI].Features)
9486      report_fatal_error("unsupported architectural extension: " + Extension);
9487
9488    if (EnableFeature)
9489      FB |= ComputeAvailableFeatures(Extensions[EI].Features);
9490    else
9491      FB &= ~ComputeAvailableFeatures(Extensions[EI].Features);
9492
9493    setAvailableFeatures(FB);
9494    return false;
9495  }
9496
9497  Error(ExtLoc, "unknown architectural extension: " + Extension);
9498  Parser.eatToEndOfStatement();
9499  return false;
9500}
9501
9502// Define this matcher function after the auto-generated include so we
9503// have the match class enum definitions.
9504unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
9505                                                  unsigned Kind) {
9506  ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
9507  // If the kind is a token for a literal immediate, check if our asm
9508  // operand matches. This is for InstAliases which have a fixed-value
9509  // immediate in the syntax.
9510  switch (Kind) {
9511  default: break;
9512  case MCK__35_0:
9513    if (Op.isImm())
9514      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
9515        if (CE->getValue() == 0)
9516          return Match_Success;
9517    break;
9518  case MCK_ARMSOImm:
9519    if (Op.isImm()) {
9520      const MCExpr *SOExpr = Op.getImm();
9521      int64_t Value;
9522      if (!SOExpr->EvaluateAsAbsolute(Value))
9523        return Match_Success;
9524      assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
9525             "expression value must be representable in 32 bits");
9526    }
9527    break;
9528  case MCK_GPRPair:
9529    if (Op.isReg() &&
9530        MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
9531      return Match_Success;
9532    break;
9533  }
9534  return Match_InvalidOperand;
9535}
9536