1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64MCExpr.h"
11#include "MCTargetDesc/AArch64MCTargetDesc.h"
12#include "MCTargetDesc/AArch64TargetStreamer.h"
13#include "TargetInfo/AArch64TargetInfo.h"
14#include "AArch64InstrInfo.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCLinkerOptimizationHint.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCAsmParserExtension.h"
34#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35#include "llvm/MC/MCParser/MCTargetAsmParser.h"
36#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
39#include "llvm/MC/MCSymbol.h"
40#include "llvm/MC/MCTargetOptions.h"
41#include "llvm/MC/SubtargetFeature.h"
42#include "llvm/MC/MCValue.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/Compiler.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetParser.h"
49#include "llvm/Support/TargetRegistry.h"
50#include "llvm/Support/raw_ostream.h"
51#include <cassert>
52#include <cctype>
53#include <cstdint>
54#include <cstdio>
55#include <string>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62namespace {
63
// The broad classes of register operand the parser distinguishes; a name is
// looked up against one of these kinds (see matchRegisterNameAlias).
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

// How a parsed register must relate to the register class the matcher
// expects: exactly equal, or equal to its super-/sub-register (used for
// operands such as GPR64as32 / GPR32as64 — see RegOp::EqualityTy below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
76
/// AArch64AsmParser - target assembly parser for AArch64.  Parses
/// instructions and target directives, and validates/emits MCInsts.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  /// PrefixInfo - records the state of a preceding MOVPRFX instruction
  /// (destination, predication and element size) so the following
  /// instruction can be checked against it.
  class PrefixInfo {
  public:
    /// Build a PrefixInfo from \p Inst.  Only MOVPRFX opcodes produce an
    /// active prefix; any other opcode yields an inactive one.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated form: only the destination is tracked.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated form: governing predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated form: governing predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix; // Prefix state carried over to the next instruction.

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the token currently being parsed.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target-specific directive handlers.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the generated matcher.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 ABI (set from -target-abi=ilp32).
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ownership of the target streamer passes to the MCStreamer here.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
276
277/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
278/// instruction.
279class AArch64Operand : public MCParsedAsmOperand {
280private:
  // Discriminator for the payload union below; exactly one union member is
  // active, selected by this tag.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range of this operand, used for diagnostics.
  SMLoc StartLoc, EndLoc;

  // A bare token (mnemonic suffix, '[', '!', ...); non-owning pointer into
  // the source buffer.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // A list of consecutive vector registers, e.g. "{ v0.8b, v1.8b }".
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // A lane index, e.g. the "[1]" in "v0.s[1]".
  struct VectorIndexOp {
    unsigned Val;
  };

  // A plain immediate expression.
  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an explicit left-shift amount, e.g. "#1, lsl #12".
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  // A system register name plus its encodings for the contexts it is valid
  // in (MRS read, MSR write, MSR pstate-field write).
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // A system-instruction control register operand, e.g. "c12".
  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // NOTE(review): declared but not a member of the union below; appears
  // unused in this chunk.
  struct ExtendOp {
    unsigned Val;
  };

  // Payload storage; the active member is selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
431
432public:
  // Construct an operand of kind \p K with an uninitialized payload; callers
  // are responsible for filling in the union member matching K.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor.  Only the union member that is active for o.Kind may
  // legally be read, hence the exhaustive switch instead of a blanket copy.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
487
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Kind-checked accessors for the payload union.  Each asserts that the
  // requested member is the active one.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstruct the APFloat from the raw 64-bit payload (see FPImmOp::Val).
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // Shift/extend data may live either in a standalone k_ShiftExtend operand
  // or embedded in a k_Register operand (see RegOp::ShiftExtend).

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
631
632  bool isImm() const override { return Kind == k_Immediate; }
633  bool isMem() const override { return false; }
634
635  bool isUImm6() const {
636    if (!isImm())
637      return false;
638    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
639    if (!MCE)
640      return false;
641    int64_t Val = MCE->getValue();
642    return (Val >= 0 && Val < 64);
643  }
644
  // Signed immediate of Width bits, unscaled (scale 1).
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed immediate of Bits bits that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned immediate of Bits bits that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
654
655  template <int Bits, int Scale>
656  DiagnosticPredicate isImmScaled(bool Signed) const {
657    if (!isImm())
658      return DiagnosticPredicateTy::NoMatch;
659
660    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
661    if (!MCE)
662      return DiagnosticPredicateTy::NoMatch;
663
664    int64_t MinVal, MaxVal;
665    if (Signed) {
666      int64_t Shift = Bits - 1;
667      MinVal = (int64_t(1) << Shift) * -Scale;
668      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
669    } else {
670      MinVal = 0;
671      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
672    }
673
674    int64_t Val = MCE->getValue();
675    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
676      return DiagnosticPredicateTy::Match;
677
678    return DiagnosticPredicateTy::NearMatch;
679  }
680
681  DiagnosticPredicate isSVEPattern() const {
682    if (!isImm())
683      return DiagnosticPredicateTy::NoMatch;
684    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
685    if (!MCE)
686      return DiagnosticPredicateTy::NoMatch;
687    int64_t Val = MCE->getValue();
688    if (Val >= 0 && Val < 32)
689      return DiagnosticPredicateTy::Match;
690    return DiagnosticPredicateTy::NearMatch;
691  }
692
693  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
694    AArch64MCExpr::VariantKind ELFRefKind;
695    MCSymbolRefExpr::VariantKind DarwinRefKind;
696    int64_t Addend;
697    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
698                                           Addend)) {
699      // If we don't understand the expression, assume the best and
700      // let the fixup and relocation code deal with it.
701      return true;
702    }
703
704    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
705        ELFRefKind == AArch64MCExpr::VK_LO12 ||
706        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
707        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
708        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
709        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
710        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
711        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
712        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
713        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
714        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
715      // Note that we don't range-check the addend. It's adjusted modulo page
716      // size when converted, so there is no "out of range" condition when using
717      // @pageoff.
718      return true;
719    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
720               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
721      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
722      return Addend == 0;
723    }
724
725    return false;
726  }
727
728  template <int Scale> bool isUImm12Offset() const {
729    if (!isImm())
730      return false;
731
732    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
733    if (!MCE)
734      return isSymbolicUImm12Offset(getImm());
735
736    int64_t Val = MCE->getValue();
737    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
738  }
739
740  template <int N, int M>
741  bool isImmInRange() const {
742    if (!isImm())
743      return false;
744    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
745    if (!MCE)
746      return false;
747    int64_t Val = MCE->getValue();
748    return (Val >= N && Val <= M);
749  }
750
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask covering the bits above the width of T.  Shifting twice by half
    // the width avoids an undefined shift by 64 when sizeof(T) == 8.
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the truncated value against the logical-immediate encoding rules
    // for an element width of sizeof(T) * 8 bits.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
770
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already written as "imm, lsl #Width": return it as-is.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // A plain non-zero constant whose low Width bits are clear can be
        // normalized into the shifted form; otherwise keep shift 0.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Not a constant immediate in either representation.
    return {};
  }
793
  // True iff this operand can serve as the immediate of an ADD/SUB
  // instruction: a 12-bit unsigned value optionally shifted left by 12, or a
  // symbolic expression with a page-offset-style relocation modifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expression: accept exactly the relocation modifiers that
    // produce a 12-bit (possibly HI12-shifted) value.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
838
839  bool isAddSubImmNeg() const {
840    if (!isShiftedImm() && !isImm())
841      return false;
842
843    // Otherwise it should be a real negative immediate in range.
844    if (auto ShiftedVal = getShiftedVal<12>())
845      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
846
847    return false;
848  }
849
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the "lsl #8" shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
869
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the "lsl #8" shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
886
887  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
888    if (isLogicalImm<T>() && !isSVECpyImm<T>())
889      return DiagnosticPredicateTy::Match;
890    return DiagnosticPredicateTy::NoMatch;
891  }
892
893  bool isCondCode() const { return Kind == k_CondCode; }
894
895  bool isSIMDImmType10() const {
896    if (!isImm())
897      return false;
898    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
899    if (!MCE)
900      return false;
901    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
902  }
903
904  template<int N>
905  bool isBranchTarget() const {
906    if (!isImm())
907      return false;
908    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
909    if (!MCE)
910      return true;
911    int64_t Val = MCE->getValue();
912    if (Val & 0x3)
913      return false;
914    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
915    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
916  }
917
918  bool
919  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
920    if (!isImm())
921      return false;
922
923    AArch64MCExpr::VariantKind ELFRefKind;
924    MCSymbolRefExpr::VariantKind DarwinRefKind;
925    int64_t Addend;
926    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
927                                             DarwinRefKind, Addend)) {
928      return false;
929    }
930    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
931      return false;
932
933    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
934      if (ELFRefKind == AllowedModifiers[i])
935        return true;
936    }
937
938    return false;
939  }
940
  // The following predicates accept a symbol with any modifier selecting
  // bits 48-63 / 32-47 / 16-31 / 0-15 respectively (MOVZ/MOVK :abs_gN: etc.).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
970
  // True when the immediate can be materialized by a MOV alias of MOVZ
  // with the given logical shift, for a RegWidth-bit destination.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  // True when the immediate can be materialized by a MOV alias of MOVN
  // (move of the inverted value) with the given shift; unlike the MOVZ
  // case, non-constant expressions are never accepted.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
996
  // True when the FP immediate is encodable in the 8-bit FMOV immediate
  // field (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  // -1U marks "no valid encoding" for the MRS/MSR register numbers below.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState fields that only accept a 0/1 immediate (single-bit fields).
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // Any other valid PState field takes a 4-bit (0-15) immediate; the two
  // classes are mutually exclusive for matcher disambiguation.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1028
  bool isReg() const override {
    return Kind == k_Register;
  }

  // A plain GPR/FPR register (not a vector or SVE register).
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the lower half of the register file
  // (V0-V15 classes), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1048
1049  template <unsigned Class> bool isSVEVectorReg() const {
1050    RegKind RK;
1051    switch (Class) {
1052    case AArch64::ZPRRegClassID:
1053    case AArch64::ZPR_3bRegClassID:
1054    case AArch64::ZPR_4bRegClassID:
1055      RK = RegKind::SVEDataVector;
1056      break;
1057    case AArch64::PPRRegClassID:
1058    case AArch64::PPR_3bRegClassID:
1059      RK = RegKind::SVEPredicateVector;
1060      break;
1061    default:
1062      llvm_unreachable("Unsupport register class");
1063    }
1064
1065    return (Kind == k_Register && Reg.Kind == RK) &&
1066           AArch64MCRegisterClasses[Class].contains(getReg());
1067  }
1068
  // A scalar FP register (B/H/S/D/Q) used where an SVE Z register is
  // expected; class membership is checked against the FPR class ID.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // SVE predicate register with an explicit element-width suffix
  // (e.g. p0.b); NearMatch lets the matcher emit a width-specific error.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data register with an explicit element-width suffix (e.g. z0.s).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1095
  // SVE data register used as an address offset, with an optional shift or
  // sxtw/uxtw extend whose amount must match the transfer size.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1118
  // A 64-bit GPR written where a 32-bit register is expected (the operand
  // adder below converts it), and the symmetric case.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Even/odd register pairs used by CASP and friends (W and X variants).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1140
  // Rotation immediate for complex-number instructions (FCMLA/FCADD):
  // must be congruent to Remainder modulo Angle and at most 270 degrees.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  // 64-bit GPR constrained to the given register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }
1158
  // 64-bit GPR offset with an LSL whose amount matches the transfer size
  // (ExtWidth is in bits; the shift must equal log2 of the byte width).
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1178
1179  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1180            unsigned ElementWidth>
1181  bool isTypedVectorList() const {
1182    if (Kind != k_VectorList)
1183      return false;
1184    if (VectorList.Count != NumRegs)
1185      return false;
1186    if (VectorList.RegisterKind != VectorKind)
1187      return false;
1188    if (VectorList.ElementWidth != ElementWidth)
1189      return false;
1190    return VectorList.NumElements == NumElements;
1191  }
1192
  // Vector element index within [Min, Max]; NearMatch produces an
  // out-of-range diagnostic rather than a generic mismatch.
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shift proper (as opposed to a register extend).
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1221
  // True when the FP immediate exactly equals the value described by the
  // ImmEnum entry in the exact-FP-immediate table (e.g. 0.5 or 1.0 for
  // certain SVE instructions).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      // Bitwise comparison: the operand must be the exact same encoding,
      // not merely numerically equal.
      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Accept either of two table entries; returns the last NearMatch if
  // neither matches so the diagnostic still mentions the expected values.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1254
  // A register-extend operand (uxtb/sxtb/.../lsl) with amount 0-4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend of a 64-bit source register (uxtx/sxtx/lsl), amount 0-4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // Register-offset memory extend with a 64-bit index register; the shift
  // amount must be 0 or log2 of the transfer byte width.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Same, but with a 32-bit index register (uxtw/sxtw).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1304
  // Shift operand valid for arithmetic instructions; amount must be less
  // than the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shift operand valid for logical instructions (ROR also allowed).
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1327
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1339
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1351
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1381
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  // ADRP label: +/- 4GiB in 4KiB pages. Non-constant expressions were
  // validated during parsing and are accepted here.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // 21-bit signed page count, scaled by the 4096-byte page size.
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR label: 21-bit signed byte offset.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1423
  // Append Expr to Inst, folding constants into an immediate operand.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1433
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Convert a 64-bit GPR operand to its 32-bit counterpart (same encoding
  // index in the other register class) before adding it.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Symmetric conversion: 32-bit GPR operand to its 64-bit counterpart.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1462
  // Map a scalar FP register (B/H/S/D/Q) of the given width to the SVE Z
  // register with the same index.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  // Add a Q register re-mapped to the D register with the same index.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1496
  // Selects the row of FirstRegs used when adding a vector-list operand.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  // Translate the first register of a parsed list into the corresponding
  // register-tuple pseudo register (e.g. D0_D1 for a two-register D list).
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Column 0 holds the base register the parsed list is relative to;
    // columns 1-4 hold the tuple register for that many list elements.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1525
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Encode which of the two allowed exact FP immediates was written:
  // 0 selects the ImmIs0 entry, 1 the ImmIs1 entry.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1545
  // Add a (value, shift) operand pair for immediates with an optional
  // "LSL #Shift" modifier; falls back to the raw expression when the
  // value is not a foldable constant.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // Same, but negates the value (used by SUB aliases of ADD etc.); the
  // matcher guarantees the operand is a foldable shifted constant here.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1570
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // ADRP encodes a page index, so divide a constant target by the 4KiB
  // page size; symbolic targets are added unmodified for later fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1588
  // Scaled 12-bit unsigned offset: a constant byte offset is divided by
  // the access size; symbolic offsets pass through for later fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // The matcher has already proven these are constants, hence cast<>.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1613
  // Encode a logical (bitmask) immediate for a T-sized operation.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same, but encodes the bitwise complement (for BIC-style aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1638
1639  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1640    // Branch operands don't encode the low bits, so shift them off
1641    // here. If it's a label, however, just put it on directly as there's
1642    // not enough information now to do anything.
1643    assert(N == 1 && "Invalid number of operands!");
1644    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1645    if (!MCE) {
1646      addExpr(Inst, getImm());
1647      return;
1648    }
1649    assert(MCE && "Invalid constant immediate operand!");
1650    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1651  }
1652
1653  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1654    // Branch operands don't encode the low bits, so shift them off
1655    // here. If it's a label, however, just put it on directly as there's
1656    // not enough information now to do anything.
1657    assert(N == 1 && "Invalid number of operands!");
1658    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1659    if (!MCE) {
1660      addExpr(Inst, getImm());
1661      return;
1662    }
1663    assert(MCE && "Invalid constant immediate operand!");
1664    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1665  }
1666
1667  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1668    // Branch operands don't encode the low bits, so shift them off
1669    // here. If it's a label, however, just put it on directly as there's
1670    // not enough information now to do anything.
1671    assert(N == 1 && "Invalid number of operands!");
1672    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1673    if (!MCE) {
1674      addExpr(Inst, getImm());
1675      return;
1676    }
1677    assert(MCE && "Invalid constant immediate operand!");
1678    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1679  }
1680
  // Add the 8-bit FMOV encoding of the FP immediate (isFPImm() has
  // already established that the value is encodable).
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1686
  // The adders below each append a single immediate taken from the
  // corresponding pre-validated operand field.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }

  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1735
1736  void addShifterOperands(MCInst &Inst, unsigned N) const {
1737    assert(N == 1 && "Invalid number of operands!");
1738    unsigned Imm =
1739        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1740    Inst.addOperand(MCOperand::createImm(Imm));
1741  }
1742
1743  void addExtendOperands(MCInst &Inst, unsigned N) const {
1744    assert(N == 1 && "Invalid number of operands!");
1745    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1746    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1747    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1748    Inst.addOperand(MCOperand::createImm(Imm));
1749  }
1750
1751  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1752    assert(N == 1 && "Invalid number of operands!");
1753    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1754    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1755    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1756    Inst.addOperand(MCOperand::createImm(Imm));
1757  }
1758
  // Emit the two immediates of a register-offset memory operand: whether the
  // extend is a signed one (SXTW/SXTX), and whether the offset is shifted.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Non-zero shift amount selects the "DoShift" instruction variant.
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
1766
1767  // For 8-bit load/store instructions with a register offset, both the
1768  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1769  // they're disambiguated by whether the shift was explicit or implicit rather
1770  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // For 8-bit accesses both variants shift by 0, so the disambiguator is
    // whether an explicit amount was written (see comment above).
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1778
1779  template<int Shift>
1780  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782
1783    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1784    if (CE) {
1785      uint64_t Value = CE->getValue();
1786      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1787    } else {
1788      addExpr(Inst, getImm());
1789    }
1790  }
1791
  // For the MOV alias of MOVN: emit the bitwise-inverted 16-bit chunk
  // selected by Shift. Unlike the MOVZ alias, only constants are accepted
  // here (cast<> asserts otherwise).
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
1800
  // Encode a rotation that must be a multiple of 90 (0/90/180/270) as its
  // index (value / 90).
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1806
  // Encode a rotation that must be 90 or 270 as its index ((value - 90) / 180).
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1812
1813  void print(raw_ostream &OS) const override;
1814
  // Create a token operand referencing (not copying) Str's storage; Str must
  // therefore outlive the operand (it comes from the lexed source buffer).
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1825
  // Create a register operand, optionally carrying an attached shift/extend
  // (defaults: LSL #0, implicit). ElementWidth is zeroed; vector registers
  // go through CreateVectorReg instead.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1844
  // Create a vector register operand (NEON or SVE data/predicate) with the
  // given element width in bits; delegates to CreateReg and then fills in
  // ElementWidth.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
1859
  // Create a vector-list operand: Count consecutive registers starting at
  // RegNum, each with NumElements lanes of ElementWidth bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
                   MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1874
  // Create a vector lane-index operand (the [N] suffix).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1883
  // Create an immediate operand from an (possibly non-constant, relocatable)
  // MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1892
1893  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1894                                                          unsigned ShiftAmount,
1895                                                          SMLoc S, SMLoc E,
1896                                                          MCContext &Ctx) {
1897    auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1898    Op->ShiftedImm .Val = Val;
1899    Op->ShiftedImm.ShiftAmount = ShiftAmount;
1900    Op->StartLoc = S;
1901    Op->EndLoc = E;
1902    return Op;
1903  }
1904
  // Create a condition-code operand (eq, ne, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1913
  // Create a floating-point immediate operand. The value is stored as the
  // sign-extended raw bit pattern; IsExact records whether the parsed text
  // converted without rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1923
  // Create a barrier operand (dsb/dmb/isb option). Str references the
  // option's spelling for diagnostics/printing and is not copied.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1936
  // Create a system-register operand carrying all three possible encodings
  // (MRS read, MSR write, and MSR-immediate PState field); the addXOperands
  // methods above pick the one the matched instruction needs.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1952
  // Create a system-instruction CR operand (cN, 0 <= N <= 15).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1961
1962  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1963                                                        StringRef Str,
1964                                                        SMLoc S,
1965                                                        MCContext &Ctx) {
1966    auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1967    Op->Prefetch.Val = Val;
1968    Op->Barrier.Data = Str.data();
1969    Op->Barrier.Length = Str.size();
1970    Op->StartLoc = S;
1971    Op->EndLoc = S;
1972    return Op;
1973  }
1974
  // Create a PSB hint operand. Str references the hint's spelling for
  // printing and is not copied.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1987
  // Create a BTI hint operand. The stored value is Val scaled into the HINT
  // immediate space: encodings 0..3 map to 32, 34, 36, 38 (BTI is an alias
  // of HINT).
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    Op->BTIHint.Val = Val << 1 | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2000
  // Create a standalone shift/extend operand (e.g. "lsl #2", "uxtw").
  // HasExplicitAmount distinguishes "uxtw #0" from a bare "uxtw".
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2012};
2013
2014} // end anonymous namespace.
2015
2016void AArch64Operand::print(raw_ostream &OS) const {
2017  switch (Kind) {
2018  case k_FPImm:
2019    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2020    if (!getFPImmIsExact())
2021      OS << " (inexact)";
2022    OS << ">";
2023    break;
2024  case k_Barrier: {
2025    StringRef Name = getBarrierName();
2026    if (!Name.empty())
2027      OS << "<barrier " << Name << ">";
2028    else
2029      OS << "<barrier invalid #" << getBarrier() << ">";
2030    break;
2031  }
2032  case k_Immediate:
2033    OS << *getImm();
2034    break;
2035  case k_ShiftedImm: {
2036    unsigned Shift = getShiftedImmShift();
2037    OS << "<shiftedimm ";
2038    OS << *getShiftedImmVal();
2039    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2040    break;
2041  }
2042  case k_CondCode:
2043    OS << "<condcode " << getCondCode() << ">";
2044    break;
2045  case k_VectorList: {
2046    OS << "<vectorlist ";
2047    unsigned Reg = getVectorListStart();
2048    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2049      OS << Reg + i << " ";
2050    OS << ">";
2051    break;
2052  }
2053  case k_VectorIndex:
2054    OS << "<vectorindex " << getVectorIndex() << ">";
2055    break;
2056  case k_SysReg:
2057    OS << "<sysreg: " << getSysReg() << '>';
2058    break;
2059  case k_Token:
2060    OS << "'" << getToken() << "'";
2061    break;
2062  case k_SysCR:
2063    OS << "c" << getSysCR();
2064    break;
2065  case k_Prefetch: {
2066    StringRef Name = getPrefetchName();
2067    if (!Name.empty())
2068      OS << "<prfop " << Name << ">";
2069    else
2070      OS << "<prfop invalid #" << getPrefetch() << ">";
2071    break;
2072  }
2073  case k_PSBHint:
2074    OS << getPSBHintName();
2075    break;
2076  case k_Register:
2077    OS << "<register " << getReg() << ">";
2078    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2079      break;
2080    LLVM_FALLTHROUGH;
2081  case k_BTIHint:
2082    OS << getBTIHintName();
2083    break;
2084  case k_ShiftExtend:
2085    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2086       << getShiftExtendAmount();
2087    if (!hasShiftExtendAmount())
2088      OS << "<imp>";
2089    OS << '>';
2090    break;
2091  }
2092}
2093
2094/// @name Auto-generated Match Functions
2095/// {
2096
2097static unsigned MatchRegisterName(StringRef Name);
2098
2099/// }
2100
// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding Q-register number; returns 0 if the name doesn't match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2137
2138/// Returns an optional pair of (#elements, element-width) if Suffix
2139/// is a valid vector kind. Where the number of elements in a vector
2140/// or the vector width is implicit or explicitly unknown (but still a
2141/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix for this kind".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    // SVE suffixes carry only an element width; the element count is
    // implementation-defined (scalable), hence always 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  // Translate the sentinel into an empty Optional.
  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2194
2195static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2196  return parseVectorKind(Suffix, VectorKind).hasValue();
2197}
2198
// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
// its Z-register number; returns 0 if the name doesn't match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2235
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to its
// P-register number; returns 0 if the name doesn't match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2256
// MCTargetAsmParser entry point; returns true on failure (anything other
// than MatchOperand_Success from tryParseRegister).
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
}
2261
// Try to parse a scalar register, recording the source range it occupied.
// EndLoc is computed as the character before the current lexer position.
OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
                                                        SMLoc &StartLoc,
                                                        SMLoc &EndLoc) {
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res;
}
2270
// Matches a register name or register alias previously defined by '.req'
// Returns the register number on success, or 0 when the name matched a
// register of a different kind than requested, or nothing at all.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  RegKind Kind) {
  unsigned RegNum = 0;
  // Each matcher returns as soon as the name is recognized at all; a name
  // that exists but has the wrong kind yields 0 rather than falling through.
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return Kind == RegKind::Scalar ? RegNum : 0;

  // NOTE: RegNum is necessarily 0 here (all non-zero matches returned
  // above), so this guard is always taken; kept for clarity/symmetry.
  if (!RegNum) {
    // Handle a few common aliases of registers.
    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                    .Case("fp", AArch64::FP)
                    .Case("lr",  AArch64::LR)
                    .Case("x31", AArch64::XZR)
                    .Case("w31", AArch64::WZR)
                    .Default(0))
      return Kind == RegKind::Scalar ? RegNum : 0;

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;

    // set RegNum if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
2311
2312/// tryParseScalarRegister - Try to parse a register name. The token must be an
2313/// Identifier when called, and if it is a register name the token is eaten and
2314/// the register is added to the operand list.
2315OperandMatchResultTy
2316AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2317  MCAsmParser &Parser = getParser();
2318  const AsmToken &Tok = Parser.getTok();
2319  if (Tok.isNot(AsmToken::Identifier))
2320    return MatchOperand_NoMatch;
2321
2322  std::string lowerCase = Tok.getString().lower();
2323  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2324  if (Reg == 0)
2325    return MatchOperand_NoMatch;
2326
2327  RegNum = Reg;
2328  Parser.Lex(); // Eat identifier token.
2329  return MatchOperand_Success;
2330}
2331
2332/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2333OperandMatchResultTy
2334AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2335  MCAsmParser &Parser = getParser();
2336  SMLoc S = getLoc();
2337
2338  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2339    Error(S, "Expected cN operand where 0 <= N <= 15");
2340    return MatchOperand_ParseFail;
2341  }
2342
2343  StringRef Tok = Parser.getTok().getIdentifier();
2344  if (Tok[0] != 'c' && Tok[0] != 'C') {
2345    Error(S, "Expected cN operand where 0 <= N <= 15");
2346    return MatchOperand_ParseFail;
2347  }
2348
2349  uint32_t CRNum;
2350  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2351  if (BadNum || CRNum > 15) {
2352    Error(S, "Expected cN operand where 0 <= N <= 15");
2353    return MatchOperand_ParseFail;
2354  }
2355
2356  Parser.Lex(); // Eat identifier token.
2357  Operands.push_back(
2358      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2359  return MatchOperand_Success;
2360}
2361
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a named hint or a raw immediate ('#N' or a bare integer).
/// The template parameter selects between the SVE and base prefetch tables,
/// which also differ in their maximum encoding (15 vs 31).
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Name -> encoding, using the table matching the template parameter.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Encoding -> canonical name, for pretty-printing a raw immediate that
  // happens to have a named equivalent.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name if one exists, else an empty string.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2431
2432/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2433OperandMatchResultTy
2434AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2435  MCAsmParser &Parser = getParser();
2436  SMLoc S = getLoc();
2437  const AsmToken &Tok = Parser.getTok();
2438  if (Tok.isNot(AsmToken::Identifier)) {
2439    TokError("invalid operand for instruction");
2440    return MatchOperand_ParseFail;
2441  }
2442
2443  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2444  if (!PSB) {
2445    TokError("invalid operand for instruction");
2446    return MatchOperand_ParseFail;
2447  }
2448
2449  Operands.push_back(AArch64Operand::CreatePSBHint(
2450      PSB->Encoding, Tok.getString(), S, getContext()));
2451  Parser.Lex(); // Eat identifier token.
2452  return MatchOperand_Success;
2453}
2454
2455/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2456OperandMatchResultTy
2457AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2458  MCAsmParser &Parser = getParser();
2459  SMLoc S = getLoc();
2460  const AsmToken &Tok = Parser.getTok();
2461  if (Tok.isNot(AsmToken::Identifier)) {
2462    TokError("invalid operand for instruction");
2463    return MatchOperand_ParseFail;
2464  }
2465
2466  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2467  if (!BTI) {
2468    TokError("invalid operand for instruction");
2469    return MatchOperand_ParseFail;
2470  }
2471
2472  Operands.push_back(AArch64Operand::CreateBTIHint(
2473      BTI->Encoding, Tok.getString(), S, getContext()));
2474  Parser.Lex(); // Eat identifier token.
2475  return MatchOperand_Success;
2476}
2477
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
/// Accepts a symbolic expression with an optional leading '#'. A plain
/// symbol (no modifier) is implicitly wrapped as an ELF :abs_page: (VK_ABS_PAGE)
/// reference; otherwise the modifier must be one of the page/gotpage kinds.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2529
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
/// A plain symbol (no modifier) is wrapped as VK_ABS; any explicit modifier
/// is rejected for ADR.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2566
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts an optional '#', an optional '-', then either a hexadecimal
/// 8-bit FP8-style encoding (0x..) or a literal real/integer value. When
/// AddFPZeroAsLiteral is set, +0.0 is pushed as the two tokens "#0" ".0"
/// instead of an FPImm operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The encoded form is an unsigned 8-bit value; negation is meaningless.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2624
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No comma follows, so there is no shift suffix: plain immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  // In that case drop the redundant shift and emit a plain immediate.
  if (ShiftAmount == 0 && Imm != nullptr) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2688
2689/// parseCondCodeString - Parse a Condition Code string.
2690AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2691  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2692                    .Case("eq", AArch64CC::EQ)
2693                    .Case("ne", AArch64CC::NE)
2694                    .Case("cs", AArch64CC::HS)
2695                    .Case("hs", AArch64CC::HS)
2696                    .Case("cc", AArch64CC::LO)
2697                    .Case("lo", AArch64CC::LO)
2698                    .Case("mi", AArch64CC::MI)
2699                    .Case("pl", AArch64CC::PL)
2700                    .Case("vs", AArch64CC::VS)
2701                    .Case("vc", AArch64CC::VC)
2702                    .Case("hi", AArch64CC::HI)
2703                    .Case("ls", AArch64CC::LS)
2704                    .Case("ge", AArch64CC::GE)
2705                    .Case("lt", AArch64CC::LT)
2706                    .Case("gt", AArch64CC::GT)
2707                    .Case("le", AArch64CC::LE)
2708                    .Case("al", AArch64CC::AL)
2709                    .Case("nv", AArch64CC::NV)
2710                    .Default(AArch64CC::Invalid);
2711
2712  if (CC == AArch64CC::Invalid &&
2713      getSTI().getFeatureBits()[AArch64::FeatureSVE])
2714    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2715                    .Case("none",  AArch64CC::EQ)
2716                    .Case("any",   AArch64CC::NE)
2717                    .Case("nlast", AArch64CC::HS)
2718                    .Case("last",  AArch64CC::LO)
2719                    .Case("first", AArch64CC::MI)
2720                    .Case("nfrst", AArch64CC::PL)
2721                    .Case("pmore", AArch64CC::HI)
2722                    .Case("plast", AArch64CC::LS)
2723                    .Case("tcont", AArch64CC::GE)
2724                    .Case("tstop", AArch64CC::LT)
2725                    .Default(AArch64CC::Invalid);
2726
2727  return CC;
2728}
2729
2730/// parseCondCode - Parse a Condition Code operand.
2731bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2732                                     bool invertCondCode) {
2733  MCAsmParser &Parser = getParser();
2734  SMLoc S = getLoc();
2735  const AsmToken &Tok = Parser.getTok();
2736  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2737
2738  StringRef Cond = Tok.getString();
2739  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2740  if (CC == AArch64CC::Invalid)
2741    return TokError("invalid condition code");
2742  Parser.Lex(); // Eat identifier token.
2743
2744  if (invertCondCode) {
2745    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2746      return TokError("condition codes AL and NV are invalid for this instruction");
2747    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2748  }
2749
2750  Operands.push_back(
2751      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2752  return false;
2753}
2754
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present. Handles both shift operators (lsl/lsr/asr/ror/msl) and
/// extend operators (uxtb/.../sxtx); the immediate is mandatory for shifts
/// and optional (implicit #0) for extends.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  // The '#' before the amount is optional.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The expression must fold to a constant shift amount.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2828
// Table mapping architecture-extension names to the subtarget features they
// enable. An empty feature set marks an extension that is recognized but not
// yet supported (see FIXME below).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2862
2863static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2864  if (FBS[AArch64::HasV8_1aOps])
2865    Str += "ARMv8.1a";
2866  else if (FBS[AArch64::HasV8_2aOps])
2867    Str += "ARMv8.2a";
2868  else if (FBS[AArch64::HasV8_3aOps])
2869    Str += "ARMv8.3a";
2870  else if (FBS[AArch64::HasV8_4aOps])
2871    Str += "ARMv8.4a";
2872  else if (FBS[AArch64::HasV8_5aOps])
2873    Str += "ARMv8.5a";
2874  else if (FBS[AArch64::HasV8_6aOps])
2875    Str += "ARMv8.6a";
2876  else {
2877    auto ext = std::find_if(std::begin(ExtensionMap),
2878      std::end(ExtensionMap),
2879      [&](const Extension& e)
2880      // Use & in case multiple features are enabled
2881      { return (FBS & e.Features) != FeatureBitset(); }
2882    );
2883
2884    Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2885  }
2886}
2887
2888void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2889                                      SMLoc S) {
2890  const uint16_t Op2 = Encoding & 7;
2891  const uint16_t Cm = (Encoding & 0x78) >> 3;
2892  const uint16_t Cn = (Encoding & 0x780) >> 7;
2893  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2894
2895  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2896
2897  Operands.push_back(
2898      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2899  Operands.push_back(
2900      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2901  Operands.push_back(
2902      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2903  Expr = MCConstantExpr::create(Op2, getContext());
2904  Operands.push_back(
2905      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2906}
2907
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true (with a diagnostic emitted) on failure.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  // All of these aliases are emitted as a "sys" mnemonic with the operands
  // that createSysAlias derives from the looked-up encoding.
  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each mnemonic has its own operand-name table; a hit that fails the
  // feature check produces a "requires <feature>" diagnostic.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The prediction-restriction mnemonic selects op2; combine it with the
    // table encoding before emitting the SYS operands.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operand names containing "all" (e.g. TLBI *ALL* forms) take no register;
  // everything else requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3005
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either a #imm in [0, 15] or a named option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name, if this encoding has one, for
    // printing purposes.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3062
3063OperandMatchResultTy
3064AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3065  MCAsmParser &Parser = getParser();
3066  const AsmToken &Tok = Parser.getTok();
3067
3068  if (Tok.isNot(AsmToken::Identifier))
3069    return MatchOperand_NoMatch;
3070
3071  int MRSReg, MSRReg;
3072  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3073  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3074    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3075    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3076  } else
3077    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3078
3079  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3080  unsigned PStateImm = -1;
3081  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3082    PStateImm = PState->Encoding;
3083
3084  Operands.push_back(
3085      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3086                                   PStateImm, getContext()));
3087  Parser.Lex(); // Eat identifier
3088
3089  return MatchOperand_Success;
3090}
3091
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (following the parseRegister convention, not
/// the OperandMatchResultTy one).
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Derive the element width from the kind suffix (empty Kind is valid and
  // maps to a default).
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A trailing '[index]' is optional; only a malformed index is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3124
3125OperandMatchResultTy
3126AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3127  SMLoc SIdx = getLoc();
3128  if (parseOptionalToken(AsmToken::LBrac)) {
3129    const MCExpr *ImmVal;
3130    if (getParser().parseExpression(ImmVal))
3131      return MatchOperand_NoMatch;
3132    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3133    if (!MCE) {
3134      TokError("immediate value expected for vector index");
3135      return MatchOperand_ParseFail;;
3136    }
3137
3138    SMLoc E = getLoc();
3139
3140    if (parseToken(AsmToken::RBrac, "']' expected"))
3141      return MatchOperand_ParseFail;;
3142
3143    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3144                                                         E, getContext()));
3145    return MatchOperand_Success;
3146  }
3147
3148  return MatchOperand_NoMatch;
3149}
3150
3151// tryParseVectorRegister - Try to parse a vector register name with
3152// optional kind specifier. If it is a register specifier, eat the token
3153// and return it.
3154OperandMatchResultTy
3155AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3156                                         RegKind MatchKind) {
3157  MCAsmParser &Parser = getParser();
3158  const AsmToken &Tok = Parser.getTok();
3159
3160  if (Tok.isNot(AsmToken::Identifier))
3161    return MatchOperand_NoMatch;
3162
3163  StringRef Name = Tok.getString();
3164  // If there is a kind specifier, it's separated from the register name by
3165  // a '.'.
3166  size_t Start = 0, Next = Name.find('.');
3167  StringRef Head = Name.slice(Start, Next);
3168  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3169
3170  if (RegNum) {
3171    if (Next != StringRef::npos) {
3172      Kind = Name.slice(Next, StringRef::npos);
3173      if (!isValidVectorKind(Kind, MatchKind)) {
3174        TokError("invalid vector kind qualifier");
3175        return MatchOperand_ParseFail;
3176      }
3177    }
3178    Parser.Lex(); // Eat the register token.
3179
3180    Reg = RegNum;
3181    return MatchOperand_Success;
3182  }
3183
3184  return MatchOperand_NoMatch;
3185}
3186
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a '/m' (merging) or '/z' (zeroing) qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
    AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3239
3240/// parseRegister - Parse a register operand.
3241bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3242  // Try for a Neon vector register.
3243  if (!tryParseNeonVectorRegister(Operands))
3244    return false;
3245
3246  // Otherwise try for a scalar register.
3247  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3248    return false;
3249
3250  return true;
3251}
3252
/// parseSymbolicImmVal - Parse an immediate expression that may carry an ELF
/// relocation specifier of the form ':<specifier>:<expr>'. Returns true on
/// failure.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  // A leading ':' introduces a relocation specifier, e.g. ':lo12:sym'.
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Map the (case-insensitive) specifier name to its variant kind.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID)
;
    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    // The specifier is closed by a second ':' before the expression proper.
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the specifier survives into relocation emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3330
/// tryParseVectorList - Parse a vector-list operand: either the comma form
/// '{ v0.t, v1.t, ... }' or the dash-range form '{ v0.t - v3.t }'.
/// \tparam VectorKind   the register kind this list must contain (e.g. Neon
///                      vs. SVE vectors).
/// \param ExpectMatch   when true, a non-vector token after '{' is a hard
///                      error; when false it is reported as NoMatch so a
///                      different list flavour can be tried on the same input.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  // Every vector list starts with '{'.
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around the register parse function: parses one vector register
  // (with optional element-kind suffix) and maps the result onto the desired
  // diagnostics. NoMatchIsError promotes NoMatch to a hard error; it is set
  // for every register after the first, since by then we are committed to
  // this list flavour.
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      // A successfully parsed register must carry a kind that is valid for
      // this list flavour; tryParseVectorRegister guarantees this.
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: '{ va.t - vb.t }'.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers may wrap around modulo 32, e.g. '{ v30.8b - v1.8b }'.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: '{ va.t, vb.t, ... }'.
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Hardware vector lists hold at most four registers.
  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  // An empty Kind (no '.t' suffix) leaves NumElements/ElementWidth as 0.
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3451
3452/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3453bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3454  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3455  if (ParseRes != MatchOperand_Success)
3456    return true;
3457
3458  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3459}
3460
3461OperandMatchResultTy
3462AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3463  SMLoc StartLoc = getLoc();
3464
3465  unsigned RegNum;
3466  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3467  if (Res != MatchOperand_Success)
3468    return Res;
3469
3470  if (!parseOptionalToken(AsmToken::Comma)) {
3471    Operands.push_back(AArch64Operand::CreateReg(
3472        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3473    return MatchOperand_Success;
3474  }
3475
3476  parseOptionalToken(AsmToken::Hash);
3477
3478  if (getParser().getTok().isNot(AsmToken::Integer)) {
3479    Error(getLoc(), "index must be absent or #0");
3480    return MatchOperand_ParseFail;
3481  }
3482
3483  const MCExpr *ImmVal;
3484  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3485      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3486    Error(getLoc(), "index must be absent or #0");
3487    return MatchOperand_ParseFail;
3488  }
3489
3490  Operands.push_back(AArch64Operand::CreateReg(
3491      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3492  return MatchOperand_Success;
3493}
3494
3495template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3496OperandMatchResultTy
3497AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3498  SMLoc StartLoc = getLoc();
3499
3500  unsigned RegNum;
3501  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3502  if (Res != MatchOperand_Success)
3503    return Res;
3504
3505  // No shift/extend is the default.
3506  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3507    Operands.push_back(AArch64Operand::CreateReg(
3508        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3509    return MatchOperand_Success;
3510  }
3511
3512  // Eat the comma
3513  getParser().Lex();
3514
3515  // Match the shift
3516  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3517  Res = tryParseOptionalShiftExtend(ExtOpnd);
3518  if (Res != MatchOperand_Success)
3519    return Res;
3520
3521  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3522  Operands.push_back(AArch64Operand::CreateReg(
3523      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3524      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3525      Ext->hasShiftExtendAmount()));
3526
3527  return MatchOperand_Success;
3528}
3529
3530bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3531  MCAsmParser &Parser = getParser();
3532
3533  // Some SVE instructions have a decoration after the immediate, i.e.
3534  // "mul vl". We parse them here and add tokens, which must be present in the
3535  // asm string in the tablegen instruction.
3536  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3537  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3538  if (!Parser.getTok().getString().equals_lower("mul") ||
3539      !(NextIsVL || NextIsHash))
3540    return true;
3541
3542  Operands.push_back(
3543    AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3544  Parser.Lex(); // Eat the "mul"
3545
3546  if (NextIsVL) {
3547    Operands.push_back(
3548        AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3549    Parser.Lex(); // Eat the "vl"
3550    return false;
3551  }
3552
3553  if (NextIsHash) {
3554    Parser.Lex(); // Eat the #
3555    SMLoc S = getLoc();
3556
3557    // Parse immediate operand.
3558    const MCExpr *ImmVal;
3559    if (!Parser.parseExpression(ImmVal))
3560      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3561        Operands.push_back(AArch64Operand::CreateImm(
3562            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3563            getContext()));
3564        return MatchOperand_Success;
3565      }
3566  }
3567
3568  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3569}
3570
3571/// parseOperand - Parse a arm instruction operand.  For now this parses the
3572/// operand regardless of the mnemonic.
3573bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3574                                  bool invertCondCode) {
3575  MCAsmParser &Parser = getParser();
3576
3577  OperandMatchResultTy ResTy =
3578      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3579
3580  // Check if the current operand has a custom associated parser, if so, try to
3581  // custom parse the operand, or fallback to the general approach.
3582  if (ResTy == MatchOperand_Success)
3583    return false;
3584  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3585  // there was a match, but an error occurred, in which case, just return that
3586  // the operand parsing failed.
3587  if (ResTy == MatchOperand_ParseFail)
3588    return true;
3589
3590  // Nothing custom, so do general case parsing.
3591  SMLoc S, E;
3592  switch (getLexer().getKind()) {
3593  default: {
3594    SMLoc S = getLoc();
3595    const MCExpr *Expr;
3596    if (parseSymbolicImmVal(Expr))
3597      return Error(S, "invalid operand");
3598
3599    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3600    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3601    return false;
3602  }
3603  case AsmToken::LBrac: {
3604    SMLoc Loc = Parser.getTok().getLoc();
3605    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3606                                                   getContext()));
3607    Parser.Lex(); // Eat '['
3608
3609    // There's no comma after a '[', so we can parse the next operand
3610    // immediately.
3611    return parseOperand(Operands, false, false);
3612  }
3613  case AsmToken::LCurly:
3614    return parseNeonVectorList(Operands);
3615  case AsmToken::Identifier: {
3616    // If we're expecting a Condition Code operand, then just parse that.
3617    if (isCondCode)
3618      return parseCondCode(Operands, invertCondCode);
3619
3620    // If it's a register name, parse it.
3621    if (!parseRegister(Operands))
3622      return false;
3623
3624    // See if this is a "mul vl" decoration or "mul #<int>" operand used
3625    // by SVE instructions.
3626    if (!parseOptionalMulOperand(Operands))
3627      return false;
3628
3629    // This could be an optional "shift" or "extend" operand.
3630    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3631    // We can only continue if no tokens were eaten.
3632    if (GotShift != MatchOperand_NoMatch)
3633      return GotShift;
3634
3635    // This was not a register so parse other operands that start with an
3636    // identifier (like labels) as expressions and create them as immediates.
3637    const MCExpr *IdVal;
3638    S = getLoc();
3639    if (getParser().parseExpression(IdVal))
3640      return true;
3641    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3642    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3643    return false;
3644  }
3645  case AsmToken::Integer:
3646  case AsmToken::Real:
3647  case AsmToken::Hash: {
3648    // #42 -> immediate.
3649    S = getLoc();
3650
3651    parseOptionalToken(AsmToken::Hash);
3652
3653    // Parse a negative sign
3654    bool isNegative = false;
3655    if (Parser.getTok().is(AsmToken::Minus)) {
3656      isNegative = true;
3657      // We need to consume this token only when we have a Real, otherwise
3658      // we let parseSymbolicImmVal take care of it
3659      if (Parser.getLexer().peekTok().is(AsmToken::Real))
3660        Parser.Lex();
3661    }
3662
3663    // The only Real that should come through here is a literal #0.0 for
3664    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3665    // so convert the value.
3666    const AsmToken &Tok = Parser.getTok();
3667    if (Tok.is(AsmToken::Real)) {
3668      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3669      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3670      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3671          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3672          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3673        return TokError("unexpected floating point literal");
3674      else if (IntVal != 0 || isNegative)
3675        return TokError("expected floating-point constant #0.0");
3676      Parser.Lex(); // Eat the token.
3677
3678      Operands.push_back(
3679          AArch64Operand::CreateToken("#0", false, S, getContext()));
3680      Operands.push_back(
3681          AArch64Operand::CreateToken(".0", false, S, getContext()));
3682      return false;
3683    }
3684
3685    const MCExpr *ImmVal;
3686    if (parseSymbolicImmVal(ImmVal))
3687      return true;
3688
3689    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3690    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3691    return false;
3692  }
3693  case AsmToken::Equal: {
3694    SMLoc Loc = getLoc();
3695    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3696      return TokError("unexpected token in operand");
3697    Parser.Lex(); // Eat '='
3698    const MCExpr *SubExprVal;
3699    if (getParser().parseExpression(SubExprVal))
3700      return true;
3701
3702    if (Operands.size() < 2 ||
3703        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3704      return Error(Loc, "Only valid when first operand is register");
3705
3706    bool IsXReg =
3707        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3708            Operands[1]->getReg());
3709
3710    MCContext& Ctx = getContext();
3711    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3712    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3713    if (isa<MCConstantExpr>(SubExprVal)) {
3714      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3715      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3716      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3717        ShiftAmt += 16;
3718        Imm >>= 16;
3719      }
3720      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3721          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3722          Operands.push_back(AArch64Operand::CreateImm(
3723                     MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3724        if (ShiftAmt)
3725          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3726                     ShiftAmt, true, S, E, Ctx));
3727        return false;
3728      }
3729      APInt Simm = APInt(64, Imm << ShiftAmt);
3730      // check if the immediate is an unsigned or signed 32-bit int for W regs
3731      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3732        return Error(Loc, "Immediate too large for register");
3733    }
3734    // If it is a label or an imm that cannot fit in a movz, put it into CP.
3735    const MCExpr *CPLoc =
3736        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3737    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3738    return false;
3739  }
3740  }
3741}
3742
3743bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3744                                 const MCParsedAsmOperand &Op2) const {
3745  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3746  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3747  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3748      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3749    return MCTargetAsmParser::regsEqual(Op1, Op2);
3750
3751  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3752         "Testing equality of non-scalar registers not supported");
3753
3754  // Check if a registers match their sub/super register classes.
3755  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3756    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3757  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3758    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3759  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3760    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3761  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3762    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3763
3764  return false;
3765}
3766
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Returns true on failure (a diagnostic has been emitted or the
/// statement was consumed by a directive).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Rewrite the legacy fused conditional-branch spellings ("beq", "bne", ...)
  // into the canonical "b.<cond>" form so the '.'-splitting below produces a
  // condition-code suffix.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point diagnostics at the suffix itself within the original spelling.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    // Emit the '.' as its own suffix token so the matcher sees "b" "." <cc>.
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. N tracks the 1-based operand
      // position so the condition-code flags above can target the right slot.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3902
3903static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3904  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3905  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3906         (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3907         (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3908         (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3909         (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3910         (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3911}
3912
3913// FIXME: This entire function is a giant hack to provide us with decent
3914// operand range validation/diagnostics until TableGen/MC can be extended
3915// to support autogeneration of this kind of validation.
3916bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3917                                           SmallVectorImpl<SMLoc> &Loc) {
3918  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3919  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3920
3921  // A prefix only applies to the instruction following it.  Here we extract
3922  // prefix information for the next instruction before validating the current
3923  // one so that in the case of failure we don't erronously continue using the
3924  // current prefix.
3925  PrefixInfo Prefix = NextPrefix;
3926  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3927
3928  // Before validating the instruction in isolation we run through the rules
3929  // applicable when it follows a prefix instruction.
3930  // NOTE: brk & hlt can be prefixed but require no additional validation.
3931  if (Prefix.isActive() &&
3932      (Inst.getOpcode() != AArch64::BRK) &&
3933      (Inst.getOpcode() != AArch64::HLT)) {
3934
3935    // Prefixed intructions must have a destructive operand.
3936    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3937        AArch64::NotDestructive)
3938      return Error(IDLoc, "instruction is unpredictable when following a"
3939                   " movprfx, suggest replacing movprfx with mov");
3940
3941    // Destination operands must match.
3942    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3943      return Error(Loc[0], "instruction is unpredictable when following a"
3944                   " movprfx writing to a different destination");
3945
3946    // Destination operand must not be used in any other location.
3947    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3948      if (Inst.getOperand(i).isReg() &&
3949          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3950          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3951        return Error(Loc[0], "instruction is unpredictable when following a"
3952                     " movprfx and destination also used as non-destructive"
3953                     " source");
3954    }
3955
3956    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3957    if (Prefix.isPredicated()) {
3958      int PgIdx = -1;
3959
3960      // Find the instructions general predicate.
3961      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3962        if (Inst.getOperand(i).isReg() &&
3963            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3964          PgIdx = i;
3965          break;
3966        }
3967
3968      // Instruction must be predicated if the movprfx is predicated.
3969      if (PgIdx == -1 ||
3970          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3971        return Error(IDLoc, "instruction is unpredictable when following a"
3972                     " predicated movprfx, suggest using unpredicated movprfx");
3973
3974      // Instruction must use same general predicate as the movprfx.
3975      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3976        return Error(IDLoc, "instruction is unpredictable when following a"
3977                     " predicated movprfx using a different general predicate");
3978
3979      // Instruction element type must match the movprfx.
3980      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3981        return Error(IDLoc, "instruction is unpredictable when following a"
3982                     " predicated movprfx with a different element size");
3983    }
3984  }
3985
3986  // Check for indexed addressing modes w/ the base register being the
3987  // same as a destination/source register or pair load where
3988  // the Rt == Rt2. All of those are undefined behaviour.
3989  switch (Inst.getOpcode()) {
3990  case AArch64::LDPSWpre:
3991  case AArch64::LDPWpost:
3992  case AArch64::LDPWpre:
3993  case AArch64::LDPXpost:
3994  case AArch64::LDPXpre: {
3995    unsigned Rt = Inst.getOperand(1).getReg();
3996    unsigned Rt2 = Inst.getOperand(2).getReg();
3997    unsigned Rn = Inst.getOperand(3).getReg();
3998    if (RI->isSubRegisterEq(Rn, Rt))
3999      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4000                           "is also a destination");
4001    if (RI->isSubRegisterEq(Rn, Rt2))
4002      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4003                           "is also a destination");
4004    LLVM_FALLTHROUGH;
4005  }
4006  case AArch64::LDPDi:
4007  case AArch64::LDPQi:
4008  case AArch64::LDPSi:
4009  case AArch64::LDPSWi:
4010  case AArch64::LDPWi:
4011  case AArch64::LDPXi: {
4012    unsigned Rt = Inst.getOperand(0).getReg();
4013    unsigned Rt2 = Inst.getOperand(1).getReg();
4014    if (Rt == Rt2)
4015      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4016    break;
4017  }
4018  case AArch64::LDPDpost:
4019  case AArch64::LDPDpre:
4020  case AArch64::LDPQpost:
4021  case AArch64::LDPQpre:
4022  case AArch64::LDPSpost:
4023  case AArch64::LDPSpre:
4024  case AArch64::LDPSWpost: {
4025    unsigned Rt = Inst.getOperand(1).getReg();
4026    unsigned Rt2 = Inst.getOperand(2).getReg();
4027    if (Rt == Rt2)
4028      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4029    break;
4030  }
4031  case AArch64::STPDpost:
4032  case AArch64::STPDpre:
4033  case AArch64::STPQpost:
4034  case AArch64::STPQpre:
4035  case AArch64::STPSpost:
4036  case AArch64::STPSpre:
4037  case AArch64::STPWpost:
4038  case AArch64::STPWpre:
4039  case AArch64::STPXpost:
4040  case AArch64::STPXpre: {
4041    unsigned Rt = Inst.getOperand(1).getReg();
4042    unsigned Rt2 = Inst.getOperand(2).getReg();
4043    unsigned Rn = Inst.getOperand(3).getReg();
4044    if (RI->isSubRegisterEq(Rn, Rt))
4045      return Error(Loc[0], "unpredictable STP instruction, writeback base "
4046                           "is also a source");
4047    if (RI->isSubRegisterEq(Rn, Rt2))
4048      return Error(Loc[1], "unpredictable STP instruction, writeback base "
4049                           "is also a source");
4050    break;
4051  }
4052  case AArch64::LDRBBpre:
4053  case AArch64::LDRBpre:
4054  case AArch64::LDRHHpre:
4055  case AArch64::LDRHpre:
4056  case AArch64::LDRSBWpre:
4057  case AArch64::LDRSBXpre:
4058  case AArch64::LDRSHWpre:
4059  case AArch64::LDRSHXpre:
4060  case AArch64::LDRSWpre:
4061  case AArch64::LDRWpre:
4062  case AArch64::LDRXpre:
4063  case AArch64::LDRBBpost:
4064  case AArch64::LDRBpost:
4065  case AArch64::LDRHHpost:
4066  case AArch64::LDRHpost:
4067  case AArch64::LDRSBWpost:
4068  case AArch64::LDRSBXpost:
4069  case AArch64::LDRSHWpost:
4070  case AArch64::LDRSHXpost:
4071  case AArch64::LDRSWpost:
4072  case AArch64::LDRWpost:
4073  case AArch64::LDRXpost: {
4074    unsigned Rt = Inst.getOperand(1).getReg();
4075    unsigned Rn = Inst.getOperand(2).getReg();
4076    if (RI->isSubRegisterEq(Rn, Rt))
4077      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4078                           "is also a source");
4079    break;
4080  }
4081  case AArch64::STRBBpost:
4082  case AArch64::STRBpost:
4083  case AArch64::STRHHpost:
4084  case AArch64::STRHpost:
4085  case AArch64::STRWpost:
4086  case AArch64::STRXpost:
4087  case AArch64::STRBBpre:
4088  case AArch64::STRBpre:
4089  case AArch64::STRHHpre:
4090  case AArch64::STRHpre:
4091  case AArch64::STRWpre:
4092  case AArch64::STRXpre: {
4093    unsigned Rt = Inst.getOperand(1).getReg();
4094    unsigned Rn = Inst.getOperand(2).getReg();
4095    if (RI->isSubRegisterEq(Rn, Rt))
4096      return Error(Loc[0], "unpredictable STR instruction, writeback base "
4097                           "is also a source");
4098    break;
4099  }
4100  case AArch64::STXRB:
4101  case AArch64::STXRH:
4102  case AArch64::STXRW:
4103  case AArch64::STXRX:
4104  case AArch64::STLXRB:
4105  case AArch64::STLXRH:
4106  case AArch64::STLXRW:
4107  case AArch64::STLXRX: {
4108    unsigned Rs = Inst.getOperand(0).getReg();
4109    unsigned Rt = Inst.getOperand(1).getReg();
4110    unsigned Rn = Inst.getOperand(2).getReg();
4111    if (RI->isSubRegisterEq(Rt, Rs) ||
4112        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4113      return Error(Loc[0],
4114                   "unpredictable STXR instruction, status is also a source");
4115    break;
4116  }
4117  case AArch64::STXPW:
4118  case AArch64::STXPX:
4119  case AArch64::STLXPW:
4120  case AArch64::STLXPX: {
4121    unsigned Rs = Inst.getOperand(0).getReg();
4122    unsigned Rt1 = Inst.getOperand(1).getReg();
4123    unsigned Rt2 = Inst.getOperand(2).getReg();
4124    unsigned Rn = Inst.getOperand(3).getReg();
4125    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4126        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4127      return Error(Loc[0],
4128                   "unpredictable STXP instruction, status is also a source");
4129    break;
4130  }
4131  case AArch64::LDRABwriteback:
4132  case AArch64::LDRAAwriteback: {
4133    unsigned Xt = Inst.getOperand(0).getReg();
4134    unsigned Xn = Inst.getOperand(1).getReg();
4135    if (Xt == Xn)
4136      return Error(Loc[0],
4137          "unpredictable LDRA instruction, writeback base"
4138          " is also a destination");
4139    break;
4140  }
4141  }
4142
4143
4144  // Now check immediate ranges. Separate from the above as there is overlap
4145  // in the instructions being checked and this keeps the nested conditionals
4146  // to a minimum.
4147  switch (Inst.getOpcode()) {
4148  case AArch64::ADDSWri:
4149  case AArch64::ADDSXri:
4150  case AArch64::ADDWri:
4151  case AArch64::ADDXri:
4152  case AArch64::SUBSWri:
4153  case AArch64::SUBSXri:
4154  case AArch64::SUBWri:
4155  case AArch64::SUBXri: {
4156    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4157    // some slight duplication here.
4158    if (Inst.getOperand(2).isExpr()) {
4159      const MCExpr *Expr = Inst.getOperand(2).getExpr();
4160      AArch64MCExpr::VariantKind ELFRefKind;
4161      MCSymbolRefExpr::VariantKind DarwinRefKind;
4162      int64_t Addend;
4163      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4164
4165        // Only allow these with ADDXri.
4166        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4167             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4168            Inst.getOpcode() == AArch64::ADDXri)
4169          return false;
4170
4171        // Only allow these with ADDXri/ADDWri
4172        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4173             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4174             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4175             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4176             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4177             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4178             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4179             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4180             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4181             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4182            (Inst.getOpcode() == AArch64::ADDXri ||
4183             Inst.getOpcode() == AArch64::ADDWri))
4184          return false;
4185
4186        // Don't allow symbol refs in the immediate field otherwise
4187        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4188        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4189        // 'cmp w0, 'borked')
4190        return Error(Loc.back(), "invalid immediate expression");
4191      }
4192      // We don't validate more complex expressions here
4193    }
4194    return false;
4195  }
4196  default:
4197    return false;
4198  }
4199}
4200
// Forward declaration; the definition is emitted by the tablegen'd asm
// matcher. Used below (Match_MnemonicFail) to append a "did you mean"
// suggestion for an unrecognized mnemonic, filtered by available features.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
4204
/// Report a human-readable diagnostic at \p Loc for the matcher error code
/// \p ErrCode produced by instruction matching.
///
/// \param Loc       Location the diagnostic should point at.
/// \param ErrCode   One of the Match_* codes from the generated matcher.
/// \param ErrorInfo For Match_InvalidTiedOperand, the index of the offending
///                  operand in \p Operands; unused for other codes.
/// \param Operands  Parsed operand list; Operands[0] is the mnemonic token
///                  (used for spell-check suggestions on Match_MnemonicFail).
/// \returns true always, since an error is emitted in every case.
///
/// NOTE(review): the diagnostic strings here are matched verbatim by the
/// assembler regression tests, so they must not be edited casually.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  // A tied operand did not match its destination; the required relationship
  // (same reg, 32-bit sub-reg, or 64-bit super-reg) is recorded on the
  // operand itself.
  case Match_InvalidTiedOperand: {
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Add/sub and logical operand-form errors.
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-operand index out of range. The required multiple and bounds are
  // encoded in the Match_* name (e.g. Indexed4SImm7 = multiple of 4, signed
  // 7-bit scaled immediate).
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Bad extend/shift on a register-offset memory operand (W = 32-bit index
  // register, X = 64-bit; the trailing number is the access size in bits).
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  // Unsigned scaled-offset loads/stores (offset must be a multiple of the
  // access size, 12-bit scaled field).
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range errors.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE add/sub/cpy immediates: an 8-bit value optionally left-shifted by 8.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector lane-index errors.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  // Unknown mnemonic: spell-check against the mnemonics valid for the
  // currently-enabled subtarget features and suggest the closest match.
  case Match_MnemonicFail: {
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Shifted GPR64 operands (e.g. scatter/gather offsets); the required shift
  // amount is log2 of the element size.
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE vector-plus-extend addressing-mode errors.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE vector-register element-width errors.
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  // Restricted SVE vector registers (3-bit = z0..z7, 4-bit = z0..z15
  // encodings used by indexed multiplies etc.).
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  // SVE predicate operand errors.
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  // SVE exact FP-immediate operands (only two encodable values each).
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
4526
// Forward declaration; the definition is emitted by the tablegen'd asm
// matcher. Maps a subtarget feature bit value to its user-facing name.
static const char *getSubtargetFeatureName(uint64_t Val);
4528
4529bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4530                                               OperandVector &Operands,
4531                                               MCStreamer &Out,
4532                                               uint64_t &ErrorInfo,
4533                                               bool MatchingInlineAsm) {
4534  assert(!Operands.empty() && "Unexpect empty operand list!");
4535  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4536  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4537
4538  StringRef Tok = Op.getToken();
4539  unsigned NumOperands = Operands.size();
4540
4541  if (NumOperands == 4 && Tok == "lsl") {
4542    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4543    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4544    if (Op2.isScalarReg() && Op3.isImm()) {
4545      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4546      if (Op3CE) {
4547        uint64_t Op3Val = Op3CE->getValue();
4548        uint64_t NewOp3Val = 0;
4549        uint64_t NewOp4Val = 0;
4550        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4551                Op2.getReg())) {
4552          NewOp3Val = (32 - Op3Val) & 0x1f;
4553          NewOp4Val = 31 - Op3Val;
4554        } else {
4555          NewOp3Val = (64 - Op3Val) & 0x3f;
4556          NewOp4Val = 63 - Op3Val;
4557        }
4558
4559        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4560        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4561
4562        Operands[0] = AArch64Operand::CreateToken(
4563            "ubfm", false, Op.getStartLoc(), getContext());
4564        Operands.push_back(AArch64Operand::CreateImm(
4565            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4566        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4567                                                Op3.getEndLoc(), getContext());
4568      }
4569    }
4570  } else if (NumOperands == 4 && Tok == "bfc") {
4571    // FIXME: Horrible hack to handle BFC->BFM alias.
4572    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4573    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4574    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4575
4576    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4577      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4578      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4579
4580      if (LSBCE && WidthCE) {
4581        uint64_t LSB = LSBCE->getValue();
4582        uint64_t Width = WidthCE->getValue();
4583
4584        uint64_t RegWidth = 0;
4585        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4586                Op1.getReg()))
4587          RegWidth = 64;
4588        else
4589          RegWidth = 32;
4590
4591        if (LSB >= RegWidth)
4592          return Error(LSBOp.getStartLoc(),
4593                       "expected integer in range [0, 31]");
4594        if (Width < 1 || Width > RegWidth)
4595          return Error(WidthOp.getStartLoc(),
4596                       "expected integer in range [1, 32]");
4597
4598        uint64_t ImmR = 0;
4599        if (RegWidth == 32)
4600          ImmR = (32 - LSB) & 0x1f;
4601        else
4602          ImmR = (64 - LSB) & 0x3f;
4603
4604        uint64_t ImmS = Width - 1;
4605
4606        if (ImmR != 0 && ImmS >= ImmR)
4607          return Error(WidthOp.getStartLoc(),
4608                       "requested insert overflows register");
4609
4610        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4611        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4612        Operands[0] = AArch64Operand::CreateToken(
4613              "bfm", false, Op.getStartLoc(), getContext());
4614        Operands[2] = AArch64Operand::CreateReg(
4615            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4616            SMLoc(), SMLoc(), getContext());
4617        Operands[3] = AArch64Operand::CreateImm(
4618            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4619        Operands.emplace_back(
4620            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4621                                      WidthOp.getEndLoc(), getContext()));
4622      }
4623    }
4624  } else if (NumOperands == 5) {
4625    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4626    // UBFIZ -> UBFM aliases.
4627    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4628      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4629      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4630      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4631
4632      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4633        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4634        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4635
4636        if (Op3CE && Op4CE) {
4637          uint64_t Op3Val = Op3CE->getValue();
4638          uint64_t Op4Val = Op4CE->getValue();
4639
4640          uint64_t RegWidth = 0;
4641          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4642                  Op1.getReg()))
4643            RegWidth = 64;
4644          else
4645            RegWidth = 32;
4646
4647          if (Op3Val >= RegWidth)
4648            return Error(Op3.getStartLoc(),
4649                         "expected integer in range [0, 31]");
4650          if (Op4Val < 1 || Op4Val > RegWidth)
4651            return Error(Op4.getStartLoc(),
4652                         "expected integer in range [1, 32]");
4653
4654          uint64_t NewOp3Val = 0;
4655          if (RegWidth == 32)
4656            NewOp3Val = (32 - Op3Val) & 0x1f;
4657          else
4658            NewOp3Val = (64 - Op3Val) & 0x3f;
4659
4660          uint64_t NewOp4Val = Op4Val - 1;
4661
4662          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4663            return Error(Op4.getStartLoc(),
4664                         "requested insert overflows register");
4665
4666          const MCExpr *NewOp3 =
4667              MCConstantExpr::create(NewOp3Val, getContext());
4668          const MCExpr *NewOp4 =
4669              MCConstantExpr::create(NewOp4Val, getContext());
4670          Operands[3] = AArch64Operand::CreateImm(
4671              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4672          Operands[4] = AArch64Operand::CreateImm(
4673              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4674          if (Tok == "bfi")
4675            Operands[0] = AArch64Operand::CreateToken(
4676                "bfm", false, Op.getStartLoc(), getContext());
4677          else if (Tok == "sbfiz")
4678            Operands[0] = AArch64Operand::CreateToken(
4679                "sbfm", false, Op.getStartLoc(), getContext());
4680          else if (Tok == "ubfiz")
4681            Operands[0] = AArch64Operand::CreateToken(
4682                "ubfm", false, Op.getStartLoc(), getContext());
4683          else
4684            llvm_unreachable("No valid mnemonic for alias?");
4685        }
4686      }
4687
4688      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4689      // UBFX -> UBFM aliases.
4690    } else if (NumOperands == 5 &&
4691               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4692      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4693      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4694      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4695
4696      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4697        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4698        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4699
4700        if (Op3CE && Op4CE) {
4701          uint64_t Op3Val = Op3CE->getValue();
4702          uint64_t Op4Val = Op4CE->getValue();
4703
4704          uint64_t RegWidth = 0;
4705          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4706                  Op1.getReg()))
4707            RegWidth = 64;
4708          else
4709            RegWidth = 32;
4710
4711          if (Op3Val >= RegWidth)
4712            return Error(Op3.getStartLoc(),
4713                         "expected integer in range [0, 31]");
4714          if (Op4Val < 1 || Op4Val > RegWidth)
4715            return Error(Op4.getStartLoc(),
4716                         "expected integer in range [1, 32]");
4717
4718          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4719
4720          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4721            return Error(Op4.getStartLoc(),
4722                         "requested extract overflows register");
4723
4724          const MCExpr *NewOp4 =
4725              MCConstantExpr::create(NewOp4Val, getContext());
4726          Operands[4] = AArch64Operand::CreateImm(
4727              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4728          if (Tok == "bfxil")
4729            Operands[0] = AArch64Operand::CreateToken(
4730                "bfm", false, Op.getStartLoc(), getContext());
4731          else if (Tok == "sbfx")
4732            Operands[0] = AArch64Operand::CreateToken(
4733                "sbfm", false, Op.getStartLoc(), getContext());
4734          else if (Tok == "ubfx")
4735            Operands[0] = AArch64Operand::CreateToken(
4736                "ubfm", false, Op.getStartLoc(), getContext());
4737          else
4738            llvm_unreachable("No valid mnemonic for alias?");
4739        }
4740      }
4741    }
4742  }
4743
4744  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4745  // instruction for FP registers correctly in some rare circumstances. Convert
4746  // it to a safe instruction and warn (because silently changing someone's
4747  // assembly is rude).
4748  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4749      NumOperands == 4 && Tok == "movi") {
4750    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4751    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4752    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4753    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4754        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4755      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4756      if (Suffix.lower() == ".2d" &&
4757          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4758        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4759                " correctly on this CPU, converting to equivalent movi.16b");
4760        // Switch the suffix to .16b.
4761        unsigned Idx = Op1.isToken() ? 1 : 2;
4762        Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4763                                                  getContext());
4764      }
4765    }
4766  }
4767
4768  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4769  //        InstAlias can't quite handle this since the reg classes aren't
4770  //        subclasses.
4771  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4772    // The source register can be Wn here, but the matcher expects a
4773    // GPR64. Twiddle it here if necessary.
4774    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4775    if (Op.isScalarReg()) {
4776      unsigned Reg = getXRegFromWReg(Op.getReg());
4777      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4778                                              Op.getStartLoc(), Op.getEndLoc(),
4779                                              getContext());
4780    }
4781  }
4782  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4783  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4784    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4785    if (Op.isScalarReg() &&
4786        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4787            Op.getReg())) {
4788      // The source register can be Wn here, but the matcher expects a
4789      // GPR64. Twiddle it here if necessary.
4790      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4791      if (Op.isScalarReg()) {
4792        unsigned Reg = getXRegFromWReg(Op.getReg());
4793        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4794                                                Op.getStartLoc(),
4795                                                Op.getEndLoc(), getContext());
4796      }
4797    }
4798  }
4799  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4800  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4801    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4802    if (Op.isScalarReg() &&
4803        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4804            Op.getReg())) {
4805      // The source register can be Wn here, but the matcher expects a
4806      // GPR32. Twiddle it here if necessary.
4807      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4808      if (Op.isScalarReg()) {
4809        unsigned Reg = getWRegFromXReg(Op.getReg());
4810        Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4811                                                Op.getStartLoc(),
4812                                                Op.getEndLoc(), getContext());
4813      }
4814    }
4815  }
4816
4817  MCInst Inst;
4818  FeatureBitset MissingFeatures;
4819  // First try to match against the secondary set of tables containing the
4820  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4821  unsigned MatchResult =
4822      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4823                           MatchingInlineAsm, 1);
4824
4825  // If that fails, try against the alternate table containing long-form NEON:
4826  // "fadd v0.2s, v1.2s, v2.2s"
4827  if (MatchResult != Match_Success) {
4828    // But first, save the short-form match result: we can use it in case the
4829    // long-form match also fails.
4830    auto ShortFormNEONErrorInfo = ErrorInfo;
4831    auto ShortFormNEONMatchResult = MatchResult;
4832    auto ShortFormNEONMissingFeatures = MissingFeatures;
4833
4834    MatchResult =
4835        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4836                             MatchingInlineAsm, 0);
4837
4838    // Now, both matches failed, and the long-form match failed on the mnemonic
4839    // suffix token operand.  The short-form match failure is probably more
4840    // relevant: use it instead.
4841    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4842        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4843        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4844      MatchResult = ShortFormNEONMatchResult;
4845      ErrorInfo = ShortFormNEONErrorInfo;
4846      MissingFeatures = ShortFormNEONMissingFeatures;
4847    }
4848  }
4849
4850  switch (MatchResult) {
4851  case Match_Success: {
4852    // Perform range checking and other semantic validations
4853    SmallVector<SMLoc, 8> OperandLocs;
4854    NumOperands = Operands.size();
4855    for (unsigned i = 1; i < NumOperands; ++i)
4856      OperandLocs.push_back(Operands[i]->getStartLoc());
4857    if (validateInstruction(Inst, IDLoc, OperandLocs))
4858      return true;
4859
4860    Inst.setLoc(IDLoc);
4861    Out.emitInstruction(Inst, getSTI());
4862    return false;
4863  }
4864  case Match_MissingFeature: {
4865    assert(MissingFeatures.any() && "Unknown missing feature!");
4866    // Special case the error message for the very common case where only
4867    // a single subtarget feature is missing (neon, e.g.).
4868    std::string Msg = "instruction requires:";
4869    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4870      if (MissingFeatures[i]) {
4871        Msg += " ";
4872        Msg += getSubtargetFeatureName(i);
4873      }
4874    }
4875    return Error(IDLoc, Msg);
4876  }
4877  case Match_MnemonicFail:
4878    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4879  case Match_InvalidOperand: {
4880    SMLoc ErrorLoc = IDLoc;
4881
4882    if (ErrorInfo != ~0ULL) {
4883      if (ErrorInfo >= Operands.size())
4884        return Error(IDLoc, "too few operands for instruction",
4885                     SMRange(IDLoc, getTok().getLoc()));
4886
4887      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4888      if (ErrorLoc == SMLoc())
4889        ErrorLoc = IDLoc;
4890    }
4891    // If the match failed on a suffix token operand, tweak the diagnostic
4892    // accordingly.
4893    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4894        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4895      MatchResult = Match_InvalidSuffix;
4896
4897    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4898  }
4899  case Match_InvalidTiedOperand:
4900  case Match_InvalidMemoryIndexed1:
4901  case Match_InvalidMemoryIndexed2:
4902  case Match_InvalidMemoryIndexed4:
4903  case Match_InvalidMemoryIndexed8:
4904  case Match_InvalidMemoryIndexed16:
4905  case Match_InvalidCondCode:
4906  case Match_AddSubRegExtendSmall:
4907  case Match_AddSubRegExtendLarge:
4908  case Match_AddSubSecondSource:
4909  case Match_LogicalSecondSource:
4910  case Match_AddSubRegShift32:
4911  case Match_AddSubRegShift64:
4912  case Match_InvalidMovImm32Shift:
4913  case Match_InvalidMovImm64Shift:
4914  case Match_InvalidFPImm:
4915  case Match_InvalidMemoryWExtend8:
4916  case Match_InvalidMemoryWExtend16:
4917  case Match_InvalidMemoryWExtend32:
4918  case Match_InvalidMemoryWExtend64:
4919  case Match_InvalidMemoryWExtend128:
4920  case Match_InvalidMemoryXExtend8:
4921  case Match_InvalidMemoryXExtend16:
4922  case Match_InvalidMemoryXExtend32:
4923  case Match_InvalidMemoryXExtend64:
4924  case Match_InvalidMemoryXExtend128:
4925  case Match_InvalidMemoryIndexed1SImm4:
4926  case Match_InvalidMemoryIndexed2SImm4:
4927  case Match_InvalidMemoryIndexed3SImm4:
4928  case Match_InvalidMemoryIndexed4SImm4:
4929  case Match_InvalidMemoryIndexed1SImm6:
4930  case Match_InvalidMemoryIndexed16SImm4:
4931  case Match_InvalidMemoryIndexed32SImm4:
4932  case Match_InvalidMemoryIndexed4SImm7:
4933  case Match_InvalidMemoryIndexed8SImm7:
4934  case Match_InvalidMemoryIndexed16SImm7:
4935  case Match_InvalidMemoryIndexed8UImm5:
4936  case Match_InvalidMemoryIndexed4UImm5:
4937  case Match_InvalidMemoryIndexed2UImm5:
4938  case Match_InvalidMemoryIndexed1UImm6:
4939  case Match_InvalidMemoryIndexed2UImm6:
4940  case Match_InvalidMemoryIndexed4UImm6:
4941  case Match_InvalidMemoryIndexed8UImm6:
4942  case Match_InvalidMemoryIndexed16UImm6:
4943  case Match_InvalidMemoryIndexedSImm6:
4944  case Match_InvalidMemoryIndexedSImm5:
4945  case Match_InvalidMemoryIndexedSImm8:
4946  case Match_InvalidMemoryIndexedSImm9:
4947  case Match_InvalidMemoryIndexed16SImm9:
4948  case Match_InvalidMemoryIndexed8SImm10:
4949  case Match_InvalidImm0_1:
4950  case Match_InvalidImm0_7:
4951  case Match_InvalidImm0_15:
4952  case Match_InvalidImm0_31:
4953  case Match_InvalidImm0_63:
4954  case Match_InvalidImm0_127:
4955  case Match_InvalidImm0_255:
4956  case Match_InvalidImm0_65535:
4957  case Match_InvalidImm1_8:
4958  case Match_InvalidImm1_16:
4959  case Match_InvalidImm1_32:
4960  case Match_InvalidImm1_64:
4961  case Match_InvalidSVEAddSubImm8:
4962  case Match_InvalidSVEAddSubImm16:
4963  case Match_InvalidSVEAddSubImm32:
4964  case Match_InvalidSVEAddSubImm64:
4965  case Match_InvalidSVECpyImm8:
4966  case Match_InvalidSVECpyImm16:
4967  case Match_InvalidSVECpyImm32:
4968  case Match_InvalidSVECpyImm64:
4969  case Match_InvalidIndexRange1_1:
4970  case Match_InvalidIndexRange0_15:
4971  case Match_InvalidIndexRange0_7:
4972  case Match_InvalidIndexRange0_3:
4973  case Match_InvalidIndexRange0_1:
4974  case Match_InvalidSVEIndexRange0_63:
4975  case Match_InvalidSVEIndexRange0_31:
4976  case Match_InvalidSVEIndexRange0_15:
4977  case Match_InvalidSVEIndexRange0_7:
4978  case Match_InvalidSVEIndexRange0_3:
4979  case Match_InvalidLabel:
4980  case Match_InvalidComplexRotationEven:
4981  case Match_InvalidComplexRotationOdd:
4982  case Match_InvalidGPR64shifted8:
4983  case Match_InvalidGPR64shifted16:
4984  case Match_InvalidGPR64shifted32:
4985  case Match_InvalidGPR64shifted64:
4986  case Match_InvalidGPR64NoXZRshifted8:
4987  case Match_InvalidGPR64NoXZRshifted16:
4988  case Match_InvalidGPR64NoXZRshifted32:
4989  case Match_InvalidGPR64NoXZRshifted64:
4990  case Match_InvalidZPR32UXTW8:
4991  case Match_InvalidZPR32UXTW16:
4992  case Match_InvalidZPR32UXTW32:
4993  case Match_InvalidZPR32UXTW64:
4994  case Match_InvalidZPR32SXTW8:
4995  case Match_InvalidZPR32SXTW16:
4996  case Match_InvalidZPR32SXTW32:
4997  case Match_InvalidZPR32SXTW64:
4998  case Match_InvalidZPR64UXTW8:
4999  case Match_InvalidZPR64SXTW8:
5000  case Match_InvalidZPR64UXTW16:
5001  case Match_InvalidZPR64SXTW16:
5002  case Match_InvalidZPR64UXTW32:
5003  case Match_InvalidZPR64SXTW32:
5004  case Match_InvalidZPR64UXTW64:
5005  case Match_InvalidZPR64SXTW64:
5006  case Match_InvalidZPR32LSL8:
5007  case Match_InvalidZPR32LSL16:
5008  case Match_InvalidZPR32LSL32:
5009  case Match_InvalidZPR32LSL64:
5010  case Match_InvalidZPR64LSL8:
5011  case Match_InvalidZPR64LSL16:
5012  case Match_InvalidZPR64LSL32:
5013  case Match_InvalidZPR64LSL64:
5014  case Match_InvalidZPR0:
5015  case Match_InvalidZPR8:
5016  case Match_InvalidZPR16:
5017  case Match_InvalidZPR32:
5018  case Match_InvalidZPR64:
5019  case Match_InvalidZPR128:
5020  case Match_InvalidZPR_3b8:
5021  case Match_InvalidZPR_3b16:
5022  case Match_InvalidZPR_3b32:
5023  case Match_InvalidZPR_4b16:
5024  case Match_InvalidZPR_4b32:
5025  case Match_InvalidZPR_4b64:
5026  case Match_InvalidSVEPredicateAnyReg:
5027  case Match_InvalidSVEPattern:
5028  case Match_InvalidSVEPredicateBReg:
5029  case Match_InvalidSVEPredicateHReg:
5030  case Match_InvalidSVEPredicateSReg:
5031  case Match_InvalidSVEPredicateDReg:
5032  case Match_InvalidSVEPredicate3bAnyReg:
5033  case Match_InvalidSVEPredicate3bBReg:
5034  case Match_InvalidSVEPredicate3bHReg:
5035  case Match_InvalidSVEPredicate3bSReg:
5036  case Match_InvalidSVEPredicate3bDReg:
5037  case Match_InvalidSVEExactFPImmOperandHalfOne:
5038  case Match_InvalidSVEExactFPImmOperandHalfTwo:
5039  case Match_InvalidSVEExactFPImmOperandZeroOne:
5040  case Match_MSR:
5041  case Match_MRS: {
5042    if (ErrorInfo >= Operands.size())
5043      return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5044    // Any time we get here, there's nothing fancy to do. Just get the
5045    // operand SMLoc and display the diagnostic.
5046    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5047    if (ErrorLoc == SMLoc())
5048      ErrorLoc = IDLoc;
5049    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5050  }
5051  }
5052
5053  llvm_unreachable("Implement any new match types added!");
5054}
5055
5056/// ParseDirective parses the arm specific directives
5057bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5058  const MCObjectFileInfo::Environment Format =
5059    getContext().getObjectFileInfo()->getObjectFileType();
5060  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5061
5062  auto IDVal = DirectiveID.getIdentifier().lower();
5063  SMLoc Loc = DirectiveID.getLoc();
5064  if (IDVal == ".arch")
5065    parseDirectiveArch(Loc);
5066  else if (IDVal == ".cpu")
5067    parseDirectiveCPU(Loc);
5068  else if (IDVal == ".tlsdesccall")
5069    parseDirectiveTLSDescCall(Loc);
5070  else if (IDVal == ".ltorg" || IDVal == ".pool")
5071    parseDirectiveLtorg(Loc);
5072  else if (IDVal == ".unreq")
5073    parseDirectiveUnreq(Loc);
5074  else if (IDVal == ".inst")
5075    parseDirectiveInst(Loc);
5076  else if (IDVal == ".cfi_negate_ra_state")
5077    parseDirectiveCFINegateRAState();
5078  else if (IDVal == ".cfi_b_key_frame")
5079    parseDirectiveCFIBKeyFrame();
5080  else if (IDVal == ".arch_extension")
5081    parseDirectiveArchExtension(Loc);
5082  else if (IDVal == ".variant_pcs")
5083    parseDirectiveVariantPCS(Loc);
5084  else if (IsMachO) {
5085    if (IDVal == MCLOHDirectiveName())
5086      parseDirectiveLOH(IDVal, Loc);
5087    else
5088      return true;
5089  } else
5090    return true;
5091  return false;
5092}
5093
5094static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5095                            SmallVector<StringRef, 4> &RequestedExtensions) {
5096  const bool NoCrypto =
5097      (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5098                 "nocrypto") != std::end(RequestedExtensions));
5099  const bool Crypto =
5100      (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5101                 "crypto") != std::end(RequestedExtensions));
5102
5103  if (!NoCrypto && Crypto) {
5104    switch (ArchKind) {
5105    default:
5106      // Map 'generic' (and others) to sha2 and aes, because
5107      // that was the traditional meaning of crypto.
5108    case AArch64::ArchKind::ARMV8_1A:
5109    case AArch64::ArchKind::ARMV8_2A:
5110    case AArch64::ArchKind::ARMV8_3A:
5111      RequestedExtensions.push_back("sha2");
5112      RequestedExtensions.push_back("aes");
5113      break;
5114    case AArch64::ArchKind::ARMV8_4A:
5115    case AArch64::ArchKind::ARMV8_5A:
5116    case AArch64::ArchKind::ARMV8_6A:
5117      RequestedExtensions.push_back("sm4");
5118      RequestedExtensions.push_back("sha3");
5119      RequestedExtensions.push_back("sha2");
5120      RequestedExtensions.push_back("aes");
5121      break;
5122    }
5123  } else if (NoCrypto) {
5124    switch (ArchKind) {
5125    default:
5126      // Map 'generic' (and others) to sha2 and aes, because
5127      // that was the traditional meaning of crypto.
5128    case AArch64::ArchKind::ARMV8_1A:
5129    case AArch64::ArchKind::ARMV8_2A:
5130    case AArch64::ArchKind::ARMV8_3A:
5131      RequestedExtensions.push_back("nosha2");
5132      RequestedExtensions.push_back("noaes");
5133      break;
5134    case AArch64::ArchKind::ARMV8_4A:
5135    case AArch64::ArchKind::ARMV8_5A:
5136    case AArch64::ArchKind::ARMV8_6A:
5137      RequestedExtensions.push_back("nosm4");
5138      RequestedExtensions.push_back("nosha3");
5139      RequestedExtensions.push_back("nosha2");
5140      RequestedExtensions.push_back("noaes");
5141      break;
5142    }
5143  }
5144}
5145
5146/// parseDirectiveArch
5147///   ::= .arch token
5148bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5149  SMLoc ArchLoc = getLoc();
5150
5151  StringRef Arch, ExtensionString;
5152  std::tie(Arch, ExtensionString) =
5153      getParser().parseStringToEndOfStatement().trim().split('+');
5154
5155  AArch64::ArchKind ID = AArch64::parseArch(Arch);
5156  if (ID == AArch64::ArchKind::INVALID)
5157    return Error(ArchLoc, "unknown arch name");
5158
5159  if (parseToken(AsmToken::EndOfStatement))
5160    return true;
5161
5162  // Get the architecture and extension features.
5163  std::vector<StringRef> AArch64Features;
5164  AArch64::getArchFeatures(ID, AArch64Features);
5165  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5166                                AArch64Features);
5167
5168  MCSubtargetInfo &STI = copySTI();
5169  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5170  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5171
5172  SmallVector<StringRef, 4> RequestedExtensions;
5173  if (!ExtensionString.empty())
5174    ExtensionString.split(RequestedExtensions, '+');
5175
5176  ExpandCryptoAEK(ID, RequestedExtensions);
5177
5178  FeatureBitset Features = STI.getFeatureBits();
5179  for (auto Name : RequestedExtensions) {
5180    bool EnableFeature = true;
5181
5182    if (Name.startswith_lower("no")) {
5183      EnableFeature = false;
5184      Name = Name.substr(2);
5185    }
5186
5187    for (const auto &Extension : ExtensionMap) {
5188      if (Extension.Name != Name)
5189        continue;
5190
5191      if (Extension.Features.none())
5192        report_fatal_error("unsupported architectural extension: " + Name);
5193
5194      FeatureBitset ToggleFeatures = EnableFeature
5195                                         ? (~Features & Extension.Features)
5196                                         : ( Features & Extension.Features);
5197      FeatureBitset Features =
5198          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5199      setAvailableFeatures(Features);
5200      break;
5201    }
5202  }
5203  return false;
5204}
5205
5206/// parseDirectiveArchExtension
5207///   ::= .arch_extension [no]feature
5208bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5209  SMLoc ExtLoc = getLoc();
5210
5211  StringRef Name = getParser().parseStringToEndOfStatement().trim();
5212
5213  if (parseToken(AsmToken::EndOfStatement,
5214                 "unexpected token in '.arch_extension' directive"))
5215    return true;
5216
5217  bool EnableFeature = true;
5218  if (Name.startswith_lower("no")) {
5219    EnableFeature = false;
5220    Name = Name.substr(2);
5221  }
5222
5223  MCSubtargetInfo &STI = copySTI();
5224  FeatureBitset Features = STI.getFeatureBits();
5225  for (const auto &Extension : ExtensionMap) {
5226    if (Extension.Name != Name)
5227      continue;
5228
5229    if (Extension.Features.none())
5230      return Error(ExtLoc, "unsupported architectural extension: " + Name);
5231
5232    FeatureBitset ToggleFeatures = EnableFeature
5233                                       ? (~Features & Extension.Features)
5234                                       : (Features & Extension.Features);
5235    FeatureBitset Features =
5236        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5237    setAvailableFeatures(Features);
5238    return false;
5239  }
5240
5241  return Error(ExtLoc, "unknown architectural extension: " + Name);
5242}
5243
5244static SMLoc incrementLoc(SMLoc L, int Offset) {
5245  return SMLoc::getFromPointer(L.getPointer() + Offset);
5246}
5247
5248/// parseDirectiveCPU
5249///   ::= .cpu id
5250bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5251  SMLoc CurLoc = getLoc();
5252
5253  StringRef CPU, ExtensionString;
5254  std::tie(CPU, ExtensionString) =
5255      getParser().parseStringToEndOfStatement().trim().split('+');
5256
5257  if (parseToken(AsmToken::EndOfStatement))
5258    return true;
5259
5260  SmallVector<StringRef, 4> RequestedExtensions;
5261  if (!ExtensionString.empty())
5262    ExtensionString.split(RequestedExtensions, '+');
5263
5264  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5265  // once that is tablegen'ed
5266  if (!getSTI().isCPUStringValid(CPU)) {
5267    Error(CurLoc, "unknown CPU name");
5268    return false;
5269  }
5270
5271  MCSubtargetInfo &STI = copySTI();
5272  STI.setDefaultFeatures(CPU, "");
5273  CurLoc = incrementLoc(CurLoc, CPU.size());
5274
5275  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5276
5277  FeatureBitset Features = STI.getFeatureBits();
5278  for (auto Name : RequestedExtensions) {
5279    // Advance source location past '+'.
5280    CurLoc = incrementLoc(CurLoc, 1);
5281
5282    bool EnableFeature = true;
5283
5284    if (Name.startswith_lower("no")) {
5285      EnableFeature = false;
5286      Name = Name.substr(2);
5287    }
5288
5289    bool FoundExtension = false;
5290    for (const auto &Extension : ExtensionMap) {
5291      if (Extension.Name != Name)
5292        continue;
5293
5294      if (Extension.Features.none())
5295        report_fatal_error("unsupported architectural extension: " + Name);
5296
5297      FeatureBitset ToggleFeatures = EnableFeature
5298                                         ? (~Features & Extension.Features)
5299                                         : ( Features & Extension.Features);
5300      FeatureBitset Features =
5301          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5302      setAvailableFeatures(Features);
5303      FoundExtension = true;
5304
5305      break;
5306    }
5307
5308    if (!FoundExtension)
5309      Error(CurLoc, "unsupported architectural extension");
5310
5311    CurLoc = incrementLoc(CurLoc, Name.size());
5312  }
5313  return false;
5314}
5315
5316/// parseDirectiveInst
5317///  ::= .inst opcode [, ...]
5318bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5319  if (getLexer().is(AsmToken::EndOfStatement))
5320    return Error(Loc, "expected expression following '.inst' directive");
5321
5322  auto parseOp = [&]() -> bool {
5323    SMLoc L = getLoc();
5324    const MCExpr *Expr = nullptr;
5325    if (check(getParser().parseExpression(Expr), L, "expected expression"))
5326      return true;
5327    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5328    if (check(!Value, L, "expected constant expression"))
5329      return true;
5330    getTargetStreamer().emitInst(Value->getValue());
5331    return false;
5332  };
5333
5334  if (parseMany(parseOp))
5335    return addErrorSuffix(" in '.inst' directive");
5336  return false;
5337}
5338
5339// parseDirectiveTLSDescCall:
5340//   ::= .tlsdesccall symbol
5341bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5342  StringRef Name;
5343  if (check(getParser().parseIdentifier(Name), L,
5344            "expected symbol after directive") ||
5345      parseToken(AsmToken::EndOfStatement))
5346    return true;
5347
5348  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5349  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5350  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5351
5352  MCInst Inst;
5353  Inst.setOpcode(AArch64::TLSDESCCALL);
5354  Inst.addOperand(MCOperand::createExpr(Expr));
5355
5356  getParser().getStreamer().emitInstruction(Inst, getSTI());
5357  return false;
5358}
5359
5360/// ::= .loh <lohName | lohId> label1, ..., labelN
5361/// The number of arguments depends on the loh identifier.
5362bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5363  MCLOHType Kind;
5364  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5365    if (getParser().getTok().isNot(AsmToken::Integer))
5366      return TokError("expected an identifier or a number in directive");
5367    // We successfully get a numeric value for the identifier.
5368    // Check if it is valid.
5369    int64_t Id = getParser().getTok().getIntVal();
5370    if (Id <= -1U && !isValidMCLOHType(Id))
5371      return TokError("invalid numeric identifier in directive");
5372    Kind = (MCLOHType)Id;
5373  } else {
5374    StringRef Name = getTok().getIdentifier();
5375    // We successfully parse an identifier.
5376    // Check if it is a recognized one.
5377    int Id = MCLOHNameToId(Name);
5378
5379    if (Id == -1)
5380      return TokError("invalid identifier in directive");
5381    Kind = (MCLOHType)Id;
5382  }
5383  // Consume the identifier.
5384  Lex();
5385  // Get the number of arguments of this LOH.
5386  int NbArgs = MCLOHIdToNbArgs(Kind);
5387
5388  assert(NbArgs != -1 && "Invalid number of arguments");
5389
5390  SmallVector<MCSymbol *, 3> Args;
5391  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5392    StringRef Name;
5393    if (getParser().parseIdentifier(Name))
5394      return TokError("expected identifier in directive");
5395    Args.push_back(getContext().getOrCreateSymbol(Name));
5396
5397    if (Idx + 1 == NbArgs)
5398      break;
5399    if (parseToken(AsmToken::Comma,
5400                   "unexpected token in '" + Twine(IDVal) + "' directive"))
5401      return true;
5402  }
5403  if (parseToken(AsmToken::EndOfStatement,
5404                 "unexpected token in '" + Twine(IDVal) + "' directive"))
5405    return true;
5406
5407  getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5408  return false;
5409}
5410
5411/// parseDirectiveLtorg
5412///  ::= .ltorg | .pool
5413bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5414  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5415    return true;
5416  getTargetStreamer().emitCurrentConstantPool();
5417  return false;
5418}
5419
5420/// parseDirectiveReq
5421///  ::= name .req registername
5422bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5423  MCAsmParser &Parser = getParser();
5424  Parser.Lex(); // Eat the '.req' token.
5425  SMLoc SRegLoc = getLoc();
5426  RegKind RegisterKind = RegKind::Scalar;
5427  unsigned RegNum;
5428  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5429
5430  if (ParseRes != MatchOperand_Success) {
5431    StringRef Kind;
5432    RegisterKind = RegKind::NeonVector;
5433    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5434
5435    if (ParseRes == MatchOperand_ParseFail)
5436      return true;
5437
5438    if (ParseRes == MatchOperand_Success && !Kind.empty())
5439      return Error(SRegLoc, "vector register without type specifier expected");
5440  }
5441
5442  if (ParseRes != MatchOperand_Success) {
5443    StringRef Kind;
5444    RegisterKind = RegKind::SVEDataVector;
5445    ParseRes =
5446        tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5447
5448    if (ParseRes == MatchOperand_ParseFail)
5449      return true;
5450
5451    if (ParseRes == MatchOperand_Success && !Kind.empty())
5452      return Error(SRegLoc,
5453                   "sve vector register without type specifier expected");
5454  }
5455
5456  if (ParseRes != MatchOperand_Success) {
5457    StringRef Kind;
5458    RegisterKind = RegKind::SVEPredicateVector;
5459    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5460
5461    if (ParseRes == MatchOperand_ParseFail)
5462      return true;
5463
5464    if (ParseRes == MatchOperand_Success && !Kind.empty())
5465      return Error(SRegLoc,
5466                   "sve predicate register without type specifier expected");
5467  }
5468
5469  if (ParseRes != MatchOperand_Success)
5470    return Error(SRegLoc, "register name or alias expected");
5471
5472  // Shouldn't be anything else.
5473  if (parseToken(AsmToken::EndOfStatement,
5474                 "unexpected input in .req directive"))
5475    return true;
5476
5477  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5478  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5479    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5480
5481  return false;
5482}
5483
5484/// parseDirectiveUneq
5485///  ::= .unreq registername
5486bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5487  MCAsmParser &Parser = getParser();
5488  if (getTok().isNot(AsmToken::Identifier))
5489    return TokError("unexpected input in .unreq directive.");
5490  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5491  Parser.Lex(); // Eat the identifier.
5492  if (parseToken(AsmToken::EndOfStatement))
5493    return addErrorSuffix("in '.unreq' directive");
5494  return false;
5495}
5496
5497bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5498  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5499    return true;
5500  getStreamer().emitCFINegateRAState();
5501  return false;
5502}
5503
5504/// parseDirectiveCFIBKeyFrame
5505/// ::= .cfi_b_key
5506bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5507  if (parseToken(AsmToken::EndOfStatement,
5508                 "unexpected token in '.cfi_b_key_frame'"))
5509    return true;
5510  getStreamer().emitCFIBKeyFrame();
5511  return false;
5512}
5513
5514/// parseDirectiveVariantPCS
5515/// ::= .variant_pcs symbolname
5516bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
5517  MCAsmParser &Parser = getParser();
5518
5519  const AsmToken &Tok = Parser.getTok();
5520  if (Tok.isNot(AsmToken::Identifier))
5521    return TokError("expected symbol name");
5522
5523  StringRef SymbolName = Tok.getIdentifier();
5524
5525  MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
5526  if (!Sym)
5527    return TokError("unknown symbol in '.variant_pcs' directive");
5528
5529  Parser.Lex(); // Eat the symbol
5530
5531  // Shouldn't be any more tokens
5532  if (parseToken(AsmToken::EndOfStatement))
5533    return addErrorSuffix(" in '.variant_pcs' directive");
5534
5535  getTargetStreamer().emitDirectiveVariantPCS(Sym);
5536
5537  return false;
5538}
5539
/// Decompose \p Expr into an AArch64-specific (ELF) variant kind, a Darwin
/// symbol-reference variant kind, and a constant addend.
/// Returns true if the expression has the shape of a valid symbolic
/// reference (possibly plus an addend), false otherwise. On entry the three
/// out-parameters are reset to their "none" values, so they are meaningful
/// even on early returns.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Strip an outer AArch64 modifier wrapper (e.g. ":lo12:sym"), recording
  // its kind and continuing with the wrapped sub-expression.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // A subtracted symbol (SymB) cannot be encoded in these operands.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
5581
5582/// Force static initialization.
5583extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
5584  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5585  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5586  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5587  RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
5588  RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
5589}
5590
5591#define GET_REGISTER_MATCHER
5592#define GET_SUBTARGET_FEATURE_NAME
5593#define GET_MATCHER_IMPLEMENTATION
5594#define GET_MNEMONIC_SPELL_CHECKER
5595#include "AArch64GenAsmMatcher.inc"
5596
5597// Define this matcher function after the auto-generated include so we
5598// have the match class enum definitions.
5599unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5600                                                      unsigned Kind) {
5601  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5602  // If the kind is a token for a literal immediate, check if our asm
5603  // operand matches. This is for InstAliases which have a fixed-value
5604  // immediate in the syntax.
5605  int64_t ExpectedVal;
5606  switch (Kind) {
5607  default:
5608    return Match_InvalidOperand;
5609  case MCK__HASH_0:
5610    ExpectedVal = 0;
5611    break;
5612  case MCK__HASH_1:
5613    ExpectedVal = 1;
5614    break;
5615  case MCK__HASH_12:
5616    ExpectedVal = 12;
5617    break;
5618  case MCK__HASH_16:
5619    ExpectedVal = 16;
5620    break;
5621  case MCK__HASH_2:
5622    ExpectedVal = 2;
5623    break;
5624  case MCK__HASH_24:
5625    ExpectedVal = 24;
5626    break;
5627  case MCK__HASH_3:
5628    ExpectedVal = 3;
5629    break;
5630  case MCK__HASH_32:
5631    ExpectedVal = 32;
5632    break;
5633  case MCK__HASH_4:
5634    ExpectedVal = 4;
5635    break;
5636  case MCK__HASH_48:
5637    ExpectedVal = 48;
5638    break;
5639  case MCK__HASH_6:
5640    ExpectedVal = 6;
5641    break;
5642  case MCK__HASH_64:
5643    ExpectedVal = 64;
5644    break;
5645  case MCK__HASH_8:
5646    ExpectedVal = 8;
5647    break;
5648  }
5649  if (!Op.isImm())
5650    return Match_InvalidOperand;
5651  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5652  if (!CE)
5653    return Match_InvalidOperand;
5654  if (CE->getValue() == ExpectedVal)
5655    return Match_Success;
5656  return Match_InvalidOperand;
5657}
5658
5659OperandMatchResultTy
5660AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5661
5662  SMLoc S = getLoc();
5663
5664  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5665    Error(S, "expected register");
5666    return MatchOperand_ParseFail;
5667  }
5668
5669  unsigned FirstReg;
5670  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5671  if (Res != MatchOperand_Success)
5672    return MatchOperand_ParseFail;
5673
5674  const MCRegisterClass &WRegClass =
5675      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5676  const MCRegisterClass &XRegClass =
5677      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5678
5679  bool isXReg = XRegClass.contains(FirstReg),
5680       isWReg = WRegClass.contains(FirstReg);
5681  if (!isXReg && !isWReg) {
5682    Error(S, "expected first even register of a "
5683             "consecutive same-size even/odd register pair");
5684    return MatchOperand_ParseFail;
5685  }
5686
5687  const MCRegisterInfo *RI = getContext().getRegisterInfo();
5688  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5689
5690  if (FirstEncoding & 0x1) {
5691    Error(S, "expected first even register of a "
5692             "consecutive same-size even/odd register pair");
5693    return MatchOperand_ParseFail;
5694  }
5695
5696  if (getParser().getTok().isNot(AsmToken::Comma)) {
5697    Error(getLoc(), "expected comma");
5698    return MatchOperand_ParseFail;
5699  }
5700  // Eat the comma
5701  getParser().Lex();
5702
5703  SMLoc E = getLoc();
5704  unsigned SecondReg;
5705  Res = tryParseScalarRegister(SecondReg);
5706  if (Res != MatchOperand_Success)
5707    return MatchOperand_ParseFail;
5708
5709  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5710      (isXReg && !XRegClass.contains(SecondReg)) ||
5711      (isWReg && !WRegClass.contains(SecondReg))) {
5712    Error(E,"expected second odd register of a "
5713             "consecutive same-size even/odd register pair");
5714    return MatchOperand_ParseFail;
5715  }
5716
5717  unsigned Pair = 0;
5718  if (isXReg) {
5719    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5720           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5721  } else {
5722    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5723           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5724  }
5725
5726  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5727      getLoc(), getContext()));
5728
5729  return MatchOperand_Success;
5730}
5731
5732template <bool ParseShiftExtend, bool ParseSuffix>
5733OperandMatchResultTy
5734AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5735  const SMLoc S = getLoc();
5736  // Check for a SVE vector register specifier first.
5737  unsigned RegNum;
5738  StringRef Kind;
5739
5740  OperandMatchResultTy Res =
5741      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5742
5743  if (Res != MatchOperand_Success)
5744    return Res;
5745
5746  if (ParseSuffix && Kind.empty())
5747    return MatchOperand_NoMatch;
5748
5749  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5750  if (!KindRes)
5751    return MatchOperand_NoMatch;
5752
5753  unsigned ElementWidth = KindRes->second;
5754
5755  // No shift/extend is the default.
5756  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5757    Operands.push_back(AArch64Operand::CreateVectorReg(
5758        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5759
5760    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5761    if (Res == MatchOperand_ParseFail)
5762      return MatchOperand_ParseFail;
5763    return MatchOperand_Success;
5764  }
5765
5766  // Eat the comma
5767  getParser().Lex();
5768
5769  // Match the shift
5770  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5771  Res = tryParseOptionalShiftExtend(ExtOpnd);
5772  if (Res != MatchOperand_Success)
5773    return Res;
5774
5775  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5776  Operands.push_back(AArch64Operand::CreateVectorReg(
5777      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5778      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5779      Ext->hasShiftExtendAmount()));
5780
5781  return MatchOperand_Success;
5782}
5783
5784OperandMatchResultTy
5785AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5786  MCAsmParser &Parser = getParser();
5787
5788  SMLoc SS = getLoc();
5789  const AsmToken &TokE = Parser.getTok();
5790  bool IsHash = TokE.is(AsmToken::Hash);
5791
5792  if (!IsHash && TokE.isNot(AsmToken::Identifier))
5793    return MatchOperand_NoMatch;
5794
5795  int64_t Pattern;
5796  if (IsHash) {
5797    Parser.Lex(); // Eat hash
5798
5799    // Parse the immediate operand.
5800    const MCExpr *ImmVal;
5801    SS = getLoc();
5802    if (Parser.parseExpression(ImmVal))
5803      return MatchOperand_ParseFail;
5804
5805    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5806    if (!MCE)
5807      return MatchOperand_ParseFail;
5808
5809    Pattern = MCE->getValue();
5810  } else {
5811    // Parse the pattern
5812    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5813    if (!Pat)
5814      return MatchOperand_NoMatch;
5815
5816    Parser.Lex();
5817    Pattern = Pat->Encoding;
5818    assert(Pattern >= 0 && Pattern < 32);
5819  }
5820
5821  Operands.push_back(
5822      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5823                                SS, getLoc(), getContext()));
5824
5825  return MatchOperand_Success;
5826}
5827