1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCExpr.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/AArch64TargetParser.h"
52#include "llvm/Support/TargetParser.h"
53#include "llvm/Support/raw_ostream.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
// Classes of register an operand may name. Keeping scalar, NEON, SVE and
// SME registers in distinct kinds lets the parser resolve aliases and
// suffixes per register file.
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

// Flavours of an SME matrix operand: the whole ZA array, a tile, or a
// single row/column slice of a tile.
enum class MatrixKind { Array, Tile, Row, Col };

// How a parsed register must relate to the register class the matcher
// expects: exactly equal, or equal to its super-/sub-register (used for
// operands such as GPR64as32 / GPR32as64).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88  StringRef Mnemonic; ///< Instruction mnemonic.
89
90  // Map of register aliases registers via the .req directive.
91  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
92
93  class PrefixInfo {
94  public:
95    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96      PrefixInfo Prefix;
97      switch (Inst.getOpcode()) {
98      case AArch64::MOVPRFX_ZZ:
99        Prefix.Active = true;
100        Prefix.Dst = Inst.getOperand(0).getReg();
101        break;
102      case AArch64::MOVPRFX_ZPmZ_B:
103      case AArch64::MOVPRFX_ZPmZ_H:
104      case AArch64::MOVPRFX_ZPmZ_S:
105      case AArch64::MOVPRFX_ZPmZ_D:
106        Prefix.Active = true;
107        Prefix.Predicated = true;
108        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110               "No destructive element size set for movprfx");
111        Prefix.Dst = Inst.getOperand(0).getReg();
112        Prefix.Pg = Inst.getOperand(2).getReg();
113        break;
114      case AArch64::MOVPRFX_ZPzZ_B:
115      case AArch64::MOVPRFX_ZPzZ_H:
116      case AArch64::MOVPRFX_ZPzZ_S:
117      case AArch64::MOVPRFX_ZPzZ_D:
118        Prefix.Active = true;
119        Prefix.Predicated = true;
120        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122               "No destructive element size set for movprfx");
123        Prefix.Dst = Inst.getOperand(0).getReg();
124        Prefix.Pg = Inst.getOperand(1).getReg();
125        break;
126      default:
127        break;
128      }
129
130      return Prefix;
131    }
132
133    PrefixInfo() = default;
134    bool isActive() const { return Active; }
135    bool isPredicated() const { return Predicated; }
136    unsigned getElementSize() const {
137      assert(Predicated);
138      return ElementSize;
139    }
140    unsigned getDstReg() const { return Dst; }
141    unsigned getPgReg() const {
142      assert(Predicated);
143      return Pg;
144    }
145
146  private:
147    bool Active = false;
148    bool Predicated = false;
149    unsigned ElementSize;
150    unsigned Dst;
151    unsigned Pg;
152  } NextPrefix;
153
154  AArch64TargetStreamer &getTargetStreamer() {
155    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156    return static_cast<AArch64TargetStreamer &>(TS);
157  }
158
159  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
164  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
165                                          std::string &Suggestion);
166  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
167  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168  bool parseRegister(OperandVector &Operands);
169  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
170  bool parseNeonVectorList(OperandVector &Operands);
171  bool parseOptionalMulOperand(OperandVector &Operands);
172  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
173  bool parseKeywordOperand(OperandVector &Operands);
174  bool parseOperand(OperandVector &Operands, bool isCondCode,
175                    bool invertCondCode);
176  bool parseImmExpr(int64_t &Out);
177  bool parseComma();
178  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
179                            unsigned Last);
180
181  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
182                      OperandVector &Operands);
183
184  bool parseDirectiveArch(SMLoc L);
185  bool parseDirectiveArchExtension(SMLoc L);
186  bool parseDirectiveCPU(SMLoc L);
187  bool parseDirectiveInst(SMLoc L);
188
189  bool parseDirectiveTLSDescCall(SMLoc L);
190
191  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
192  bool parseDirectiveLtorg(SMLoc L);
193
194  bool parseDirectiveReq(StringRef Name, SMLoc L);
195  bool parseDirectiveUnreq(SMLoc L);
196  bool parseDirectiveCFINegateRAState();
197  bool parseDirectiveCFIBKeyFrame();
198  bool parseDirectiveCFIMTETaggedFrame();
199
200  bool parseDirectiveVariantPCS(SMLoc L);
201
202  bool parseDirectiveSEHAllocStack(SMLoc L);
203  bool parseDirectiveSEHPrologEnd(SMLoc L);
204  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
205  bool parseDirectiveSEHSaveFPLR(SMLoc L);
206  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
207  bool parseDirectiveSEHSaveReg(SMLoc L);
208  bool parseDirectiveSEHSaveRegX(SMLoc L);
209  bool parseDirectiveSEHSaveRegP(SMLoc L);
210  bool parseDirectiveSEHSaveRegPX(SMLoc L);
211  bool parseDirectiveSEHSaveLRPair(SMLoc L);
212  bool parseDirectiveSEHSaveFReg(SMLoc L);
213  bool parseDirectiveSEHSaveFRegX(SMLoc L);
214  bool parseDirectiveSEHSaveFRegP(SMLoc L);
215  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
216  bool parseDirectiveSEHSetFP(SMLoc L);
217  bool parseDirectiveSEHAddFP(SMLoc L);
218  bool parseDirectiveSEHNop(SMLoc L);
219  bool parseDirectiveSEHSaveNext(SMLoc L);
220  bool parseDirectiveSEHEpilogStart(SMLoc L);
221  bool parseDirectiveSEHEpilogEnd(SMLoc L);
222  bool parseDirectiveSEHTrapFrame(SMLoc L);
223  bool parseDirectiveSEHMachineFrame(SMLoc L);
224  bool parseDirectiveSEHContext(SMLoc L);
225  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
226  bool parseDirectiveSEHPACSignLR(SMLoc L);
227  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
228
229  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
230                           SmallVectorImpl<SMLoc> &Loc);
231  unsigned getNumRegsForRegKind(RegKind K);
232  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
233                               OperandVector &Operands, MCStreamer &Out,
234                               uint64_t &ErrorInfo,
235                               bool MatchingInlineAsm) override;
236/// @name Auto-generated Match Functions
237/// {
238
239#define GET_ASSEMBLER_HEADER
240#include "AArch64GenAsmMatcher.inc"
241
242  /// }
243
244  OperandMatchResultTy tryParseScalarRegister(MCRegister &Reg);
245  OperandMatchResultTy tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
246                                              RegKind MatchKind);
247  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
248  OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
249  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
250  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
251  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
252  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
253  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
254  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
255  template <bool IsSVEPrefetch = false>
256  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
257  OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands);
258  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
259  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
260  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
261  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
262  template<bool AddFPZeroAsLiteral>
263  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
264  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
265  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
266  bool tryParseNeonVectorRegister(OperandVector &Operands);
267  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
268  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
269  OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands);
270  template <bool ParseShiftExtend,
271            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
272  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
273  OperandMatchResultTy tryParseZTOperand(OperandVector &Operands);
274  template <bool ParseShiftExtend, bool ParseSuffix>
275  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
276  template <RegKind RK>
277  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
278  template <RegKind VectorKind>
279  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
280                                          bool ExpectMatch = false);
281  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
282  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
283  OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands);
284  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
285  OperandMatchResultTy tryParseImmRange(OperandVector &Operands);
286
287public:
288  enum AArch64MatchResultTy {
289    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
290#define GET_OPERAND_DIAGNOSTIC_TYPES
291#include "AArch64GenAsmMatcher.inc"
292  };
293  bool IsILP32;
294
295  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
296                   const MCInstrInfo &MII, const MCTargetOptions &Options)
297    : MCTargetAsmParser(Options, STI, MII) {
298    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
299    MCAsmParserExtension::Initialize(Parser);
300    MCStreamer &S = getParser().getStreamer();
301    if (S.getTargetStreamer() == nullptr)
302      new AArch64TargetStreamer(S);
303
304    // Alias .hword/.word/.[dx]word to the target-independent
305    // .2byte/.4byte/.8byte directives as they have the same form and
306    // semantics:
307    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
308    Parser.addAliasForDirective(".hword", ".2byte");
309    Parser.addAliasForDirective(".word", ".4byte");
310    Parser.addAliasForDirective(".dword", ".8byte");
311    Parser.addAliasForDirective(".xword", ".8byte");
312
313    // Initialize the set of available features.
314    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
315  }
316
317  bool areEqualRegs(const MCParsedAsmOperand &Op1,
318                    const MCParsedAsmOperand &Op2) const override;
319  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
320                        SMLoc NameLoc, OperandVector &Operands) override;
321  bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
322                     SMLoc &EndLoc) override;
323  OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
324                                        SMLoc &EndLoc) override;
325  bool ParseDirective(AsmToken DirectiveID) override;
326  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
327                                      unsigned Kind) override;
328
329  static bool classifySymbolRef(const MCExpr *Expr,
330                                AArch64MCExpr::VariantKind &ELFRefKind,
331                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
332                                int64_t &Addend);
333};
334
335/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
336/// instruction.
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator for the anonymous union below; exactly one of the
  // alternatives is active at any time.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  // Data/Length pairs in the structs below are non-owning views of string
  // storage owned elsewhere (typically the source buffer or static tables).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0; // Bitmask of ZA tiles named in the list.
  };

  struct VectorListOp {
    unsigned RegNum;   // First register of the list.
    unsigned Count;    // Number of registers in the list.
    unsigned Stride;   // Register-number stride between consecutive entries.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range, e.g. "{ 0, 1 }" style operands.
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;      // Encoding when read via MRS.
    uint32_t MSRReg;      // Encoding when written via MSR.
    uint32_t PStateField; // Encoding when used as a PSTATE field.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // The active alternative is selected by Kind above; all members are
  // trivially copyable, so the copy constructor copies only the live one.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  // Constructs an operand of kind K; the corresponding union member must be
  // filled in by the caller (the Create* factories).
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
519
  // Copy constructor. Because the payload lives in a union, only the
  // member selected by Kind may be read, so the copy is dispatched through
  // a switch rather than copying the whole union.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
584
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Kind-checked accessors for the union payload. Each asserts that the
  // operand is of the matching kind before reading the union member.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstructs the APFloat from the raw bits stored in FPImm.Val.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // Shift/extend data may be stored either as a standalone k_ShiftExtend
  // operand or folded into a k_Register operand; these accessors accept
  // both representations.

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
773
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands are never represented as a combined memory operand;
  // addresses are built from separate register/immediate operands.
  bool isMem() const override { return false; }

  // True iff the operand is a constant immediate in [0, 64).
  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  // Signed immediate of the given bit width, unscaled.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed immediate that is a multiple of Scale with Bits bits of range.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned scaled immediate. When IsRange is set, the operand must be an
  // immediate range whose last value equals first + Offset.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
801
  // Shared implementation of isSImmScaled/isUImmScaled. Checks that the
  // (first) immediate value is a multiple of Scale lying within the signed
  // or unsigned Bits-bit range after scaling. Returns NearMatch for a
  // constant of the right shape that is merely out of range, so the matcher
  // can issue a range diagnostic instead of a plain mismatch.
  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    // The operand's kind (plain immediate vs. immediate range) must agree
    // with the IsRange template parameter.
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    // Signed range: [-2^(Bits-1), 2^(Bits-1) - 1] * Scale.
    // Unsigned range: [0, 2^Bits - 1] * Scale.
    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
833
  // SVE predicate patterns occupy a 5-bit field, so valid values are
  // constants in [0, 32). Out-of-range constants are a near match so the
  // user gets a range diagnostic.
  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
845
846  DiagnosticPredicate isSVEVecLenSpecifier() const {
847    if (!isImm())
848      return DiagnosticPredicateTy::NoMatch;
849    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
850    if (!MCE)
851      return DiagnosticPredicateTy::NoMatch;
852    int64_t Val = MCE->getValue();
853    if (Val >= 0 && Val <= 1)
854      return DiagnosticPredicateTy::Match;
855    return DiagnosticPredicateTy::NearMatch;
856  }
857
  // Decide whether a non-constant expression is acceptable as a UImm12
  // offset, i.e. whether it uses a relocation variant that resolves to the
  // low 12 bits of an address.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
893
894  template <int Scale> bool isUImm12Offset() const {
895    if (!isImm())
896      return false;
897
898    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
899    if (!MCE)
900      return isSymbolicUImm12Offset(getImm());
901
902    int64_t Val = MCE->getValue();
903    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
904  }
905
906  template <int N, int M>
907  bool isImmInRange() const {
908    if (!isImm())
909      return false;
910    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
911    if (!MCE)
912      return false;
913    int64_t Val = MCE->getValue();
914    return (Val >= N && Val <= M);
915  }
916
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of all bits above T's width. Shifting twice by half the width
    // avoids the undefined behaviour a single shift by 64 would have when
    // T is a 64-bit type.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Validate only T's worth of low bits against the logical-immediate
    // encoding rules for that element size.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
936
  // True when the operand was parsed as an immediate with an explicit shift.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  // True when the operand was parsed as an immediate range.
  bool isImmRange() const { return Kind == k_ImmRange; }
940
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    // Case 1: the operand was written with an explicit shift equal to Width;
    // report the raw constant together with that shift.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Case 2: a plain constant. If it is non-zero and its low Width bits are
    // clear it can be folded into the shifted form; otherwise report it with
    // a zero shift.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Not a (shifted) constant immediate.
    return {};
  }
961
  // Matches the immediate operand of ADD/SUB: either a symbolic reference
  // with a page-offset/:lo12:-style modifier (resolved later by relocation),
  // or a constant representable as a 12-bit value optionally shifted by 12.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic operands are accepted only with modifiers that produce a
    // 12-bit (or HI12) fragment of an address or TLS offset.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1006
1007  bool isAddSubImmNeg() const {
1008    if (!isShiftedImm() && !isImm())
1009      return false;
1010
1011    // Otherwise it should be a real negative immediate in range.
1012    if (auto ShiftedVal = getShiftedVal<12>())
1013      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1014
1015    return false;
1016  }
1017
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take the 'lsl #8' form, hence the extra guard on
    // the shift amount below.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    // A constant immediate that fails the range check: report NearMatch so
    // the matcher can produce a targeted diagnostic.
    return DiagnosticPredicateTy::NearMatch;
  }
1038
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take the 'lsl #8' form, hence the extra guard on
    // the shift amount below.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    // Constant immediate, but out of range for this element width.
    return DiagnosticPredicateTy::NearMatch;
  }
1056
  // Prefer the logical-immediate form only when the value is NOT also
  // representable as an SVE CPY/DUP immediate (which takes precedence).
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1062
  // True when the operand is a condition code (eq, ne, ...).
  bool isCondCode() const { return Kind == k_CondCode; }

  // Matches a constant encodable as an AdvSIMD modified immediate, type 10
  // (per-byte replicated mask, as used by e.g. MOVI ...2d).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1073
  // Matches a branch-target immediate. Non-constant expressions are accepted
  // and left for the fixup machinery; constants must be 4-byte aligned and
  // fit in an N-bit signed field scaled by 4.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch targets are word-aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed range of N bits, scaled by the 4-byte instruction size.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1087
  // True when the operand is a symbolic expression whose ELF modifier is one
  // of AllowedModifiers (used by the MOVZ/MOVK :abs_gN:-style forms). Darwin
  // modifiers are never accepted here.
  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }
1105
  // The four isMovWSymbolGN predicates below accept the modifiers legal for
  // a MOVZ/MOVK targeting bits [16*N+15 : 16*N] of an address or TLS offset.

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1135
1136  template<int RegWidth, int Shift>
1137  bool isMOVZMovAlias() const {
1138    if (!isImm()) return false;
1139
1140    const MCExpr *E = getImm();
1141    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1142      uint64_t Value = CE->getValue();
1143
1144      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1145    }
1146    // Only supports the case of Shift being 0 if an expression is used as an
1147    // operand
1148    return !Shift && E;
1149  }
1150
1151  template<int RegWidth, int Shift>
1152  bool isMOVNMovAlias() const {
1153    if (!isImm()) return false;
1154
1155    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1156    if (!CE) return false;
1157    uint64_t Value = CE->getValue();
1158
1159    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1160  }
1161
  // True when the value is encodable as an 8-bit AArch64 FP immediate
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier (DSB ... nXS).
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1174
  // System register readable via MRS (-1U encodes "not valid for MRS").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR (-1U encodes "not valid for MSR").
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState field whose MSR-immediate form takes a 1-bit operand.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PState field whose MSR-immediate form takes a 4-bit operand.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SME streaming-mode / ZA control register (-1U encodes "invalid").
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1202
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Plain scalar GPR/FPR register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "low" half of the register file
  // (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  // SME matrix (ZA) register / tile-list operands.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1227
  // True when this is a predicate register parsed in its predicate-as-counter
  // form (pnN) and it belongs to the given register class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1243
  // True when this is an SVE register of the kind implied by Class
  // (Z registers for ZPR classes, P registers for PPR classes) and it is a
  // member of that class.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1263
  // Scalar FP register used where a Z register is expected (the FPR is
  // remapped to the corresponding ZPR when operands are added).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1268
  // The three predicates below match an SVE register of a given class whose
  // parsed element-width suffix equals ElementWidth; a register of the right
  // kind but wrong class/width yields NearMatch for better diagnostics.

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1301
  // Matches an SVE data vector used as an index register with the given
  // extend type and shift amount (scaled by the access width in bytes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1324
  // A 64-bit GPR written where a 32-bit one is encoded (remapped later).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // A 32-bit GPR written where a 64-bit one is encoded (remapped later).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Consecutive-8-register GPR64 tuple (used by e.g. SYSP-style operands).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Even/odd register pairs (32- and 64-bit), e.g. for CASP.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // The XZR form of the SYSP register-pair operand.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1356
  // Matches the rotation operand of complex-number instructions: a constant
  // no greater than 270 that is congruent to Remainder modulo Angle
  // (e.g. 0/90/180/270 or 90/270).
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1369
  // Scalar register that is a member of the given 64-bit GPR class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // GPR64 index register with an LSL whose amount matches the access width
  // (ExtWidth bits -> shift of log2(ExtWidth/8)).
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1385
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  /// An implicitly-typed list has no per-element type suffix, which is
  /// recorded as NumElements == 0.
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1394
1395  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1396            unsigned ElementWidth, unsigned Stride = 1>
1397  bool isTypedVectorList() const {
1398    if (Kind != k_VectorList)
1399      return false;
1400    if (VectorList.Count != NumRegs)
1401      return false;
1402    if (VectorList.RegisterKind != VectorKind)
1403      return false;
1404    if (VectorList.ElementWidth != ElementWidth)
1405      return false;
1406    if (VectorList.Stride != Stride)
1407      return false;
1408    return VectorList.NumElements == NumElements;
1409  }
1410
  // Typed vector list whose starting register is additionally aligned to a
  // multiple of the list length (required by SME multi-vector forms).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    // Right kind of list but misaligned start register: NearMatch so the
    // matcher can report the alignment requirement.
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1422
  // Strided (non-consecutive) vector list. The starting register must lie in
  // the first Stride registers of either half of the Z file (Z0.. or Z16..).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1436
1437  template <int Min, int Max>
1438  DiagnosticPredicate isVectorIndex() const {
1439    if (Kind != k_VectorIndex)
1440      return DiagnosticPredicateTy::NoMatch;
1441    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1442      return DiagnosticPredicateTy::Match;
1443    return DiagnosticPredicateTy::NearMatch;
1444  }
1445
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A "shifter" is the shift subset of shift/extend operands; MSL is the
  // vector modified-immediate shift.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1465
  // Matches an FP immediate that is bit-for-bit equal to the value named by
  // ImmEnum in the ExactFPImm table (e.g. #0.5, #1.0). Requires that the
  // literal was parsed without rounding (getFPImmIsExact).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1488
  // Two-candidate variant: matches if the operand equals either exact FP
  // immediate; otherwise propagates the last (NearMatch/NoMatch) result.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1498
  // Register-extend operand (uxtb/sxth/... or lsl) with amount 0-4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register: uxtx/sxtx or plain lsl.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1530
  // X-register memory extend: lsl/sxtx with an amount of either 0 or
  // log2(Width/8), the scale of a Width-bit access.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register memory extend: uxtw/sxtw with an amount of either 0 or
  // log2(Width/8).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1548
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1571
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the two halfword positions
    // of a W register).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1583
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the four halfword
    // positions of an X register).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1595
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
1605
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A halfword logical vector shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1615
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1625
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1635
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // A constant ADRP target must be page-aligned and within the signed
    // 21-bit page-offset range (+/- 4GiB in units of 4096).
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    // Symbolic targets are accepted; the fixup validates them later.
    return true;
  }
1651
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // A constant ADR target must fit in a signed 21-bit byte offset.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    // Symbolic targets are accepted; the fixup validates them later.
    return true;
  }
1667
  // Matches an SME matrix operand of the given kind, element size, and
  // register class; a matrix operand that fails any of those checks is a
  // NearMatch for diagnostics.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1678
1679  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1680    // Add as immediates when possible.  Null MCExpr = 0.
1681    if (!Expr)
1682      Inst.addOperand(MCOperand::createImm(0));
1683    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1684      Inst.addOperand(MCOperand::createImm(CE->getValue()));
1685    else
1686      Inst.addOperand(MCOperand::createExpr(Expr));
1687  }
1688
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1698
  // Emit the W register with the same encoding as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the X register with the same encoding as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1722
  // Emit the Z register with the same index as the parsed FP register of the
  // given width (e.g. d3 -> z3).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1737
  // Emit the D register with the same index as the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1756
  // Row selector into the FirstRegs table used by addVectorListOperands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1763
  // Emit the register-tuple operand for a vector list: the tuple register is
  // found by offsetting the NumRegs-wide tuple base by the distance of the
  // list's first register from the row's base register (column 0).
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Rows are indexed by VecListIndexType, columns by list length (column 0
    // holds the base register the parsed start is measured against).
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1791
  /// Emit a strided SVE vector list (SME2 multi-vector) as the single
  /// strided-tuple register covering it. Two-register lists stride by 8
  /// (Zn, Zn+8); four-register lists stride by 4 (Zn, Zn+4, Zn+8, Zn+12).
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      // Valid starts are Z0-Z7 (tuples Z0_Z8..) and Z16-Z23 (Z16_Z24..).
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      // Valid starts are Z0-Z3 and Z16-Z19.
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1828
1829  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1830    assert(N == 1 && "Invalid number of operands!");
1831    unsigned RegMask = getMatrixTileListRegMask();
1832    assert(RegMask <= 0xFF && "Invalid mask!");
1833    Inst.addOperand(MCOperand::createImm(RegMask));
1834  }
1835
1836  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1837    assert(N == 1 && "Invalid number of operands!");
1838    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1839  }
1840
1841  template <unsigned ImmIs0, unsigned ImmIs1>
1842  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1843    assert(N == 1 && "Invalid number of operands!");
1844    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1845    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1846  }
1847
1848  void addImmOperands(MCInst &Inst, unsigned N) const {
1849    assert(N == 1 && "Invalid number of operands!");
1850    // If this is a pageoff symrefexpr with an addend, adjust the addend
1851    // to be only the page-offset portion. Otherwise, just add the expr
1852    // as-is.
1853    addExpr(Inst, getImm());
1854  }
1855
1856  template <int Shift>
1857  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1858    assert(N == 2 && "Invalid number of operands!");
1859    if (auto ShiftedVal = getShiftedVal<Shift>()) {
1860      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1861      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1862    } else if (isShiftedImm()) {
1863      addExpr(Inst, getShiftedImmVal());
1864      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1865    } else {
1866      addExpr(Inst, getImm());
1867      Inst.addOperand(MCOperand::createImm(0));
1868    }
1869  }
1870
1871  template <int Shift>
1872  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1873    assert(N == 2 && "Invalid number of operands!");
1874    if (auto ShiftedVal = getShiftedVal<Shift>()) {
1875      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1876      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1877    } else
1878      llvm_unreachable("Not a shifted negative immediate");
1879  }
1880
1881  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1882    assert(N == 1 && "Invalid number of operands!");
1883    Inst.addOperand(MCOperand::createImm(getCondCode()));
1884  }
1885
1886  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1887    assert(N == 1 && "Invalid number of operands!");
1888    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1889    if (!MCE)
1890      addExpr(Inst, getImm());
1891    else
1892      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1893  }
1894
  // ADR targets are byte-granular, so the immediate passes through
  // unmodified (unlike ADRP above, which shifts off the low 12 bits).
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1898
1899  template<int Scale>
1900  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1901    assert(N == 1 && "Invalid number of operands!");
1902    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1903
1904    if (!MCE) {
1905      Inst.addOperand(MCOperand::createExpr(getImm()));
1906      return;
1907    }
1908    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1909  }
1910
1911  void addUImm6Operands(MCInst &Inst, unsigned N) const {
1912    assert(N == 1 && "Invalid number of operands!");
1913    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1914    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1915  }
1916
1917  template <int Scale>
1918  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1919    assert(N == 1 && "Invalid number of operands!");
1920    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1921    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1922  }
1923
1924  template <int Scale>
1925  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1926    assert(N == 1 && "Invalid number of operands!");
1927    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1928  }
1929
1930  template <typename T>
1931  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1932    assert(N == 1 && "Invalid number of operands!");
1933    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1934    std::make_unsigned_t<T> Val = MCE->getValue();
1935    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1936    Inst.addOperand(MCOperand::createImm(encoding));
1937  }
1938
1939  template <typename T>
1940  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1941    assert(N == 1 && "Invalid number of operands!");
1942    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1943    std::make_unsigned_t<T> Val = ~MCE->getValue();
1944    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1945    Inst.addOperand(MCOperand::createImm(encoding));
1946  }
1947
1948  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1949    assert(N == 1 && "Invalid number of operands!");
1950    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1951    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1952    Inst.addOperand(MCOperand::createImm(encoding));
1953  }
1954
1955  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1956    // Branch operands don't encode the low bits, so shift them off
1957    // here. If it's a label, however, just put it on directly as there's
1958    // not enough information now to do anything.
1959    assert(N == 1 && "Invalid number of operands!");
1960    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1961    if (!MCE) {
1962      addExpr(Inst, getImm());
1963      return;
1964    }
1965    assert(MCE && "Invalid constant immediate operand!");
1966    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1967  }
1968
1969  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1970    // Branch operands don't encode the low bits, so shift them off
1971    // here. If it's a label, however, just put it on directly as there's
1972    // not enough information now to do anything.
1973    assert(N == 1 && "Invalid number of operands!");
1974    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1975    if (!MCE) {
1976      addExpr(Inst, getImm());
1977      return;
1978    }
1979    assert(MCE && "Invalid constant immediate operand!");
1980    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1981  }
1982
1983  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1984    // Branch operands don't encode the low bits, so shift them off
1985    // here. If it's a label, however, just put it on directly as there's
1986    // not enough information now to do anything.
1987    assert(N == 1 && "Invalid number of operands!");
1988    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1989    if (!MCE) {
1990      addExpr(Inst, getImm());
1991      return;
1992    }
1993    assert(MCE && "Invalid constant immediate operand!");
1994    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1995  }
1996
1997  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1998    assert(N == 1 && "Invalid number of operands!");
1999    Inst.addOperand(MCOperand::createImm(
2000        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2001  }
2002
2003  void addBarrierOperands(MCInst &Inst, unsigned N) const {
2004    assert(N == 1 && "Invalid number of operands!");
2005    Inst.addOperand(MCOperand::createImm(getBarrier()));
2006  }
2007
2008  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2009    assert(N == 1 && "Invalid number of operands!");
2010    Inst.addOperand(MCOperand::createImm(getBarrier()));
2011  }
2012
2013  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2014    assert(N == 1 && "Invalid number of operands!");
2015
2016    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2017  }
2018
2019  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2020    assert(N == 1 && "Invalid number of operands!");
2021
2022    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2023  }
2024
2025  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2026    assert(N == 1 && "Invalid number of operands!");
2027
2028    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2029  }
2030
2031  void addSVCROperands(MCInst &Inst, unsigned N) const {
2032    assert(N == 1 && "Invalid number of operands!");
2033
2034    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2035  }
2036
2037  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2038    assert(N == 1 && "Invalid number of operands!");
2039
2040    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2041  }
2042
2043  void addSysCROperands(MCInst &Inst, unsigned N) const {
2044    assert(N == 1 && "Invalid number of operands!");
2045    Inst.addOperand(MCOperand::createImm(getSysCR()));
2046  }
2047
2048  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2049    assert(N == 1 && "Invalid number of operands!");
2050    Inst.addOperand(MCOperand::createImm(getPrefetch()));
2051  }
2052
2053  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2054    assert(N == 1 && "Invalid number of operands!");
2055    Inst.addOperand(MCOperand::createImm(getPSBHint()));
2056  }
2057
2058  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2059    assert(N == 1 && "Invalid number of operands!");
2060    Inst.addOperand(MCOperand::createImm(getBTIHint()));
2061  }
2062
2063  void addShifterOperands(MCInst &Inst, unsigned N) const {
2064    assert(N == 1 && "Invalid number of operands!");
2065    unsigned Imm =
2066        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2067    Inst.addOperand(MCOperand::createImm(Imm));
2068  }
2069
  // For SYSP's implicit XZR pair operand: verify the parsed scalar register
  // is XZR (via an encoding round trip through the GPR64 class) and emit it.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Silently skip non-scalar operands; the matcher rejects them elsewhere.
    if (!isScalarReg())
      return;

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    // Normalize the register into the GPR64 class by encoding value, so a
    // register spelled in another class still maps to its X-form.
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                       .getRegister(RI->getEncodingValue(getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2084
2085  void addExtendOperands(MCInst &Inst, unsigned N) const {
2086    assert(N == 1 && "Invalid number of operands!");
2087    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2088    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2089    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2090    Inst.addOperand(MCOperand::createImm(Imm));
2091  }
2092
2093  void addExtend64Operands(MCInst &Inst, unsigned N) const {
2094    assert(N == 1 && "Invalid number of operands!");
2095    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2096    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2097    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2098    Inst.addOperand(MCOperand::createImm(Imm));
2099  }
2100
2101  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2102    assert(N == 2 && "Invalid number of operands!");
2103    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2104    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2105    Inst.addOperand(MCOperand::createImm(IsSigned));
2106    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2107  }
2108
2109  // For 8-bit load/store instructions with a register offset, both the
2110  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2111  // they're disambiguated by whether the shift was explicit or implicit rather
2112  // than its size.
2113  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2114    assert(N == 2 && "Invalid number of operands!");
2115    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2116    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2117    Inst.addOperand(MCOperand::createImm(IsSigned));
2118    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2119  }
2120
2121  template<int Shift>
2122  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2123    assert(N == 1 && "Invalid number of operands!");
2124
2125    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2126    if (CE) {
2127      uint64_t Value = CE->getValue();
2128      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2129    } else {
2130      addExpr(Inst, getImm());
2131    }
2132  }
2133
2134  template<int Shift>
2135  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2136    assert(N == 1 && "Invalid number of operands!");
2137
2138    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2139    uint64_t Value = CE->getValue();
2140    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2141  }
2142
2143  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2144    assert(N == 1 && "Invalid number of operands!");
2145    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2146    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2147  }
2148
2149  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2150    assert(N == 1 && "Invalid number of operands!");
2151    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2152    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2153  }
2154
2155  void print(raw_ostream &OS) const override;
2156
2157  static std::unique_ptr<AArch64Operand>
2158  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2159    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2160    Op->Tok.Data = Str.data();
2161    Op->Tok.Length = Str.size();
2162    Op->Tok.IsSuffix = IsSuffix;
2163    Op->StartLoc = S;
2164    Op->EndLoc = S;
2165    return Op;
2166  }
2167
2168  static std::unique_ptr<AArch64Operand>
2169  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2170            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2171            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2172            unsigned ShiftAmount = 0,
2173            unsigned HasExplicitAmount = false) {
2174    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2175    Op->Reg.RegNum = RegNum;
2176    Op->Reg.Kind = Kind;
2177    Op->Reg.ElementWidth = 0;
2178    Op->Reg.EqualityTy = EqTy;
2179    Op->Reg.ShiftExtend.Type = ExtTy;
2180    Op->Reg.ShiftExtend.Amount = ShiftAmount;
2181    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2182    Op->StartLoc = S;
2183    Op->EndLoc = E;
2184    return Op;
2185  }
2186
2187  static std::unique_ptr<AArch64Operand>
2188  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2189                  SMLoc S, SMLoc E, MCContext &Ctx,
2190                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2191                  unsigned ShiftAmount = 0,
2192                  unsigned HasExplicitAmount = false) {
2193    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2194            Kind == RegKind::SVEPredicateVector ||
2195            Kind == RegKind::SVEPredicateAsCounter) &&
2196           "Invalid vector kind");
2197    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2198                        HasExplicitAmount);
2199    Op->Reg.ElementWidth = ElementWidth;
2200    return Op;
2201  }
2202
2203  static std::unique_ptr<AArch64Operand>
2204  CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2205                   unsigned NumElements, unsigned ElementWidth,
2206                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2207    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2208    Op->VectorList.RegNum = RegNum;
2209    Op->VectorList.Count = Count;
2210    Op->VectorList.Stride = Stride;
2211    Op->VectorList.NumElements = NumElements;
2212    Op->VectorList.ElementWidth = ElementWidth;
2213    Op->VectorList.RegisterKind = RegisterKind;
2214    Op->StartLoc = S;
2215    Op->EndLoc = E;
2216    return Op;
2217  }
2218
2219  static std::unique_ptr<AArch64Operand>
2220  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2221    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2222    Op->VectorIndex.Val = Idx;
2223    Op->StartLoc = S;
2224    Op->EndLoc = E;
2225    return Op;
2226  }
2227
2228  static std::unique_ptr<AArch64Operand>
2229  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2230    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2231    Op->MatrixTileList.RegMask = RegMask;
2232    Op->StartLoc = S;
2233    Op->EndLoc = E;
2234    return Op;
2235  }
2236
2237  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2238                                  const unsigned ElementWidth) {
2239    static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2240        RegMap = {
2241            {{0, AArch64::ZAB0},
2242             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2243              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2244            {{8, AArch64::ZAB0},
2245             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2246              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2247            {{16, AArch64::ZAH0},
2248             {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2249            {{16, AArch64::ZAH1},
2250             {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2251            {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2252            {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2253            {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2254            {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2255        };
2256
2257    if (ElementWidth == 64)
2258      OutRegs.insert(Reg);
2259    else {
2260      std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2261      assert(!Regs.empty() && "Invalid tile or element width!");
2262      for (auto OutReg : Regs)
2263        OutRegs.insert(OutReg);
2264    }
2265  }
2266
2267  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2268                                                   SMLoc E, MCContext &Ctx) {
2269    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2270    Op->Imm.Val = Val;
2271    Op->StartLoc = S;
2272    Op->EndLoc = E;
2273    return Op;
2274  }
2275
2276  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2277                                                          unsigned ShiftAmount,
2278                                                          SMLoc S, SMLoc E,
2279                                                          MCContext &Ctx) {
2280    auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2281    Op->ShiftedImm .Val = Val;
2282    Op->ShiftedImm.ShiftAmount = ShiftAmount;
2283    Op->StartLoc = S;
2284    Op->EndLoc = E;
2285    return Op;
2286  }
2287
2288  static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2289                                                        unsigned Last, SMLoc S,
2290                                                        SMLoc E,
2291                                                        MCContext &Ctx) {
2292    auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2293    Op->ImmRange.First = First;
2294    Op->ImmRange.Last = Last;
2295    Op->EndLoc = E;
2296    return Op;
2297  }
2298
2299  static std::unique_ptr<AArch64Operand>
2300  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2301    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2302    Op->CondCode.Code = Code;
2303    Op->StartLoc = S;
2304    Op->EndLoc = E;
2305    return Op;
2306  }
2307
2308  static std::unique_ptr<AArch64Operand>
2309  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2310    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2311    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2312    Op->FPImm.IsExact = IsExact;
2313    Op->StartLoc = S;
2314    Op->EndLoc = S;
2315    return Op;
2316  }
2317
2318  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2319                                                       StringRef Str,
2320                                                       SMLoc S,
2321                                                       MCContext &Ctx,
2322                                                       bool HasnXSModifier) {
2323    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2324    Op->Barrier.Val = Val;
2325    Op->Barrier.Data = Str.data();
2326    Op->Barrier.Length = Str.size();
2327    Op->Barrier.HasnXSModifier = HasnXSModifier;
2328    Op->StartLoc = S;
2329    Op->EndLoc = S;
2330    return Op;
2331  }
2332
2333  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2334                                                      uint32_t MRSReg,
2335                                                      uint32_t MSRReg,
2336                                                      uint32_t PStateField,
2337                                                      MCContext &Ctx) {
2338    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2339    Op->SysReg.Data = Str.data();
2340    Op->SysReg.Length = Str.size();
2341    Op->SysReg.MRSReg = MRSReg;
2342    Op->SysReg.MSRReg = MSRReg;
2343    Op->SysReg.PStateField = PStateField;
2344    Op->StartLoc = S;
2345    Op->EndLoc = S;
2346    return Op;
2347  }
2348
2349  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2350                                                     SMLoc E, MCContext &Ctx) {
2351    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2352    Op->SysCRImm.Val = Val;
2353    Op->StartLoc = S;
2354    Op->EndLoc = E;
2355    return Op;
2356  }
2357
2358  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2359                                                        StringRef Str,
2360                                                        SMLoc S,
2361                                                        MCContext &Ctx) {
2362    auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2363    Op->Prefetch.Val = Val;
2364    Op->Barrier.Data = Str.data();
2365    Op->Barrier.Length = Str.size();
2366    Op->StartLoc = S;
2367    Op->EndLoc = S;
2368    return Op;
2369  }
2370
2371  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2372                                                       StringRef Str,
2373                                                       SMLoc S,
2374                                                       MCContext &Ctx) {
2375    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2376    Op->PSBHint.Val = Val;
2377    Op->PSBHint.Data = Str.data();
2378    Op->PSBHint.Length = Str.size();
2379    Op->StartLoc = S;
2380    Op->EndLoc = S;
2381    return Op;
2382  }
2383
2384  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2385                                                       StringRef Str,
2386                                                       SMLoc S,
2387                                                       MCContext &Ctx) {
2388    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2389    Op->BTIHint.Val = Val | 32;
2390    Op->BTIHint.Data = Str.data();
2391    Op->BTIHint.Length = Str.size();
2392    Op->StartLoc = S;
2393    Op->EndLoc = S;
2394    return Op;
2395  }
2396
2397  static std::unique_ptr<AArch64Operand>
2398  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2399                       SMLoc S, SMLoc E, MCContext &Ctx) {
2400    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2401    Op->MatrixReg.RegNum = RegNum;
2402    Op->MatrixReg.ElementWidth = ElementWidth;
2403    Op->MatrixReg.Kind = Kind;
2404    Op->StartLoc = S;
2405    Op->EndLoc = E;
2406    return Op;
2407  }
2408
2409  static std::unique_ptr<AArch64Operand>
2410  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2411    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2412    Op->SVCR.PStateField = PStateField;
2413    Op->SVCR.Data = Str.data();
2414    Op->SVCR.Length = Str.size();
2415    Op->StartLoc = S;
2416    Op->EndLoc = S;
2417    return Op;
2418  }
2419
2420  static std::unique_ptr<AArch64Operand>
2421  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2422                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2423    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2424    Op->ShiftExtend.Type = ShOp;
2425    Op->ShiftExtend.Amount = Val;
2426    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2427    Op->StartLoc = S;
2428    Op->EndLoc = E;
2429    return Op;
2430  }
2431};
2432
2433} // end anonymous namespace.
2434
// Debug/diagnostic dump of an operand; the format is not consumed by any
// tool, only read by humans.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Prints the raw register numbers of each element of the list,
    // start + i * stride.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Print the 8-bit tile mask as binary, most-significant bit first.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register with an attached shift/extend falls through to also print
    // the shift/extend suffix.
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2534
2535/// @name Auto-generated Match Functions
2536/// {
2537
2538static unsigned MatchRegisterName(StringRef Name);
2539
2540/// }
2541
// Map a NEON vector register name ("v0".."v31", case-insensitive, no
// leading zeros) onto its underlying 128-bit Q register; returns 0 (no
// register) for anything else. Kept as an explicit table rather than
// arithmetic so it does not depend on the generated enum's ordering.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2578
2579/// Returns an optional pair of (#elements, element-width) if Suffix
2580/// is a valid vector kind. Where the number of elements in a vector
2581/// or the vector width is implicit or explicitly unknown (but still a
2582/// valid suffix kind), 0 is used.
2583static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2584                                                          RegKind VectorKind) {
2585  std::pair<int, int> Res = {-1, -1};
2586
2587  switch (VectorKind) {
2588  case RegKind::NeonVector:
2589    Res =
2590        StringSwitch<std::pair<int, int>>(Suffix.lower())
2591            .Case("", {0, 0})
2592            .Case(".1d", {1, 64})
2593            .Case(".1q", {1, 128})
2594            // '.2h' needed for fp16 scalar pairwise reductions
2595            .Case(".2h", {2, 16})
2596            .Case(".2s", {2, 32})
2597            .Case(".2d", {2, 64})
2598            // '.4b' is another special case for the ARMv8.2a dot product
2599            // operand
2600            .Case(".4b", {4, 8})
2601            .Case(".4h", {4, 16})
2602            .Case(".4s", {4, 32})
2603            .Case(".8b", {8, 8})
2604            .Case(".8h", {8, 16})
2605            .Case(".16b", {16, 8})
2606            // Accept the width neutral ones, too, for verbose syntax. If those
2607            // aren't used in the right places, the token operand won't match so
2608            // all will work out.
2609            .Case(".b", {0, 8})
2610            .Case(".h", {0, 16})
2611            .Case(".s", {0, 32})
2612            .Case(".d", {0, 64})
2613            .Default({-1, -1});
2614    break;
2615  case RegKind::SVEPredicateAsCounter:
2616  case RegKind::SVEPredicateVector:
2617  case RegKind::SVEDataVector:
2618  case RegKind::Matrix:
2619    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2620              .Case("", {0, 0})
2621              .Case(".b", {0, 8})
2622              .Case(".h", {0, 16})
2623              .Case(".s", {0, 32})
2624              .Case(".d", {0, 64})
2625              .Case(".q", {0, 128})
2626              .Default({-1, -1});
2627    break;
2628  default:
2629    llvm_unreachable("Unsupported RegKind");
2630  }
2631
2632  if (Res == std::make_pair(-1, -1))
2633    return std::nullopt;
2634
2635  return std::optional<std::pair<int, int>>(Res);
2636}
2637
2638static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2639  return parseVectorKind(Suffix, VectorKind).has_value();
2640}
2641
2642static unsigned matchSVEDataVectorRegName(StringRef Name) {
2643  return StringSwitch<unsigned>(Name.lower())
2644      .Case("z0", AArch64::Z0)
2645      .Case("z1", AArch64::Z1)
2646      .Case("z2", AArch64::Z2)
2647      .Case("z3", AArch64::Z3)
2648      .Case("z4", AArch64::Z4)
2649      .Case("z5", AArch64::Z5)
2650      .Case("z6", AArch64::Z6)
2651      .Case("z7", AArch64::Z7)
2652      .Case("z8", AArch64::Z8)
2653      .Case("z9", AArch64::Z9)
2654      .Case("z10", AArch64::Z10)
2655      .Case("z11", AArch64::Z11)
2656      .Case("z12", AArch64::Z12)
2657      .Case("z13", AArch64::Z13)
2658      .Case("z14", AArch64::Z14)
2659      .Case("z15", AArch64::Z15)
2660      .Case("z16", AArch64::Z16)
2661      .Case("z17", AArch64::Z17)
2662      .Case("z18", AArch64::Z18)
2663      .Case("z19", AArch64::Z19)
2664      .Case("z20", AArch64::Z20)
2665      .Case("z21", AArch64::Z21)
2666      .Case("z22", AArch64::Z22)
2667      .Case("z23", AArch64::Z23)
2668      .Case("z24", AArch64::Z24)
2669      .Case("z25", AArch64::Z25)
2670      .Case("z26", AArch64::Z26)
2671      .Case("z27", AArch64::Z27)
2672      .Case("z28", AArch64::Z28)
2673      .Case("z29", AArch64::Z29)
2674      .Case("z30", AArch64::Z30)
2675      .Case("z31", AArch64::Z31)
2676      .Default(0);
2677}
2678
2679static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2680  return StringSwitch<unsigned>(Name.lower())
2681      .Case("p0", AArch64::P0)
2682      .Case("p1", AArch64::P1)
2683      .Case("p2", AArch64::P2)
2684      .Case("p3", AArch64::P3)
2685      .Case("p4", AArch64::P4)
2686      .Case("p5", AArch64::P5)
2687      .Case("p6", AArch64::P6)
2688      .Case("p7", AArch64::P7)
2689      .Case("p8", AArch64::P8)
2690      .Case("p9", AArch64::P9)
2691      .Case("p10", AArch64::P10)
2692      .Case("p11", AArch64::P11)
2693      .Case("p12", AArch64::P12)
2694      .Case("p13", AArch64::P13)
2695      .Case("p14", AArch64::P14)
2696      .Case("p15", AArch64::P15)
2697      .Default(0);
2698}
2699
2700static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2701  return StringSwitch<unsigned>(Name.lower())
2702      .Case("pn0", AArch64::P0)
2703      .Case("pn1", AArch64::P1)
2704      .Case("pn2", AArch64::P2)
2705      .Case("pn3", AArch64::P3)
2706      .Case("pn4", AArch64::P4)
2707      .Case("pn5", AArch64::P5)
2708      .Case("pn6", AArch64::P6)
2709      .Case("pn7", AArch64::P7)
2710      .Case("pn8", AArch64::P8)
2711      .Case("pn9", AArch64::P9)
2712      .Case("pn10", AArch64::P10)
2713      .Case("pn11", AArch64::P11)
2714      .Case("pn12", AArch64::P12)
2715      .Case("pn13", AArch64::P13)
2716      .Case("pn14", AArch64::P14)
2717      .Case("pn15", AArch64::P15)
2718      .Default(0);
2719}
2720
// Map a whole-tile SME ZA tile name (e.g. "za3.d"), case-insensitively, to
// its tile register, for use inside a matrix tile list operand. Returns 0
// when Name is not one of the listed tiles. Only whole tiles with
// .d/.s/.h/.b suffixes appear in this table; '.q' tiles and the row/column
// ('h'/'v') slice spellings are not accepted here.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2740
// Map an SME matrix register name, case-insensitively, to its register
// number. Accepts the full array "za", whole tiles "zaN.<size>", and the
// row/column slice spellings "zaNh.<size>" / "zaNv.<size>". Note that the
// 'h' and 'v' spellings map to the same tile register as the plain form —
// the register number does not encode the slice direction (presumably that
// is handled by the surrounding operand parsing; verify against callers).
// Returns 0 for anything not in the table.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      // Horizontal-slice spellings: same register numbers as the whole tile.
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      // Vertical-slice spellings: same register numbers as the whole tile.
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
2839
2840bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
2841                                     SMLoc &EndLoc) {
2842  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2843}
2844
2845OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo,
2846                                                        SMLoc &StartLoc,
2847                                                        SMLoc &EndLoc) {
2848  StartLoc = getLoc();
2849  auto Res = tryParseScalarRegister(RegNo);
2850  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2851  return Res;
2852}
2853
2854// Matches a register name or register alias previously defined by '.req'
2855unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2856                                                  RegKind Kind) {
2857  unsigned RegNum = 0;
2858  if ((RegNum = matchSVEDataVectorRegName(Name)))
2859    return Kind == RegKind::SVEDataVector ? RegNum : 0;
2860
2861  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2862    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2863
2864  if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2865    return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2866
2867  if ((RegNum = MatchNeonVectorRegName(Name)))
2868    return Kind == RegKind::NeonVector ? RegNum : 0;
2869
2870  if ((RegNum = matchMatrixRegName(Name)))
2871    return Kind == RegKind::Matrix ? RegNum : 0;
2872
2873 if (Name.equals_insensitive("zt0"))
2874    return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2875
2876  // The parsed register must be of RegKind Scalar
2877  if ((RegNum = MatchRegisterName(Name)))
2878    return (Kind == RegKind::Scalar) ? RegNum : 0;
2879
2880  if (!RegNum) {
2881    // Handle a few common aliases of registers.
2882    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2883                    .Case("fp", AArch64::FP)
2884                    .Case("lr",  AArch64::LR)
2885                    .Case("x31", AArch64::XZR)
2886                    .Case("w31", AArch64::WZR)
2887                    .Default(0))
2888      return Kind == RegKind::Scalar ? RegNum : 0;
2889
2890    // Check for aliases registered via .req. Canonicalize to lower case.
2891    // That's more consistent since register names are case insensitive, and
2892    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2893    auto Entry = RegisterReqs.find(Name.lower());
2894    if (Entry == RegisterReqs.end())
2895      return 0;
2896
2897    // set RegNum if the match is the right kind of register
2898    if (Kind == Entry->getValue().first)
2899      RegNum = Entry->getValue().second;
2900  }
2901  return RegNum;
2902}
2903
2904unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2905  switch (K) {
2906  case RegKind::Scalar:
2907  case RegKind::NeonVector:
2908  case RegKind::SVEDataVector:
2909    return 32;
2910  case RegKind::Matrix:
2911  case RegKind::SVEPredicateVector:
2912  case RegKind::SVEPredicateAsCounter:
2913    return 16;
2914  case RegKind::LookupTable:
2915   return 1;
2916  }
2917  llvm_unreachable("Unsupported RegKind");
2918}
2919
2920/// tryParseScalarRegister - Try to parse a register name. The token must be an
2921/// Identifier when called, and if it is a register name the token is eaten and
2922/// the register is added to the operand list.
2923OperandMatchResultTy
2924AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2925  const AsmToken &Tok = getTok();
2926  if (Tok.isNot(AsmToken::Identifier))
2927    return MatchOperand_NoMatch;
2928
2929  std::string lowerCase = Tok.getString().lower();
2930  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2931  if (Reg == 0)
2932    return MatchOperand_NoMatch;
2933
2934  RegNum = Reg;
2935  Lex(); // Eat identifier token.
2936  return MatchOperand_Success;
2937}
2938
2939/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2940OperandMatchResultTy
2941AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2942  SMLoc S = getLoc();
2943
2944  if (getTok().isNot(AsmToken::Identifier)) {
2945    Error(S, "Expected cN operand where 0 <= N <= 15");
2946    return MatchOperand_ParseFail;
2947  }
2948
2949  StringRef Tok = getTok().getIdentifier();
2950  if (Tok[0] != 'c' && Tok[0] != 'C') {
2951    Error(S, "Expected cN operand where 0 <= N <= 15");
2952    return MatchOperand_ParseFail;
2953  }
2954
2955  uint32_t CRNum;
2956  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2957  if (BadNum || CRNum > 15) {
2958    Error(S, "Expected cN operand where 0 <= N <= 15");
2959    return MatchOperand_ParseFail;
2960  }
2961
2962  Lex(); // Eat identifier token.
2963  Operands.push_back(
2964      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2965  return MatchOperand_Success;
2966}
2967
2968// Either an identifier for named values or a 6-bit immediate.
2969OperandMatchResultTy
2970AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2971  SMLoc S = getLoc();
2972  const AsmToken &Tok = getTok();
2973
2974  unsigned MaxVal = 63;
2975
2976  // Immediate case, with optional leading hash:
2977  if (parseOptionalToken(AsmToken::Hash) ||
2978      Tok.is(AsmToken::Integer)) {
2979    const MCExpr *ImmVal;
2980    if (getParser().parseExpression(ImmVal))
2981      return MatchOperand_ParseFail;
2982
2983    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2984    if (!MCE) {
2985      TokError("immediate value expected for prefetch operand");
2986      return MatchOperand_ParseFail;
2987    }
2988    unsigned prfop = MCE->getValue();
2989    if (prfop > MaxVal) {
2990      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2991               "] expected");
2992      return MatchOperand_ParseFail;
2993    }
2994
2995    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
2996    Operands.push_back(AArch64Operand::CreatePrefetch(
2997        prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
2998    return MatchOperand_Success;
2999  }
3000
3001  if (Tok.isNot(AsmToken::Identifier)) {
3002    TokError("prefetch hint expected");
3003    return MatchOperand_ParseFail;
3004  }
3005
3006  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3007  if (!RPRFM) {
3008    TokError("prefetch hint expected");
3009    return MatchOperand_ParseFail;
3010  }
3011
3012  Operands.push_back(AArch64Operand::CreatePrefetch(
3013      RPRFM->Encoding, Tok.getString(), S, getContext()));
3014  Lex(); // Eat identifier token.
3015  return MatchOperand_Success;
3016}
3017
3018/// tryParsePrefetch - Try to parse a prefetch operand.
3019template <bool IsSVEPrefetch>
3020OperandMatchResultTy
3021AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3022  SMLoc S = getLoc();
3023  const AsmToken &Tok = getTok();
3024
3025  auto LookupByName = [](StringRef N) {
3026    if (IsSVEPrefetch) {
3027      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3028        return std::optional<unsigned>(Res->Encoding);
3029    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3030      return std::optional<unsigned>(Res->Encoding);
3031    return std::optional<unsigned>();
3032  };
3033
3034  auto LookupByEncoding = [](unsigned E) {
3035    if (IsSVEPrefetch) {
3036      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3037        return std::optional<StringRef>(Res->Name);
3038    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3039      return std::optional<StringRef>(Res->Name);
3040    return std::optional<StringRef>();
3041  };
3042  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3043
3044  // Either an identifier for named values or a 5-bit immediate.
3045  // Eat optional hash.
3046  if (parseOptionalToken(AsmToken::Hash) ||
3047      Tok.is(AsmToken::Integer)) {
3048    const MCExpr *ImmVal;
3049    if (getParser().parseExpression(ImmVal))
3050      return MatchOperand_ParseFail;
3051
3052    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3053    if (!MCE) {
3054      TokError("immediate value expected for prefetch operand");
3055      return MatchOperand_ParseFail;
3056    }
3057    unsigned prfop = MCE->getValue();
3058    if (prfop > MaxVal) {
3059      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3060               "] expected");
3061      return MatchOperand_ParseFail;
3062    }
3063
3064    auto PRFM = LookupByEncoding(MCE->getValue());
3065    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3066                                                      S, getContext()));
3067    return MatchOperand_Success;
3068  }
3069
3070  if (Tok.isNot(AsmToken::Identifier)) {
3071    TokError("prefetch hint expected");
3072    return MatchOperand_ParseFail;
3073  }
3074
3075  auto PRFM = LookupByName(Tok.getString());
3076  if (!PRFM) {
3077    TokError("prefetch hint expected");
3078    return MatchOperand_ParseFail;
3079  }
3080
3081  Operands.push_back(AArch64Operand::CreatePrefetch(
3082      *PRFM, Tok.getString(), S, getContext()));
3083  Lex(); // Eat identifier token.
3084  return MatchOperand_Success;
3085}
3086
3087/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3088OperandMatchResultTy
3089AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3090  SMLoc S = getLoc();
3091  const AsmToken &Tok = getTok();
3092  if (Tok.isNot(AsmToken::Identifier)) {
3093    TokError("invalid operand for instruction");
3094    return MatchOperand_ParseFail;
3095  }
3096
3097  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3098  if (!PSB) {
3099    TokError("invalid operand for instruction");
3100    return MatchOperand_ParseFail;
3101  }
3102
3103  Operands.push_back(AArch64Operand::CreatePSBHint(
3104      PSB->Encoding, Tok.getString(), S, getContext()));
3105  Lex(); // Eat identifier token.
3106  return MatchOperand_Success;
3107}
3108
3109OperandMatchResultTy
3110AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3111  SMLoc StartLoc = getLoc();
3112
3113  MCRegister RegNum;
3114
3115  // The case where xzr, xzr is not present is handled by an InstAlias.
3116
3117  auto RegTok = getTok(); // in case we need to backtrack
3118  if (tryParseScalarRegister(RegNum) != MatchOperand_Success)
3119    return MatchOperand_NoMatch;
3120
3121  if (RegNum != AArch64::XZR) {
3122    getLexer().UnLex(RegTok);
3123    return MatchOperand_NoMatch;
3124  }
3125
3126  if (parseComma())
3127    return MatchOperand_ParseFail;
3128
3129  if (tryParseScalarRegister(RegNum) != MatchOperand_Success) {
3130    TokError("expected register operand");
3131    return MatchOperand_ParseFail;
3132  }
3133
3134  if (RegNum != AArch64::XZR) {
3135    TokError("xzr must be followed by xzr");
3136    return MatchOperand_ParseFail;
3137  }
3138
3139  // We need to push something, since we claim this is an operand in .td.
3140  // See also AArch64AsmParser::parseKeywordOperand.
3141  Operands.push_back(AArch64Operand::CreateReg(
3142      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3143
3144  return MatchOperand_Success;
3145}
3146
3147/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3148OperandMatchResultTy
3149AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3150  SMLoc S = getLoc();
3151  const AsmToken &Tok = getTok();
3152  if (Tok.isNot(AsmToken::Identifier)) {
3153    TokError("invalid operand for instruction");
3154    return MatchOperand_ParseFail;
3155  }
3156
3157  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3158  if (!BTI) {
3159    TokError("invalid operand for instruction");
3160    return MatchOperand_ParseFail;
3161  }
3162
3163  Operands.push_back(AArch64Operand::CreateBTIHint(
3164      BTI->Encoding, Tok.getString(), S, getContext()));
3165  Lex(); // Eat identifier token.
3166  return MatchOperand_Success;
3167}
3168
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Accepts a plain symbol (implicitly wrapped as an ELF :abs_page:
/// reference), a page-qualified symbol (Darwin @page/@gotpage/@tlvppage, or
/// one of the ELF :got:/:gottprel:/:tlsdesc: page modifiers), or a plain
/// immediate. Other modifiers are diagnosed and rejected.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify what kind of symbol reference (if any) we parsed; a plain
  // immediate fails classification and is passed through unchanged.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
3220
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
///
/// Accepts an unqualified symbol (implicitly wrapped as an ELF :abs:
/// reference, with optional addend) or a plain immediate; any explicit
/// reference modifier is rejected.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify what kind of symbol reference (if any) we parsed; a plain
  // immediate fails classification and is passed through unchanged.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  // End location is the last character consumed by the expression parser.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
3257
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either an 8-bit hexadecimal encoding ("0x.." in [0, 255], decoded
/// via getFPImmFloat) or a literal decimal/real value, each with an optional
/// leading '#' and optional leading '-'. When AddFPZeroAsLiteral is set,
/// positive zero is pushed as the two tokens "#0" and ".0" instead of an
/// FPImm operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply wasn't an FP immediate; with one,
    // the operand is malformed.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The encoded form is exactly 8 bits and cannot be negated.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The second argument records whether the conversion was exact
      // (opOK), i.e. whether the literal is exactly representable.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return MatchOperand_Success;
}
3312
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
///
/// Also dispatches to tryParseImmRange for the '#lo:hi' range syntax, and
/// recognizes an optional trailing vector-group specifier instead of 'lsl'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  // '<int>:' introduces an immediate-range operand, handled elsewhere.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (getTok().isNot(AsmToken::Comma)) {
    // No suffix at all: a plain immediate operand.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // A vector-group specifier (parsed by parseOptionalVGOperand, which
  // returns false on success) is pushed as a separate token operand.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl")) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return MatchOperand_Success;
}
3385
/// parseCondCodeString - Parse a Condition Code string, optionally returning a
/// suggestion to help common typos.
///
/// Returns AArch64CC::Invalid when Cond is not recognized. When SVE is
/// available, the SVE alias spellings (e.g. "none", "any", "first") are also
/// accepted and mapped onto the standard condition codes; "nfirst" gets a
/// "did you mean nfrst" suggestion via the Suggestion out-parameter.
AArch64CC::CondCode
AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("eq", AArch64CC::EQ)
                    .Case("ne", AArch64CC::NE)
                    .Case("cs", AArch64CC::HS)
                    .Case("hs", AArch64CC::HS)
                    .Case("cc", AArch64CC::LO)
                    .Case("lo", AArch64CC::LO)
                    .Case("mi", AArch64CC::MI)
                    .Case("pl", AArch64CC::PL)
                    .Case("vs", AArch64CC::VS)
                    .Case("vc", AArch64CC::VC)
                    .Case("hi", AArch64CC::HI)
                    .Case("ls", AArch64CC::LS)
                    .Case("ge", AArch64CC::GE)
                    .Case("lt", AArch64CC::LT)
                    .Case("gt", AArch64CC::GT)
                    .Case("le", AArch64CC::LE)
                    .Case("al", AArch64CC::AL)
                    .Case("nv", AArch64CC::NV)
                    .Default(AArch64CC::Invalid);

  // SVE alias spellings, only accepted when the SVE feature is enabled.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE]) {
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

    // "nfrst" is an easy spelling to get wrong; offer a hint.
    if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
      Suggestion = "nfrst";
  }
  return CC;
}
3431
3432/// parseCondCode - Parse a Condition Code operand.
3433bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3434                                     bool invertCondCode) {
3435  SMLoc S = getLoc();
3436  const AsmToken &Tok = getTok();
3437  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3438
3439  StringRef Cond = Tok.getString();
3440  std::string Suggestion;
3441  AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3442  if (CC == AArch64CC::Invalid) {
3443    std::string Msg = "invalid condition code";
3444    if (!Suggestion.empty())
3445      Msg += ", did you mean " + Suggestion + "?";
3446    return TokError(Msg);
3447  }
3448  Lex(); // Eat identifier token.
3449
3450  if (invertCondCode) {
3451    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3452      return TokError("condition codes AL and NV are invalid for this instruction");
3453    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3454  }
3455
3456  Operands.push_back(
3457      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3458  return false;
3459}
3460
3461OperandMatchResultTy
3462AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3463  const AsmToken &Tok = getTok();
3464  SMLoc S = getLoc();
3465
3466  if (Tok.isNot(AsmToken::Identifier)) {
3467    TokError("invalid operand for instruction");
3468    return MatchOperand_ParseFail;
3469  }
3470
3471  unsigned PStateImm = -1;
3472  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3473  if (!SVCR)
3474    return MatchOperand_NoMatch;
3475  if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3476    PStateImm = SVCR->Encoding;
3477
3478  Operands.push_back(
3479      AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3480  Lex(); // Eat identifier token.
3481  return MatchOperand_Success;
3482}
3483
/// Parse an SME matrix operand: either the full ZA array (optionally with an
/// element-width suffix, e.g. "za.b") or an individual tile/row/column
/// register name such as "za0h.b".
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Case 1: the whole ZA array, with an optional ".b/.h/.s/.d" suffix.
  if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes) {
        TokError(
            "Expected the register to be followed by element width suffix");
        return MatchOperand_ParseFail;
      }
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Case 2: a single matrix (tile) register, e.g. "za0h.b".
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  // Matrix register names always carry an element-width suffix after a '.'.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // The last character before the '.' selects row ('h'), column ('v'), or
  // neither (a whole tile).
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3555
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument. Parse them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Lex(); // Eat the shift/extend specifier.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // No '#' and no bare integer follows the specifier.
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3627
// Maps an architecture-extension name (as accepted by e.g. the
// ".arch_extension" directive) to the subtarget features it enables.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3687
3688static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3689  if (FBS[AArch64::HasV8_0aOps])
3690    Str += "ARMv8a";
3691  if (FBS[AArch64::HasV8_1aOps])
3692    Str += "ARMv8.1a";
3693  else if (FBS[AArch64::HasV8_2aOps])
3694    Str += "ARMv8.2a";
3695  else if (FBS[AArch64::HasV8_3aOps])
3696    Str += "ARMv8.3a";
3697  else if (FBS[AArch64::HasV8_4aOps])
3698    Str += "ARMv8.4a";
3699  else if (FBS[AArch64::HasV8_5aOps])
3700    Str += "ARMv8.5a";
3701  else if (FBS[AArch64::HasV8_6aOps])
3702    Str += "ARMv8.6a";
3703  else if (FBS[AArch64::HasV8_7aOps])
3704    Str += "ARMv8.7a";
3705  else if (FBS[AArch64::HasV8_8aOps])
3706    Str += "ARMv8.8a";
3707  else if (FBS[AArch64::HasV8_9aOps])
3708    Str += "ARMv8.9a";
3709  else if (FBS[AArch64::HasV9_0aOps])
3710    Str += "ARMv9-a";
3711  else if (FBS[AArch64::HasV9_1aOps])
3712    Str += "ARMv9.1a";
3713  else if (FBS[AArch64::HasV9_2aOps])
3714    Str += "ARMv9.2a";
3715  else if (FBS[AArch64::HasV9_3aOps])
3716    Str += "ARMv9.3a";
3717  else if (FBS[AArch64::HasV9_4aOps])
3718    Str += "ARMv9.4a";
3719  else if (FBS[AArch64::HasV8_0rOps])
3720    Str += "ARMv8r";
3721  else {
3722    SmallVector<std::string, 2> ExtMatches;
3723    for (const auto& Ext : ExtensionMap) {
3724      // Use & in case multiple features are enabled
3725      if ((FBS & Ext.Features) != FeatureBitset())
3726        ExtMatches.push_back(Ext.Name);
3727    }
3728    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3729  }
3730}
3731
3732void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3733                                      SMLoc S) {
3734  const uint16_t Op2 = Encoding & 7;
3735  const uint16_t Cm = (Encoding & 0x78) >> 3;
3736  const uint16_t Cn = (Encoding & 0x780) >> 7;
3737  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3738
3739  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3740
3741  Operands.push_back(
3742      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3743  Operands.push_back(
3744      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3745  Operands.push_back(
3746      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3747  Expr = MCConstantExpr::create(Op2, getContext());
3748  Operands.push_back(
3749      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3750}
3751
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true on error (with a diagnostic already emitted).
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases lower to a plain SYS instruction.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Look the operation name up in the per-mnemonic table; each lookup also
  // checks that the current subtarget provides the required features.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
    // Prediction-restriction instructions only accept the RCTX operand.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects the specific restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp"    ? 0b100
                         : Mnemonic == "dvp"  ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp"  ? 0b111
                                              : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and therefore
  // take no register operand; all others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3855
3856/// parseSyspAlias - The TLBIP instructions are simple aliases for
3857/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3858bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3859                                      OperandVector &Operands) {
3860  if (Name.contains('.'))
3861    return TokError("invalid operand");
3862
3863  Mnemonic = Name;
3864  Operands.push_back(
3865      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3866
3867  const AsmToken &Tok = getTok();
3868  StringRef Op = Tok.getString();
3869  SMLoc S = Tok.getLoc();
3870
3871  if (Mnemonic == "tlbip") {
3872    bool HasnXSQualifier = Op.endswith_insensitive("nXS");
3873    if (HasnXSQualifier) {
3874      Op = Op.drop_back(3);
3875    }
3876    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3877    if (!TLBIorig)
3878      return TokError("invalid operand for TLBIP instruction");
3879    const AArch64TLBI::TLBI TLBI(
3880        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3881        TLBIorig->NeedsReg,
3882        HasnXSQualifier
3883            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3884            : TLBIorig->FeaturesRequired);
3885    if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3886      std::string Name =
3887          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3888      std::string Str("TLBIP " + Name + " requires: ");
3889      setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3890      return TokError(Str);
3891    }
3892    createSysAlias(TLBI.Encoding, Operands, S);
3893  }
3894
3895  Lex(); // Eat operand.
3896
3897  if (parseComma())
3898    return true;
3899
3900  if (Tok.isNot(AsmToken::Identifier))
3901    return TokError("expected register identifier");
3902  auto Result = tryParseSyspXzrPair(Operands);
3903  if (Result == MatchOperand_NoMatch)
3904    Result = tryParseGPRSeqPair(Operands);
3905  if (Result != MatchOperand_Success)
3906    return TokError("specified " + Mnemonic +
3907                    " op requires a pair of registers");
3908
3909  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3910    return true;
3911
3912  return false;
3913}
3914
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
/// immediate in [0, 15] or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    AsmToken IntTok = Tok; // Saved so we can backtrack for the nXS variant.
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name for the encoding, if one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named barrier option.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3987
/// Parse the operand of the v8.7-A "DSB ... nXS" barrier variant: either an
/// immediate (16, 20, 24, or 28) or a named nXS barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only DSB accepts nXS operands; anything else is a parser-dispatch bug.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The range check above guarantees the lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named nXS barrier option (e.g. "synXS").
  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
4041
4042OperandMatchResultTy
4043AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4044  const AsmToken &Tok = getTok();
4045
4046  if (Tok.isNot(AsmToken::Identifier))
4047    return MatchOperand_NoMatch;
4048
4049  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4050    return MatchOperand_NoMatch;
4051
4052  int MRSReg, MSRReg;
4053  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4054  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4055    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4056    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4057  } else
4058    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4059
4060  unsigned PStateImm = -1;
4061  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4062  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4063    PStateImm = PState15->Encoding;
4064  if (!PState15) {
4065    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4066    if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4067      PStateImm = PState1->Encoding;
4068  }
4069
4070  Operands.push_back(
4071      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4072                                   PStateImm, getContext()));
4073  Lex(); // Eat identifier
4074
4075  return MatchOperand_Success;
4076}
4077
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true if the token could not be parsed as a Neon vector register.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Map the optional ".8b"-style suffix to an element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // A trailing '[<imm>]' element index is optional; only a malformed index
  // is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
4108
4109OperandMatchResultTy
4110AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4111  SMLoc SIdx = getLoc();
4112  if (parseOptionalToken(AsmToken::LBrac)) {
4113    const MCExpr *ImmVal;
4114    if (getParser().parseExpression(ImmVal))
4115      return MatchOperand_NoMatch;
4116    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4117    if (!MCE) {
4118      TokError("immediate value expected for vector index");
4119      return MatchOperand_ParseFail;;
4120    }
4121
4122    SMLoc E = getLoc();
4123
4124    if (parseToken(AsmToken::RBrac, "']' expected"))
4125      return MatchOperand_ParseFail;;
4126
4127    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4128                                                         E, getContext()));
4129    return MatchOperand_Success;
4130  }
4131
4132  return MatchOperand_NoMatch;
4133}
4134
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Kind keeps the leading '.' (e.g. ".8b") for later parsing.
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
4169
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// including an optional vector index or trailing '/m' / '/z' predication
/// qualifier. RK selects predicate-as-vector vs. predicate-as-counter.
template <RegKind RK> OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // Predicate-as-counter registers take a plain vector index.
      OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex == MatchOperand_Success)
        return MatchOperand_Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
    // Predicate-as-counter only supports zeroing predication.
    Error(getLoc(), "expecting 'z' predication");
    return MatchOperand_ParseFail;
  }

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
4237
4238/// parseRegister - Parse a register operand.
4239bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4240  // Try for a Neon vector register.
4241  if (!tryParseNeonVectorRegister(Operands))
4242    return false;
4243
4244  if (tryParseZTOperand(Operands) == MatchOperand_Success)
4245    return false;
4246
4247  // Otherwise try for a scalar register.
4248  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
4249    return false;
4250
4251  return true;
4252}
4253
/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed
/// by an ELF relocation specifier of the form ":<specifier>:", e.g.
/// ":lo12:symbol". On success ImmVal holds the parsed expression (wrapped in
/// an AArch64MCExpr when a specifier was present); returns true on error
/// after emitting a diagnostic.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Map the (case-insensitive) specifier name to its variant kind.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation specifier survives to emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4331
/// tryParseMatrixTileList - Parse an SME matrix tile list operand of the form
/// "{ za0.d, za1.d, ... }", including the empty list "{}" and the "{za}"
/// alias for all tiles. The listed tiles are folded into a bitmask of the
/// ZAD tiles they alias.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse one tile name of the form "za<n>.<size>", yielding the register
  // and its element width. Only consumes the token on success.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // {za} selects all eight ZAD tiles (mask bits ZAD0..ZAD7).
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put back the '{' so other list parsers can try this operand.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the set of ZAD tiles aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are only warned about, not rejected.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Encode the aliased ZAD tiles as a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
4440
/// tryParseVectorList - Parse a '{ ... }' vector register list, either a
/// dash-separated range ("{ v0.8b - v3.8b }") or a comma-separated sequence
/// with a uniform stride. If \p ExpectMatch is true, failing to find a vector
/// register inside the braces is a hard error rather than a NoMatch.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is the SME lookup-table register, not a vector; leave it for
    // its own parser instead of erroring out here.
    if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
        RegTok.getString().equals_insensitive("zt0"))
      return MatchOperand_NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ first - last }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // The range may wrap around past the last register number.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form. The stride is fixed by the first pair of
    // registers and every later register must continue it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
        Error(Loc, "registers must have the same sequential stride");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // Resolve element count/width from the shared type suffix, if present.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
4578
4579/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4580bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4581  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4582  if (ParseRes != MatchOperand_Success)
4583    return true;
4584
4585  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4586}
4587
4588OperandMatchResultTy
4589AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4590  SMLoc StartLoc = getLoc();
4591
4592  MCRegister RegNum;
4593  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4594  if (Res != MatchOperand_Success)
4595    return Res;
4596
4597  if (!parseOptionalToken(AsmToken::Comma)) {
4598    Operands.push_back(AArch64Operand::CreateReg(
4599        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4600    return MatchOperand_Success;
4601  }
4602
4603  parseOptionalToken(AsmToken::Hash);
4604
4605  if (getTok().isNot(AsmToken::Integer)) {
4606    Error(getLoc(), "index must be absent or #0");
4607    return MatchOperand_ParseFail;
4608  }
4609
4610  const MCExpr *ImmVal;
4611  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4612      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4613    Error(getLoc(), "index must be absent or #0");
4614    return MatchOperand_ParseFail;
4615  }
4616
4617  Operands.push_back(AArch64Operand::CreateReg(
4618      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4619  return MatchOperand_Success;
4620}
4621
4622OperandMatchResultTy
4623AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4624  SMLoc StartLoc = getLoc();
4625  const AsmToken &Tok = getTok();
4626  std::string Name = Tok.getString().lower();
4627
4628  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4629
4630  if (RegNum == 0)
4631    return MatchOperand_NoMatch;
4632
4633  Operands.push_back(AArch64Operand::CreateReg(
4634      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4635  Lex(); // Eat identifier token.
4636
4637  // Check if register is followed by an index
4638  if (parseOptionalToken(AsmToken::LBrac)) {
4639    const MCExpr *ImmVal;
4640    if (getParser().parseExpression(ImmVal))
4641      return MatchOperand_NoMatch;
4642    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4643    if (!MCE) {
4644      TokError("immediate value expected for vector index");
4645      return MatchOperand_ParseFail;
4646    }
4647    if (parseToken(AsmToken::RBrac, "']' expected"))
4648      return MatchOperand_ParseFail;
4649
4650    Operands.push_back(AArch64Operand::CreateImm(
4651        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4652        getLoc(), getContext()));
4653  }
4654
4655  return MatchOperand_Success;
4656}
4657
4658template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4659OperandMatchResultTy
4660AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4661  SMLoc StartLoc = getLoc();
4662
4663  MCRegister RegNum;
4664  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4665  if (Res != MatchOperand_Success)
4666    return Res;
4667
4668  // No shift/extend is the default.
4669  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4670    Operands.push_back(AArch64Operand::CreateReg(
4671        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4672    return MatchOperand_Success;
4673  }
4674
4675  // Eat the comma
4676  Lex();
4677
4678  // Match the shift
4679  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4680  Res = tryParseOptionalShiftExtend(ExtOpnd);
4681  if (Res != MatchOperand_Success)
4682    return Res;
4683
4684  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4685  Operands.push_back(AArch64Operand::CreateReg(
4686      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4687      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4688      Ext->hasShiftExtendAmount()));
4689
4690  return MatchOperand_Success;
4691}
4692
4693bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4694  MCAsmParser &Parser = getParser();
4695
4696  // Some SVE instructions have a decoration after the immediate, i.e.
4697  // "mul vl". We parse them here and add tokens, which must be present in the
4698  // asm string in the tablegen instruction.
4699  bool NextIsVL =
4700      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4701  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4702  if (!getTok().getString().equals_insensitive("mul") ||
4703      !(NextIsVL || NextIsHash))
4704    return true;
4705
4706  Operands.push_back(
4707      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4708  Lex(); // Eat the "mul"
4709
4710  if (NextIsVL) {
4711    Operands.push_back(
4712        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4713    Lex(); // Eat the "vl"
4714    return false;
4715  }
4716
4717  if (NextIsHash) {
4718    Lex(); // Eat the #
4719    SMLoc S = getLoc();
4720
4721    // Parse immediate operand.
4722    const MCExpr *ImmVal;
4723    if (!Parser.parseExpression(ImmVal))
4724      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4725        Operands.push_back(AArch64Operand::CreateImm(
4726            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4727            getContext()));
4728        return MatchOperand_Success;
4729      }
4730  }
4731
4732  return Error(getLoc(), "expected 'vl' or '#<imm>'");
4733}
4734
4735bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4736                                              StringRef &VecGroup) {
4737  MCAsmParser &Parser = getParser();
4738  auto Tok = Parser.getTok();
4739  if (Tok.isNot(AsmToken::Identifier))
4740    return true;
4741
4742  StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4743                     .Case("vgx2", "vgx2")
4744                     .Case("vgx4", "vgx4")
4745                     .Default("");
4746
4747  if (VG.empty())
4748    return true;
4749
4750  VecGroup = VG;
4751  Parser.Lex(); // Eat vgx[2|4]
4752  return false;
4753}
4754
4755bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4756  auto Tok = getTok();
4757  if (Tok.isNot(AsmToken::Identifier))
4758    return true;
4759
4760  auto Keyword = Tok.getString();
4761  Keyword = StringSwitch<StringRef>(Keyword.lower())
4762                .Case("sm", "sm")
4763                .Case("za", "za")
4764                .Default(Keyword);
4765  Operands.push_back(
4766      AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4767
4768  Lex();
4769  return false;
4770}
4771
/// parseOperand - Parse an AArch64 instruction operand.  For now this parses
/// the operand regardless of the mnemonic. Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) immediate
    // expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Try a Neon vector list first; if it doesn't apply, emit '{' as a
    // plain token and recurse for whatever follows.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      // NOTE(review): relies on the implicit OperandMatchResultTy -> bool
      // conversion (Success == 0 -> false, ParseFail -> true); an explicit
      // comparison would be clearer.
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm strings in
      // the tablegen definitions.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr <reg>, =<expr>" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Shift the value right in 16-bit steps while the low 16 bits are
      // zero, so it can be encoded as movz with an LSL #16/#32/#48.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4967
4968bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4969  const MCExpr *Expr = nullptr;
4970  SMLoc L = getLoc();
4971  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4972    return true;
4973  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4974  if (check(!Value, L, "expected constant expression"))
4975    return true;
4976  Out = Value->getValue();
4977  return false;
4978}
4979
4980bool AArch64AsmParser::parseComma() {
4981  if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4982    return true;
4983  // Eat the comma
4984  Lex();
4985  return false;
4986}
4987
4988bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4989                                            unsigned First, unsigned Last) {
4990  MCRegister Reg;
4991  SMLoc Start, End;
4992  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
4993    return true;
4994
4995  // Special handling for FP and LR; they aren't linearly after x28 in
4996  // the registers enum.
4997  unsigned RangeEnd = Last;
4998  if (Base == AArch64::X0) {
4999    if (Last == AArch64::FP) {
5000      RangeEnd = AArch64::X28;
5001      if (Reg == AArch64::FP) {
5002        Out = 29;
5003        return false;
5004      }
5005    }
5006    if (Last == AArch64::LR) {
5007      RangeEnd = AArch64::X28;
5008      if (Reg == AArch64::FP) {
5009        Out = 29;
5010        return false;
5011      } else if (Reg == AArch64::LR) {
5012        Out = 30;
5013        return false;
5014      }
5015    }
5016  }
5017
5018  if (check(Reg < First || Reg > RangeEnd, Start,
5019            Twine("expected register in range ") +
5020                AArch64InstPrinter::getRegisterName(First) + " to " +
5021                AArch64InstPrinter::getRegisterName(Last)))
5022    return true;
5023  Out = Reg - Base;
5024  return false;
5025}
5026
5027bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5028                                    const MCParsedAsmOperand &Op2) const {
5029  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5030  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5031
5032  if (AOp1.isVectorList() && AOp2.isVectorList())
5033    return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5034           AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5035           AOp1.getVectorListStride() == AOp2.getVectorListStride();
5036
5037  if (!AOp1.isReg() || !AOp2.isReg())
5038    return false;
5039
5040  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5041      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5042    return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5043
5044  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5045         "Testing equality of non-scalar registers not supported");
5046
5047  // Check if a registers match their sub/super register classes.
5048  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5049    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5050  if (AOp1.getRegEqualityTy() == EqualsSubReg)
5051    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5052  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5053    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5054  if (AOp2.getRegEqualityTy() == EqualsSubReg)
5055    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5056
5057  return false;
5058}
5059
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// On entry \p Name holds the (already lexed) mnemonic, possibly with
/// '.'-separated suffixes attached, and \p NameLoc its source location.
/// On success, returns false and \p Operands holds the mnemonic token(s)
/// followed by the parsed operands; on failure returns true after emitting
/// a diagnostic.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy "b<cond>" spellings into the architectural
  // "b.<cond>" form so the suffix handling below only deals with one syntax.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive, which is spelled
  // "<alias> .req <register>" and therefore arrives here looking like a
  // mnemonic followed by ".req".
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic: consume the first '.'
  // suffix and turn it into a CondCode operand rather than a plain token.
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next); // suffix text, without the '.'

    // Point the diagnostic at the condition-code text inside the mnemonic.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic. Here each Head keeps its
  // leading '.'; the +1 below makes the suffix location point just past it.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based index of the operand currently being parsed
    do {
      // Parse and remember the operand. The two flags tell parseOperand
      // whether this position is a condition code, and whether that code
      // must be inverted (the cset/cinc alias family).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5208
5209static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5210  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5211  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5212         (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5213         (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5214         (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5215         (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5216         (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5217}
5218
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
/// Perform hand-written post-match validation of \p Inst: movprfx prefix
/// rules, unpredictable writeback/register-overlap forms of LDP/STP/LDR/STR/
/// STXR/STXP/LDRA, the v8.8-A CPY*/SET* memops operand-tie rules, and symbol
/// reference restrictions on ADD/SUB immediates.
///
/// \param Inst  The matched instruction.
/// \param IDLoc Location of the mnemonic, used for instruction-wide errors.
/// \param Loc   Per-operand locations, used for operand-specific errors.
/// \returns true if a diagnostic was emitted, false if the instruction is OK.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it.  Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erronously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed intructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Operands tied to the destination are exempt: they ARE the destination.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    // NOTE(review): this copies the MCRegisterClass by value; a const
    // reference would avoid the copy.
    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate: the first PPR operand.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback forms: operand 0 is the writeback base def, 1/2 are Rt/Rt2,
    // 3 is the base Rn.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): after this fallthrough the next case reads operands 0/1,
    // which for these writeback forms are the writeback def and Rt rather
    // than Rt/Rt2 — confirm the Rt2==Rt check below does what is intended
    // for the pre/post variants.
    [[fallthrough]];
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback forms: operands 0/1 are Rt/Rt2.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP-register writeback pairs: the base cannot alias an FP destination,
    // so only the Rt==Rt2 hazard applies.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Single-register load with writeback: base must not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Single-register store with writeback: base must not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive store: status register Rs must not overlap Rt or Rn
    // (Rn == SP is allowed).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    // Exclusive store pair: same rule, with two data registers.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    // Pointer-authenticated load with writeback: Xt must differ from Xn.
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
          "unpredictable LDRA instruction, writeback base"
          " is also a destination");
    break;
  }
  }

  // Check v8.8-A memops instructions.
  switch (Inst.getOpcode()) {
  case AArch64::CPYFP:
  case AArch64::CPYFPWN:
  case AArch64::CPYFPRN:
  case AArch64::CPYFPN:
  case AArch64::CPYFPWT:
  case AArch64::CPYFPWTWN:
  case AArch64::CPYFPWTRN:
  case AArch64::CPYFPWTN:
  case AArch64::CPYFPRT:
  case AArch64::CPYFPRTWN:
  case AArch64::CPYFPRTRN:
  case AArch64::CPYFPRTN:
  case AArch64::CPYFPT:
  case AArch64::CPYFPTWN:
  case AArch64::CPYFPTRN:
  case AArch64::CPYFPTN:
  case AArch64::CPYFM:
  case AArch64::CPYFMWN:
  case AArch64::CPYFMRN:
  case AArch64::CPYFMN:
  case AArch64::CPYFMWT:
  case AArch64::CPYFMWTWN:
  case AArch64::CPYFMWTRN:
  case AArch64::CPYFMWTN:
  case AArch64::CPYFMRT:
  case AArch64::CPYFMRTWN:
  case AArch64::CPYFMRTRN:
  case AArch64::CPYFMRTN:
  case AArch64::CPYFMT:
  case AArch64::CPYFMTWN:
  case AArch64::CPYFMTRN:
  case AArch64::CPYFMTN:
  case AArch64::CPYFE:
  case AArch64::CPYFEWN:
  case AArch64::CPYFERN:
  case AArch64::CPYFEN:
  case AArch64::CPYFEWT:
  case AArch64::CPYFEWTWN:
  case AArch64::CPYFEWTRN:
  case AArch64::CPYFEWTN:
  case AArch64::CPYFERT:
  case AArch64::CPYFERTWN:
  case AArch64::CPYFERTRN:
  case AArch64::CPYFERTN:
  case AArch64::CPYFET:
  case AArch64::CPYFETWN:
  case AArch64::CPYFETRN:
  case AArch64::CPYFETN:
  case AArch64::CPYP:
  case AArch64::CPYPWN:
  case AArch64::CPYPRN:
  case AArch64::CPYPN:
  case AArch64::CPYPWT:
  case AArch64::CPYPWTWN:
  case AArch64::CPYPWTRN:
  case AArch64::CPYPWTN:
  case AArch64::CPYPRT:
  case AArch64::CPYPRTWN:
  case AArch64::CPYPRTRN:
  case AArch64::CPYPRTN:
  case AArch64::CPYPT:
  case AArch64::CPYPTWN:
  case AArch64::CPYPTRN:
  case AArch64::CPYPTN:
  case AArch64::CPYM:
  case AArch64::CPYMWN:
  case AArch64::CPYMRN:
  case AArch64::CPYMN:
  case AArch64::CPYMWT:
  case AArch64::CPYMWTWN:
  case AArch64::CPYMWTRN:
  case AArch64::CPYMWTN:
  case AArch64::CPYMRT:
  case AArch64::CPYMRTWN:
  case AArch64::CPYMRTRN:
  case AArch64::CPYMRTN:
  case AArch64::CPYMT:
  case AArch64::CPYMTWN:
  case AArch64::CPYMTRN:
  case AArch64::CPYMTN:
  case AArch64::CPYE:
  case AArch64::CPYEWN:
  case AArch64::CPYERN:
  case AArch64::CPYEN:
  case AArch64::CPYEWT:
  case AArch64::CPYEWTWN:
  case AArch64::CPYEWTRN:
  case AArch64::CPYEWTN:
  case AArch64::CPYERT:
  case AArch64::CPYERTWN:
  case AArch64::CPYERTRN:
  case AArch64::CPYERTN:
  case AArch64::CPYET:
  case AArch64::CPYETWN:
  case AArch64::CPYETRN:
  case AArch64::CPYETN: {
    // CPY*: operands 0-2 are the written-back copies of destination, source
    // and size registers; operands 3-5 are the corresponding inputs. The
    // pairs must be tied and all three registers must be distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xs_wb = Inst.getOperand(1).getReg();
    unsigned Xn_wb = Inst.getOperand(2).getReg();
    unsigned Xd = Inst.getOperand(3).getReg();
    unsigned Xs = Inst.getOperand(4).getReg();
    unsigned Xn = Inst.getOperand(5).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid CPY instruction, Xd_wb and Xd do not match");
    if (Xs_wb != Xs)
      return Error(Loc[0],
                   "invalid CPY instruction, Xs_wb and Xs do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid CPY instruction, Xn_wb and Xn do not match");
    if (Xd == Xs)
      return Error(Loc[0], "invalid CPY instruction, destination and source"
                           " registers are the same");
    if (Xd == Xn)
      return Error(Loc[0], "invalid CPY instruction, destination and size"
                           " registers are the same");
    if (Xs == Xn)
      return Error(Loc[0], "invalid CPY instruction, source and size"
                           " registers are the same");
    break;
  }
  case AArch64::SETP:
  case AArch64::SETPT:
  case AArch64::SETPN:
  case AArch64::SETPTN:
  case AArch64::SETM:
  case AArch64::SETMT:
  case AArch64::SETMN:
  case AArch64::SETMTN:
  case AArch64::SETE:
  case AArch64::SETET:
  case AArch64::SETEN:
  case AArch64::SETETN:
  case AArch64::SETGP:
  case AArch64::SETGPT:
  case AArch64::SETGPN:
  case AArch64::SETGPTN:
  case AArch64::SETGM:
  case AArch64::SETGMT:
  case AArch64::SETGMN:
  case AArch64::SETGMTN:
  case AArch64::MOPSSETGE:
  case AArch64::MOPSSETGET:
  case AArch64::MOPSSETGEN:
  case AArch64::MOPSSETGETN: {
    // SET*: operands 0-1 are written-back destination/size; 2-3 are the
    // matching inputs; 4 is the source value Xm. Tied pairs must match and
    // destination, size and source must all be distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xn_wb = Inst.getOperand(1).getReg();
    unsigned Xd = Inst.getOperand(2).getReg();
    unsigned Xn = Inst.getOperand(3).getReg();
    unsigned Xm = Inst.getOperand(4).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid SET instruction, Xd_wb and Xd do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid SET instruction, Xn_wb and Xn do not match");
    if (Xd == Xn)
      return Error(Loc[0], "invalid SET instruction, destination and size"
                           " registers are the same");
    if (Xd == Xm)
      return Error(Loc[0], "invalid SET instruction, destination and source"
                           " registers are the same");
    if (Xn == Xm)
      return Error(Loc[0], "invalid SET instruction, source and size"
                           " registers are the same");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
5677
5678static std::string AArch64MnemonicSpellCheck(StringRef S,
5679                                             const FeatureBitset &FBS,
5680                                             unsigned VariantID = 0);
5681
5682bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5683                                      uint64_t ErrorInfo,
5684                                      OperandVector &Operands) {
5685  switch (ErrCode) {
5686  case Match_InvalidTiedOperand: {
5687    auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5688    if (Op.isVectorList())
5689      return Error(Loc, "operand must match destination register list");
5690
5691    assert(Op.isReg() && "Unexpected operand type");
5692    switch (Op.getRegEqualityTy()) {
5693    case RegConstraintEqualityTy::EqualsSubReg:
5694      return Error(Loc, "operand must be 64-bit form of destination register");
5695    case RegConstraintEqualityTy::EqualsSuperReg:
5696      return Error(Loc, "operand must be 32-bit form of destination register");
5697    case RegConstraintEqualityTy::EqualsReg:
5698      return Error(Loc, "operand must match destination register");
5699    }
5700    llvm_unreachable("Unknown RegConstraintEqualityTy");
5701  }
5702  case Match_MissingFeature:
5703    return Error(Loc,
5704                 "instruction requires a CPU feature not currently enabled");
5705  case Match_InvalidOperand:
5706    return Error(Loc, "invalid operand for instruction");
5707  case Match_InvalidSuffix:
5708    return Error(Loc, "invalid type suffix for instruction");
5709  case Match_InvalidCondCode:
5710    return Error(Loc, "expected AArch64 condition code");
5711  case Match_AddSubRegExtendSmall:
5712    return Error(Loc,
5713      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5714  case Match_AddSubRegExtendLarge:
5715    return Error(Loc,
5716      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5717  case Match_AddSubSecondSource:
5718    return Error(Loc,
5719      "expected compatible register, symbol or integer in range [0, 4095]");
5720  case Match_LogicalSecondSource:
5721    return Error(Loc, "expected compatible register or logical immediate");
5722  case Match_InvalidMovImm32Shift:
5723    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5724  case Match_InvalidMovImm64Shift:
5725    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5726  case Match_AddSubRegShift32:
5727    return Error(Loc,
5728       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5729  case Match_AddSubRegShift64:
5730    return Error(Loc,
5731       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5732  case Match_InvalidFPImm:
5733    return Error(Loc,
5734                 "expected compatible register or floating-point constant");
5735  case Match_InvalidMemoryIndexedSImm6:
5736    return Error(Loc, "index must be an integer in range [-32, 31].");
5737  case Match_InvalidMemoryIndexedSImm5:
5738    return Error(Loc, "index must be an integer in range [-16, 15].");
5739  case Match_InvalidMemoryIndexed1SImm4:
5740    return Error(Loc, "index must be an integer in range [-8, 7].");
5741  case Match_InvalidMemoryIndexed2SImm4:
5742    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5743  case Match_InvalidMemoryIndexed3SImm4:
5744    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5745  case Match_InvalidMemoryIndexed4SImm4:
5746    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5747  case Match_InvalidMemoryIndexed16SImm4:
5748    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5749  case Match_InvalidMemoryIndexed32SImm4:
5750    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5751  case Match_InvalidMemoryIndexed1SImm6:
5752    return Error(Loc, "index must be an integer in range [-32, 31].");
5753  case Match_InvalidMemoryIndexedSImm8:
5754    return Error(Loc, "index must be an integer in range [-128, 127].");
5755  case Match_InvalidMemoryIndexedSImm9:
5756    return Error(Loc, "index must be an integer in range [-256, 255].");
5757  case Match_InvalidMemoryIndexed16SImm9:
5758    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5759  case Match_InvalidMemoryIndexed8SImm10:
5760    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5761  case Match_InvalidMemoryIndexed4SImm7:
5762    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5763  case Match_InvalidMemoryIndexed8SImm7:
5764    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5765  case Match_InvalidMemoryIndexed16SImm7:
5766    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5767  case Match_InvalidMemoryIndexed8UImm5:
5768    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5769  case Match_InvalidMemoryIndexed8UImm3:
5770    return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5771  case Match_InvalidMemoryIndexed4UImm5:
5772    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5773  case Match_InvalidMemoryIndexed2UImm5:
5774    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5775  case Match_InvalidMemoryIndexed8UImm6:
5776    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5777  case Match_InvalidMemoryIndexed16UImm6:
5778    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5779  case Match_InvalidMemoryIndexed4UImm6:
5780    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5781  case Match_InvalidMemoryIndexed2UImm6:
5782    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5783  case Match_InvalidMemoryIndexed1UImm6:
5784    return Error(Loc, "index must be in range [0, 63].");
5785  case Match_InvalidMemoryWExtend8:
5786    return Error(Loc,
5787                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5788  case Match_InvalidMemoryWExtend16:
5789    return Error(Loc,
5790                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5791  case Match_InvalidMemoryWExtend32:
5792    return Error(Loc,
5793                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5794  case Match_InvalidMemoryWExtend64:
5795    return Error(Loc,
5796                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5797  case Match_InvalidMemoryWExtend128:
5798    return Error(Loc,
5799                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5800  case Match_InvalidMemoryXExtend8:
5801    return Error(Loc,
5802                 "expected 'lsl' or 'sxtx' with optional shift of #0");
5803  case Match_InvalidMemoryXExtend16:
5804    return Error(Loc,
5805                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5806  case Match_InvalidMemoryXExtend32:
5807    return Error(Loc,
5808                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5809  case Match_InvalidMemoryXExtend64:
5810    return Error(Loc,
5811                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5812  case Match_InvalidMemoryXExtend128:
5813    return Error(Loc,
5814                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5815  case Match_InvalidMemoryIndexed1:
5816    return Error(Loc, "index must be an integer in range [0, 4095].");
5817  case Match_InvalidMemoryIndexed2:
5818    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5819  case Match_InvalidMemoryIndexed4:
5820    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5821  case Match_InvalidMemoryIndexed8:
5822    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5823  case Match_InvalidMemoryIndexed16:
5824    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5825  case Match_InvalidImm0_0:
5826    return Error(Loc, "immediate must be 0.");
5827  case Match_InvalidImm0_1:
5828    return Error(Loc, "immediate must be an integer in range [0, 1].");
5829  case Match_InvalidImm0_3:
5830    return Error(Loc, "immediate must be an integer in range [0, 3].");
5831  case Match_InvalidImm0_7:
5832    return Error(Loc, "immediate must be an integer in range [0, 7].");
5833  case Match_InvalidImm0_15:
5834    return Error(Loc, "immediate must be an integer in range [0, 15].");
5835  case Match_InvalidImm0_31:
5836    return Error(Loc, "immediate must be an integer in range [0, 31].");
5837  case Match_InvalidImm0_63:
5838    return Error(Loc, "immediate must be an integer in range [0, 63].");
5839  case Match_InvalidImm0_127:
5840    return Error(Loc, "immediate must be an integer in range [0, 127].");
5841  case Match_InvalidImm0_255:
5842    return Error(Loc, "immediate must be an integer in range [0, 255].");
5843  case Match_InvalidImm0_65535:
5844    return Error(Loc, "immediate must be an integer in range [0, 65535].");
5845  case Match_InvalidImm1_8:
5846    return Error(Loc, "immediate must be an integer in range [1, 8].");
5847  case Match_InvalidImm1_16:
5848    return Error(Loc, "immediate must be an integer in range [1, 16].");
5849  case Match_InvalidImm1_32:
5850    return Error(Loc, "immediate must be an integer in range [1, 32].");
5851  case Match_InvalidImm1_64:
5852    return Error(Loc, "immediate must be an integer in range [1, 64].");
5853  case Match_InvalidMemoryIndexedRange2UImm0:
5854    return Error(Loc, "vector select offset must be the immediate range 0:1.");
5855  case Match_InvalidMemoryIndexedRange2UImm1:
5856    return Error(Loc, "vector select offset must be an immediate range of the "
5857                      "form <immf>:<imml>, where the first "
5858                      "immediate is a multiple of 2 in the range [0, 2], and "
5859                      "the second immediate is immf + 1.");
5860  case Match_InvalidMemoryIndexedRange2UImm2:
5861  case Match_InvalidMemoryIndexedRange2UImm3:
5862    return Error(
5863        Loc,
5864        "vector select offset must be an immediate range of the form "
5865        "<immf>:<imml>, "
5866        "where the first immediate is a multiple of 2 in the range [0, 6] or "
5867        "[0, 14] "
5868        "depending on the instruction, and the second immediate is immf + 1.");
5869  case Match_InvalidMemoryIndexedRange4UImm0:
5870    return Error(Loc, "vector select offset must be the immediate range 0:3.");
5871  case Match_InvalidMemoryIndexedRange4UImm1:
5872  case Match_InvalidMemoryIndexedRange4UImm2:
5873    return Error(
5874        Loc,
5875        "vector select offset must be an immediate range of the form "
5876        "<immf>:<imml>, "
5877        "where the first immediate is a multiple of 4 in the range [0, 4] or "
5878        "[0, 12] "
5879        "depending on the instruction, and the second immediate is immf + 3.");
5880  case Match_InvalidSVEAddSubImm8:
5881    return Error(Loc, "immediate must be an integer in range [0, 255]"
5882                      " with a shift amount of 0");
5883  case Match_InvalidSVEAddSubImm16:
5884  case Match_InvalidSVEAddSubImm32:
5885  case Match_InvalidSVEAddSubImm64:
5886    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5887                      "multiple of 256 in range [256, 65280]");
5888  case Match_InvalidSVECpyImm8:
5889    return Error(Loc, "immediate must be an integer in range [-128, 255]"
5890                      " with a shift amount of 0");
5891  case Match_InvalidSVECpyImm16:
5892    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5893                      "multiple of 256 in range [-32768, 65280]");
5894  case Match_InvalidSVECpyImm32:
5895  case Match_InvalidSVECpyImm64:
5896    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5897                      "multiple of 256 in range [-32768, 32512]");
5898  case Match_InvalidIndexRange0_0:
5899    return Error(Loc, "expected lane specifier '[0]'");
5900  case Match_InvalidIndexRange1_1:
5901    return Error(Loc, "expected lane specifier '[1]'");
5902  case Match_InvalidIndexRange0_15:
5903    return Error(Loc, "vector lane must be an integer in range [0, 15].");
5904  case Match_InvalidIndexRange0_7:
5905    return Error(Loc, "vector lane must be an integer in range [0, 7].");
5906  case Match_InvalidIndexRange0_3:
5907    return Error(Loc, "vector lane must be an integer in range [0, 3].");
5908  case Match_InvalidIndexRange0_1:
5909    return Error(Loc, "vector lane must be an integer in range [0, 1].");
5910  case Match_InvalidSVEIndexRange0_63:
5911    return Error(Loc, "vector lane must be an integer in range [0, 63].");
5912  case Match_InvalidSVEIndexRange0_31:
5913    return Error(Loc, "vector lane must be an integer in range [0, 31].");
5914  case Match_InvalidSVEIndexRange0_15:
5915    return Error(Loc, "vector lane must be an integer in range [0, 15].");
5916  case Match_InvalidSVEIndexRange0_7:
5917    return Error(Loc, "vector lane must be an integer in range [0, 7].");
5918  case Match_InvalidSVEIndexRange0_3:
5919    return Error(Loc, "vector lane must be an integer in range [0, 3].");
5920  case Match_InvalidLabel:
5921    return Error(Loc, "expected label or encodable integer pc offset");
5922  case Match_MRS:
5923    return Error(Loc, "expected readable system register");
5924  case Match_MSR:
5925  case Match_InvalidSVCR:
5926    return Error(Loc, "expected writable system register or pstate");
5927  case Match_InvalidComplexRotationEven:
5928    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5929  case Match_InvalidComplexRotationOdd:
5930    return Error(Loc, "complex rotation must be 90 or 270.");
5931  case Match_MnemonicFail: {
5932    std::string Suggestion = AArch64MnemonicSpellCheck(
5933        ((AArch64Operand &)*Operands[0]).getToken(),
5934        ComputeAvailableFeatures(STI->getFeatureBits()));
5935    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5936  }
5937  case Match_InvalidGPR64shifted8:
5938    return Error(Loc, "register must be x0..x30 or xzr, without shift");
5939  case Match_InvalidGPR64shifted16:
5940    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5941  case Match_InvalidGPR64shifted32:
5942    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5943  case Match_InvalidGPR64shifted64:
5944    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5945  case Match_InvalidGPR64shifted128:
5946    return Error(
5947        Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5948  case Match_InvalidGPR64NoXZRshifted8:
5949    return Error(Loc, "register must be x0..x30 without shift");
5950  case Match_InvalidGPR64NoXZRshifted16:
5951    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5952  case Match_InvalidGPR64NoXZRshifted32:
5953    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5954  case Match_InvalidGPR64NoXZRshifted64:
5955    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5956  case Match_InvalidGPR64NoXZRshifted128:
5957    return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5958  case Match_InvalidZPR32UXTW8:
5959  case Match_InvalidZPR32SXTW8:
5960    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5961  case Match_InvalidZPR32UXTW16:
5962  case Match_InvalidZPR32SXTW16:
5963    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5964  case Match_InvalidZPR32UXTW32:
5965  case Match_InvalidZPR32SXTW32:
5966    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5967  case Match_InvalidZPR32UXTW64:
5968  case Match_InvalidZPR32SXTW64:
5969    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5970  case Match_InvalidZPR64UXTW8:
5971  case Match_InvalidZPR64SXTW8:
5972    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5973  case Match_InvalidZPR64UXTW16:
5974  case Match_InvalidZPR64SXTW16:
5975    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5976  case Match_InvalidZPR64UXTW32:
5977  case Match_InvalidZPR64SXTW32:
5978    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5979  case Match_InvalidZPR64UXTW64:
5980  case Match_InvalidZPR64SXTW64:
5981    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5982  case Match_InvalidZPR32LSL8:
5983    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5984  case Match_InvalidZPR32LSL16:
5985    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5986  case Match_InvalidZPR32LSL32:
5987    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5988  case Match_InvalidZPR32LSL64:
5989    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5990  case Match_InvalidZPR64LSL8:
5991    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5992  case Match_InvalidZPR64LSL16:
5993    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5994  case Match_InvalidZPR64LSL32:
5995    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5996  case Match_InvalidZPR64LSL64:
5997    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5998  case Match_InvalidZPR0:
5999    return Error(Loc, "expected register without element width suffix");
6000  case Match_InvalidZPR8:
6001  case Match_InvalidZPR16:
6002  case Match_InvalidZPR32:
6003  case Match_InvalidZPR64:
6004  case Match_InvalidZPR128:
6005    return Error(Loc, "invalid element width");
6006  case Match_InvalidZPR_3b8:
6007    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6008  case Match_InvalidZPR_3b16:
6009    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6010  case Match_InvalidZPR_3b32:
6011    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6012  case Match_InvalidZPR_4b8:
6013    return Error(Loc,
6014                 "Invalid restricted vector register, expected z0.b..z15.b");
6015  case Match_InvalidZPR_4b16:
6016    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6017  case Match_InvalidZPR_4b32:
6018    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6019  case Match_InvalidZPR_4b64:
6020    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6021  case Match_InvalidSVEPattern:
6022    return Error(Loc, "invalid predicate pattern");
6023  case Match_InvalidSVEPredicateAnyReg:
6024  case Match_InvalidSVEPredicateBReg:
6025  case Match_InvalidSVEPredicateHReg:
6026  case Match_InvalidSVEPredicateSReg:
6027  case Match_InvalidSVEPredicateDReg:
6028    return Error(Loc, "invalid predicate register.");
6029  case Match_InvalidSVEPredicate3bAnyReg:
6030    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6031  case Match_InvalidSVEPNPredicateB_p8to15Reg:
6032  case Match_InvalidSVEPNPredicateH_p8to15Reg:
6033  case Match_InvalidSVEPNPredicateS_p8to15Reg:
6034  case Match_InvalidSVEPNPredicateD_p8to15Reg:
6035    return Error(Loc, "Invalid predicate register, expected PN in range "
6036                      "pn8..pn15 with element suffix.");
6037  case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6038    return Error(Loc, "invalid restricted predicate-as-counter register "
6039                      "expected pn8..pn15");
6040  case Match_InvalidSVEPNPredicateBReg:
6041  case Match_InvalidSVEPNPredicateHReg:
6042  case Match_InvalidSVEPNPredicateSReg:
6043  case Match_InvalidSVEPNPredicateDReg:
6044    return Error(Loc, "Invalid predicate register, expected PN in range "
6045                      "pn0..pn15 with element suffix.");
6046  case Match_InvalidSVEVecLenSpecifier:
6047    return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6048  case Match_InvalidSVEPredicateListMul2x8:
6049  case Match_InvalidSVEPredicateListMul2x16:
6050  case Match_InvalidSVEPredicateListMul2x32:
6051  case Match_InvalidSVEPredicateListMul2x64:
6052    return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6053                      "predicate registers, where the first vector is a multiple of 2 "
6054                      "and with correct element type");
6055  case Match_InvalidSVEExactFPImmOperandHalfOne:
6056    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6057  case Match_InvalidSVEExactFPImmOperandHalfTwo:
6058    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6059  case Match_InvalidSVEExactFPImmOperandZeroOne:
6060    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6061  case Match_InvalidMatrixTileVectorH8:
6062  case Match_InvalidMatrixTileVectorV8:
6063    return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6064  case Match_InvalidMatrixTileVectorH16:
6065  case Match_InvalidMatrixTileVectorV16:
6066    return Error(Loc,
6067                 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6068  case Match_InvalidMatrixTileVectorH32:
6069  case Match_InvalidMatrixTileVectorV32:
6070    return Error(Loc,
6071                 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6072  case Match_InvalidMatrixTileVectorH64:
6073  case Match_InvalidMatrixTileVectorV64:
6074    return Error(Loc,
6075                 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6076  case Match_InvalidMatrixTileVectorH128:
6077  case Match_InvalidMatrixTileVectorV128:
6078    return Error(Loc,
6079                 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6080  case Match_InvalidMatrixTile32:
6081    return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6082  case Match_InvalidMatrixTile64:
6083    return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6084  case Match_InvalidMatrix:
6085    return Error(Loc, "invalid matrix operand, expected za");
6086  case Match_InvalidMatrix8:
6087    return Error(Loc, "invalid matrix operand, expected suffix .b");
6088  case Match_InvalidMatrix16:
6089    return Error(Loc, "invalid matrix operand, expected suffix .h");
6090  case Match_InvalidMatrix32:
6091    return Error(Loc, "invalid matrix operand, expected suffix .s");
6092  case Match_InvalidMatrix64:
6093    return Error(Loc, "invalid matrix operand, expected suffix .d");
6094  case Match_InvalidMatrixIndexGPR32_12_15:
6095    return Error(Loc, "operand must be a register in range [w12, w15]");
6096  case Match_InvalidMatrixIndexGPR32_8_11:
6097    return Error(Loc, "operand must be a register in range [w8, w11]");
6098  case Match_InvalidSVEVectorListMul2x8:
6099  case Match_InvalidSVEVectorListMul2x16:
6100  case Match_InvalidSVEVectorListMul2x32:
6101  case Match_InvalidSVEVectorListMul2x64:
6102    return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6103                      "SVE vectors, where the first vector is a multiple of 2 "
6104                      "and with matching element types");
6105  case Match_InvalidSVEVectorListMul4x8:
6106  case Match_InvalidSVEVectorListMul4x16:
6107  case Match_InvalidSVEVectorListMul4x32:
6108  case Match_InvalidSVEVectorListMul4x64:
6109    return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6110                      "SVE vectors, where the first vector is a multiple of 4 "
6111                      "and with matching element types");
6112  case Match_InvalidLookupTable:
6113    return Error(Loc, "Invalid lookup table, expected zt0");
6114  case Match_InvalidSVEVectorListStrided2x8:
6115  case Match_InvalidSVEVectorListStrided2x16:
6116  case Match_InvalidSVEVectorListStrided2x32:
6117  case Match_InvalidSVEVectorListStrided2x64:
6118    return Error(
6119        Loc,
6120        "Invalid vector list, expected list with each SVE vector in the list "
6121        "8 registers apart, and the first register in the range [z0, z7] or "
6122        "[z16, z23] and with correct element type");
6123  case Match_InvalidSVEVectorListStrided4x8:
6124  case Match_InvalidSVEVectorListStrided4x16:
6125  case Match_InvalidSVEVectorListStrided4x32:
6126  case Match_InvalidSVEVectorListStrided4x64:
6127    return Error(
6128        Loc,
6129        "Invalid vector list, expected list with each SVE vector in the list "
6130        "4 registers apart, and the first register in the range [z0, z3] or "
6131        "[z16, z19] and with correct element type");
6132  default:
6133    llvm_unreachable("unexpected error code!");
6134  }
6135}
6136
// Maps a single subtarget feature bit value to its printable name, used when
// diagnosing instructions that require a missing feature. Forward declaration
// only — the definition is presumably provided by the TableGen-generated
// matcher include later in this file (GET_SUBTARGET_FEATURE_NAME); confirm.
static const char *getSubtargetFeatureName(uint64_t Val);
6138
6139bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6140                                               OperandVector &Operands,
6141                                               MCStreamer &Out,
6142                                               uint64_t &ErrorInfo,
6143                                               bool MatchingInlineAsm) {
6144  assert(!Operands.empty() && "Unexpect empty operand list!");
6145  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6146  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6147
6148  StringRef Tok = Op.getToken();
6149  unsigned NumOperands = Operands.size();
6150
6151  if (NumOperands == 4 && Tok == "lsl") {
6152    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6153    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6154    if (Op2.isScalarReg() && Op3.isImm()) {
6155      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6156      if (Op3CE) {
6157        uint64_t Op3Val = Op3CE->getValue();
6158        uint64_t NewOp3Val = 0;
6159        uint64_t NewOp4Val = 0;
6160        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6161                Op2.getReg())) {
6162          NewOp3Val = (32 - Op3Val) & 0x1f;
6163          NewOp4Val = 31 - Op3Val;
6164        } else {
6165          NewOp3Val = (64 - Op3Val) & 0x3f;
6166          NewOp4Val = 63 - Op3Val;
6167        }
6168
6169        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6170        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6171
6172        Operands[0] =
6173            AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6174        Operands.push_back(AArch64Operand::CreateImm(
6175            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6176        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6177                                                Op3.getEndLoc(), getContext());
6178      }
6179    }
6180  } else if (NumOperands == 4 && Tok == "bfc") {
6181    // FIXME: Horrible hack to handle BFC->BFM alias.
6182    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6183    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6184    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6185
6186    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6187      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6188      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6189
6190      if (LSBCE && WidthCE) {
6191        uint64_t LSB = LSBCE->getValue();
6192        uint64_t Width = WidthCE->getValue();
6193
6194        uint64_t RegWidth = 0;
6195        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6196                Op1.getReg()))
6197          RegWidth = 64;
6198        else
6199          RegWidth = 32;
6200
6201        if (LSB >= RegWidth)
6202          return Error(LSBOp.getStartLoc(),
6203                       "expected integer in range [0, 31]");
6204        if (Width < 1 || Width > RegWidth)
6205          return Error(WidthOp.getStartLoc(),
6206                       "expected integer in range [1, 32]");
6207
6208        uint64_t ImmR = 0;
6209        if (RegWidth == 32)
6210          ImmR = (32 - LSB) & 0x1f;
6211        else
6212          ImmR = (64 - LSB) & 0x3f;
6213
6214        uint64_t ImmS = Width - 1;
6215
6216        if (ImmR != 0 && ImmS >= ImmR)
6217          return Error(WidthOp.getStartLoc(),
6218                       "requested insert overflows register");
6219
6220        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6221        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6222        Operands[0] =
6223            AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6224        Operands[2] = AArch64Operand::CreateReg(
6225            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6226            SMLoc(), SMLoc(), getContext());
6227        Operands[3] = AArch64Operand::CreateImm(
6228            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6229        Operands.emplace_back(
6230            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6231                                      WidthOp.getEndLoc(), getContext()));
6232      }
6233    }
6234  } else if (NumOperands == 5) {
6235    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6236    // UBFIZ -> UBFM aliases.
6237    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6238      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6239      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6240      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6241
6242      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6243        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6244        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6245
6246        if (Op3CE && Op4CE) {
6247          uint64_t Op3Val = Op3CE->getValue();
6248          uint64_t Op4Val = Op4CE->getValue();
6249
6250          uint64_t RegWidth = 0;
6251          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6252                  Op1.getReg()))
6253            RegWidth = 64;
6254          else
6255            RegWidth = 32;
6256
6257          if (Op3Val >= RegWidth)
6258            return Error(Op3.getStartLoc(),
6259                         "expected integer in range [0, 31]");
6260          if (Op4Val < 1 || Op4Val > RegWidth)
6261            return Error(Op4.getStartLoc(),
6262                         "expected integer in range [1, 32]");
6263
6264          uint64_t NewOp3Val = 0;
6265          if (RegWidth == 32)
6266            NewOp3Val = (32 - Op3Val) & 0x1f;
6267          else
6268            NewOp3Val = (64 - Op3Val) & 0x3f;
6269
6270          uint64_t NewOp4Val = Op4Val - 1;
6271
6272          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6273            return Error(Op4.getStartLoc(),
6274                         "requested insert overflows register");
6275
6276          const MCExpr *NewOp3 =
6277              MCConstantExpr::create(NewOp3Val, getContext());
6278          const MCExpr *NewOp4 =
6279              MCConstantExpr::create(NewOp4Val, getContext());
6280          Operands[3] = AArch64Operand::CreateImm(
6281              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6282          Operands[4] = AArch64Operand::CreateImm(
6283              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6284          if (Tok == "bfi")
6285            Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6286                                                      getContext());
6287          else if (Tok == "sbfiz")
6288            Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6289                                                      getContext());
6290          else if (Tok == "ubfiz")
6291            Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6292                                                      getContext());
6293          else
6294            llvm_unreachable("No valid mnemonic for alias?");
6295        }
6296      }
6297
6298      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6299      // UBFX -> UBFM aliases.
6300    } else if (NumOperands == 5 &&
6301               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6302      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6303      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6304      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6305
6306      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6307        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6308        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6309
6310        if (Op3CE && Op4CE) {
6311          uint64_t Op3Val = Op3CE->getValue();
6312          uint64_t Op4Val = Op4CE->getValue();
6313
6314          uint64_t RegWidth = 0;
6315          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6316                  Op1.getReg()))
6317            RegWidth = 64;
6318          else
6319            RegWidth = 32;
6320
6321          if (Op3Val >= RegWidth)
6322            return Error(Op3.getStartLoc(),
6323                         "expected integer in range [0, 31]");
6324          if (Op4Val < 1 || Op4Val > RegWidth)
6325            return Error(Op4.getStartLoc(),
6326                         "expected integer in range [1, 32]");
6327
6328          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6329
6330          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6331            return Error(Op4.getStartLoc(),
6332                         "requested extract overflows register");
6333
6334          const MCExpr *NewOp4 =
6335              MCConstantExpr::create(NewOp4Val, getContext());
6336          Operands[4] = AArch64Operand::CreateImm(
6337              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6338          if (Tok == "bfxil")
6339            Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6340                                                      getContext());
6341          else if (Tok == "sbfx")
6342            Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6343                                                      getContext());
6344          else if (Tok == "ubfx")
6345            Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6346                                                      getContext());
6347          else
6348            llvm_unreachable("No valid mnemonic for alias?");
6349        }
6350      }
6351    }
6352  }
6353
6354  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6355  // instruction for FP registers correctly in some rare circumstances. Convert
6356  // it to a safe instruction and warn (because silently changing someone's
6357  // assembly is rude).
6358  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
6359      NumOperands == 4 && Tok == "movi") {
6360    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6361    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6362    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6363    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6364        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6365      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6366      if (Suffix.lower() == ".2d" &&
6367          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6368        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6369                " correctly on this CPU, converting to equivalent movi.16b");
6370        // Switch the suffix to .16b.
6371        unsigned Idx = Op1.isToken() ? 1 : 2;
6372        Operands[Idx] =
6373            AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6374      }
6375    }
6376  }
6377
6378  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6379  //        InstAlias can't quite handle this since the reg classes aren't
6380  //        subclasses.
6381  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6382    // The source register can be Wn here, but the matcher expects a
6383    // GPR64. Twiddle it here if necessary.
6384    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6385    if (Op.isScalarReg()) {
6386      unsigned Reg = getXRegFromWReg(Op.getReg());
6387      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6388                                              Op.getStartLoc(), Op.getEndLoc(),
6389                                              getContext());
6390    }
6391  }
6392  // FIXME: Likewise for sxt[bh] with a Xd dst operand
6393  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6394    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6395    if (Op.isScalarReg() &&
6396        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6397            Op.getReg())) {
6398      // The source register can be Wn here, but the matcher expects a
6399      // GPR64. Twiddle it here if necessary.
6400      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6401      if (Op.isScalarReg()) {
6402        unsigned Reg = getXRegFromWReg(Op.getReg());
6403        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6404                                                Op.getStartLoc(),
6405                                                Op.getEndLoc(), getContext());
6406      }
6407    }
6408  }
6409  // FIXME: Likewise for uxt[bh] with a Xd dst operand
6410  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6411    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6412    if (Op.isScalarReg() &&
6413        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6414            Op.getReg())) {
6415      // The source register can be Wn here, but the matcher expects a
6416      // GPR32. Twiddle it here if necessary.
6417      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6418      if (Op.isScalarReg()) {
6419        unsigned Reg = getWRegFromXReg(Op.getReg());
6420        Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6421                                                Op.getStartLoc(),
6422                                                Op.getEndLoc(), getContext());
6423      }
6424    }
6425  }
6426
6427  MCInst Inst;
6428  FeatureBitset MissingFeatures;
6429  // First try to match against the secondary set of tables containing the
6430  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6431  unsigned MatchResult =
6432      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6433                           MatchingInlineAsm, 1);
6434
6435  // If that fails, try against the alternate table containing long-form NEON:
6436  // "fadd v0.2s, v1.2s, v2.2s"
6437  if (MatchResult != Match_Success) {
6438    // But first, save the short-form match result: we can use it in case the
6439    // long-form match also fails.
6440    auto ShortFormNEONErrorInfo = ErrorInfo;
6441    auto ShortFormNEONMatchResult = MatchResult;
6442    auto ShortFormNEONMissingFeatures = MissingFeatures;
6443
6444    MatchResult =
6445        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6446                             MatchingInlineAsm, 0);
6447
6448    // Now, both matches failed, and the long-form match failed on the mnemonic
6449    // suffix token operand.  The short-form match failure is probably more
6450    // relevant: use it instead.
6451    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6452        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6453        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6454      MatchResult = ShortFormNEONMatchResult;
6455      ErrorInfo = ShortFormNEONErrorInfo;
6456      MissingFeatures = ShortFormNEONMissingFeatures;
6457    }
6458  }
6459
6460  switch (MatchResult) {
6461  case Match_Success: {
6462    // Perform range checking and other semantic validations
6463    SmallVector<SMLoc, 8> OperandLocs;
6464    NumOperands = Operands.size();
6465    for (unsigned i = 1; i < NumOperands; ++i)
6466      OperandLocs.push_back(Operands[i]->getStartLoc());
6467    if (validateInstruction(Inst, IDLoc, OperandLocs))
6468      return true;
6469
6470    Inst.setLoc(IDLoc);
6471    Out.emitInstruction(Inst, getSTI());
6472    return false;
6473  }
6474  case Match_MissingFeature: {
6475    assert(MissingFeatures.any() && "Unknown missing feature!");
6476    // Special case the error message for the very common case where only
6477    // a single subtarget feature is missing (neon, e.g.).
6478    std::string Msg = "instruction requires:";
6479    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6480      if (MissingFeatures[i]) {
6481        Msg += " ";
6482        Msg += getSubtargetFeatureName(i);
6483      }
6484    }
6485    return Error(IDLoc, Msg);
6486  }
6487  case Match_MnemonicFail:
6488    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6489  case Match_InvalidOperand: {
6490    SMLoc ErrorLoc = IDLoc;
6491
6492    if (ErrorInfo != ~0ULL) {
6493      if (ErrorInfo >= Operands.size())
6494        return Error(IDLoc, "too few operands for instruction",
6495                     SMRange(IDLoc, getTok().getLoc()));
6496
6497      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6498      if (ErrorLoc == SMLoc())
6499        ErrorLoc = IDLoc;
6500    }
6501    // If the match failed on a suffix token operand, tweak the diagnostic
6502    // accordingly.
6503    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6504        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6505      MatchResult = Match_InvalidSuffix;
6506
6507    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6508  }
6509  case Match_InvalidTiedOperand:
6510  case Match_InvalidMemoryIndexed1:
6511  case Match_InvalidMemoryIndexed2:
6512  case Match_InvalidMemoryIndexed4:
6513  case Match_InvalidMemoryIndexed8:
6514  case Match_InvalidMemoryIndexed16:
6515  case Match_InvalidCondCode:
6516  case Match_AddSubRegExtendSmall:
6517  case Match_AddSubRegExtendLarge:
6518  case Match_AddSubSecondSource:
6519  case Match_LogicalSecondSource:
6520  case Match_AddSubRegShift32:
6521  case Match_AddSubRegShift64:
6522  case Match_InvalidMovImm32Shift:
6523  case Match_InvalidMovImm64Shift:
6524  case Match_InvalidFPImm:
6525  case Match_InvalidMemoryWExtend8:
6526  case Match_InvalidMemoryWExtend16:
6527  case Match_InvalidMemoryWExtend32:
6528  case Match_InvalidMemoryWExtend64:
6529  case Match_InvalidMemoryWExtend128:
6530  case Match_InvalidMemoryXExtend8:
6531  case Match_InvalidMemoryXExtend16:
6532  case Match_InvalidMemoryXExtend32:
6533  case Match_InvalidMemoryXExtend64:
6534  case Match_InvalidMemoryXExtend128:
6535  case Match_InvalidMemoryIndexed1SImm4:
6536  case Match_InvalidMemoryIndexed2SImm4:
6537  case Match_InvalidMemoryIndexed3SImm4:
6538  case Match_InvalidMemoryIndexed4SImm4:
6539  case Match_InvalidMemoryIndexed1SImm6:
6540  case Match_InvalidMemoryIndexed16SImm4:
6541  case Match_InvalidMemoryIndexed32SImm4:
6542  case Match_InvalidMemoryIndexed4SImm7:
6543  case Match_InvalidMemoryIndexed8SImm7:
6544  case Match_InvalidMemoryIndexed16SImm7:
6545  case Match_InvalidMemoryIndexed8UImm5:
6546  case Match_InvalidMemoryIndexed8UImm3:
6547  case Match_InvalidMemoryIndexed4UImm5:
6548  case Match_InvalidMemoryIndexed2UImm5:
6549  case Match_InvalidMemoryIndexed1UImm6:
6550  case Match_InvalidMemoryIndexed2UImm6:
6551  case Match_InvalidMemoryIndexed4UImm6:
6552  case Match_InvalidMemoryIndexed8UImm6:
6553  case Match_InvalidMemoryIndexed16UImm6:
6554  case Match_InvalidMemoryIndexedSImm6:
6555  case Match_InvalidMemoryIndexedSImm5:
6556  case Match_InvalidMemoryIndexedSImm8:
6557  case Match_InvalidMemoryIndexedSImm9:
6558  case Match_InvalidMemoryIndexed16SImm9:
6559  case Match_InvalidMemoryIndexed8SImm10:
6560  case Match_InvalidImm0_0:
6561  case Match_InvalidImm0_1:
6562  case Match_InvalidImm0_3:
6563  case Match_InvalidImm0_7:
6564  case Match_InvalidImm0_15:
6565  case Match_InvalidImm0_31:
6566  case Match_InvalidImm0_63:
6567  case Match_InvalidImm0_127:
6568  case Match_InvalidImm0_255:
6569  case Match_InvalidImm0_65535:
6570  case Match_InvalidImm1_8:
6571  case Match_InvalidImm1_16:
6572  case Match_InvalidImm1_32:
6573  case Match_InvalidImm1_64:
6574  case Match_InvalidMemoryIndexedRange2UImm0:
6575  case Match_InvalidMemoryIndexedRange2UImm1:
6576  case Match_InvalidMemoryIndexedRange2UImm2:
6577  case Match_InvalidMemoryIndexedRange2UImm3:
6578  case Match_InvalidMemoryIndexedRange4UImm0:
6579  case Match_InvalidMemoryIndexedRange4UImm1:
6580  case Match_InvalidMemoryIndexedRange4UImm2:
6581  case Match_InvalidSVEAddSubImm8:
6582  case Match_InvalidSVEAddSubImm16:
6583  case Match_InvalidSVEAddSubImm32:
6584  case Match_InvalidSVEAddSubImm64:
6585  case Match_InvalidSVECpyImm8:
6586  case Match_InvalidSVECpyImm16:
6587  case Match_InvalidSVECpyImm32:
6588  case Match_InvalidSVECpyImm64:
6589  case Match_InvalidIndexRange0_0:
6590  case Match_InvalidIndexRange1_1:
6591  case Match_InvalidIndexRange0_15:
6592  case Match_InvalidIndexRange0_7:
6593  case Match_InvalidIndexRange0_3:
6594  case Match_InvalidIndexRange0_1:
6595  case Match_InvalidSVEIndexRange0_63:
6596  case Match_InvalidSVEIndexRange0_31:
6597  case Match_InvalidSVEIndexRange0_15:
6598  case Match_InvalidSVEIndexRange0_7:
6599  case Match_InvalidSVEIndexRange0_3:
6600  case Match_InvalidLabel:
6601  case Match_InvalidComplexRotationEven:
6602  case Match_InvalidComplexRotationOdd:
6603  case Match_InvalidGPR64shifted8:
6604  case Match_InvalidGPR64shifted16:
6605  case Match_InvalidGPR64shifted32:
6606  case Match_InvalidGPR64shifted64:
6607  case Match_InvalidGPR64shifted128:
6608  case Match_InvalidGPR64NoXZRshifted8:
6609  case Match_InvalidGPR64NoXZRshifted16:
6610  case Match_InvalidGPR64NoXZRshifted32:
6611  case Match_InvalidGPR64NoXZRshifted64:
6612  case Match_InvalidGPR64NoXZRshifted128:
6613  case Match_InvalidZPR32UXTW8:
6614  case Match_InvalidZPR32UXTW16:
6615  case Match_InvalidZPR32UXTW32:
6616  case Match_InvalidZPR32UXTW64:
6617  case Match_InvalidZPR32SXTW8:
6618  case Match_InvalidZPR32SXTW16:
6619  case Match_InvalidZPR32SXTW32:
6620  case Match_InvalidZPR32SXTW64:
6621  case Match_InvalidZPR64UXTW8:
6622  case Match_InvalidZPR64SXTW8:
6623  case Match_InvalidZPR64UXTW16:
6624  case Match_InvalidZPR64SXTW16:
6625  case Match_InvalidZPR64UXTW32:
6626  case Match_InvalidZPR64SXTW32:
6627  case Match_InvalidZPR64UXTW64:
6628  case Match_InvalidZPR64SXTW64:
6629  case Match_InvalidZPR32LSL8:
6630  case Match_InvalidZPR32LSL16:
6631  case Match_InvalidZPR32LSL32:
6632  case Match_InvalidZPR32LSL64:
6633  case Match_InvalidZPR64LSL8:
6634  case Match_InvalidZPR64LSL16:
6635  case Match_InvalidZPR64LSL32:
6636  case Match_InvalidZPR64LSL64:
6637  case Match_InvalidZPR0:
6638  case Match_InvalidZPR8:
6639  case Match_InvalidZPR16:
6640  case Match_InvalidZPR32:
6641  case Match_InvalidZPR64:
6642  case Match_InvalidZPR128:
6643  case Match_InvalidZPR_3b8:
6644  case Match_InvalidZPR_3b16:
6645  case Match_InvalidZPR_3b32:
6646  case Match_InvalidZPR_4b8:
6647  case Match_InvalidZPR_4b16:
6648  case Match_InvalidZPR_4b32:
6649  case Match_InvalidZPR_4b64:
6650  case Match_InvalidSVEPredicateAnyReg:
6651  case Match_InvalidSVEPattern:
6652  case Match_InvalidSVEVecLenSpecifier:
6653  case Match_InvalidSVEPredicateBReg:
6654  case Match_InvalidSVEPredicateHReg:
6655  case Match_InvalidSVEPredicateSReg:
6656  case Match_InvalidSVEPredicateDReg:
6657  case Match_InvalidSVEPredicate3bAnyReg:
6658  case Match_InvalidSVEPNPredicateB_p8to15Reg:
6659  case Match_InvalidSVEPNPredicateH_p8to15Reg:
6660  case Match_InvalidSVEPNPredicateS_p8to15Reg:
6661  case Match_InvalidSVEPNPredicateD_p8to15Reg:
6662  case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6663  case Match_InvalidSVEPNPredicateBReg:
6664  case Match_InvalidSVEPNPredicateHReg:
6665  case Match_InvalidSVEPNPredicateSReg:
6666  case Match_InvalidSVEPNPredicateDReg:
6667  case Match_InvalidSVEPredicateListMul2x8:
6668  case Match_InvalidSVEPredicateListMul2x16:
6669  case Match_InvalidSVEPredicateListMul2x32:
6670  case Match_InvalidSVEPredicateListMul2x64:
6671  case Match_InvalidSVEExactFPImmOperandHalfOne:
6672  case Match_InvalidSVEExactFPImmOperandHalfTwo:
6673  case Match_InvalidSVEExactFPImmOperandZeroOne:
6674  case Match_InvalidMatrixTile32:
6675  case Match_InvalidMatrixTile64:
6676  case Match_InvalidMatrix:
6677  case Match_InvalidMatrix8:
6678  case Match_InvalidMatrix16:
6679  case Match_InvalidMatrix32:
6680  case Match_InvalidMatrix64:
6681  case Match_InvalidMatrixTileVectorH8:
6682  case Match_InvalidMatrixTileVectorH16:
6683  case Match_InvalidMatrixTileVectorH32:
6684  case Match_InvalidMatrixTileVectorH64:
6685  case Match_InvalidMatrixTileVectorH128:
6686  case Match_InvalidMatrixTileVectorV8:
6687  case Match_InvalidMatrixTileVectorV16:
6688  case Match_InvalidMatrixTileVectorV32:
6689  case Match_InvalidMatrixTileVectorV64:
6690  case Match_InvalidMatrixTileVectorV128:
6691  case Match_InvalidSVCR:
6692  case Match_InvalidMatrixIndexGPR32_12_15:
6693  case Match_InvalidMatrixIndexGPR32_8_11:
6694  case Match_InvalidLookupTable:
6695  case Match_InvalidSVEVectorListMul2x8:
6696  case Match_InvalidSVEVectorListMul2x16:
6697  case Match_InvalidSVEVectorListMul2x32:
6698  case Match_InvalidSVEVectorListMul2x64:
6699  case Match_InvalidSVEVectorListMul4x8:
6700  case Match_InvalidSVEVectorListMul4x16:
6701  case Match_InvalidSVEVectorListMul4x32:
6702  case Match_InvalidSVEVectorListMul4x64:
6703  case Match_InvalidSVEVectorListStrided2x8:
6704  case Match_InvalidSVEVectorListStrided2x16:
6705  case Match_InvalidSVEVectorListStrided2x32:
6706  case Match_InvalidSVEVectorListStrided2x64:
6707  case Match_InvalidSVEVectorListStrided4x8:
6708  case Match_InvalidSVEVectorListStrided4x16:
6709  case Match_InvalidSVEVectorListStrided4x32:
6710  case Match_InvalidSVEVectorListStrided4x64:
6711  case Match_MSR:
6712  case Match_MRS: {
6713    if (ErrorInfo >= Operands.size())
6714      return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6715    // Any time we get here, there's nothing fancy to do. Just get the
6716    // operand SMLoc and display the diagnostic.
6717    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6718    if (ErrorLoc == SMLoc())
6719      ErrorLoc = IDLoc;
6720    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6721  }
6722  }
6723
6724  llvm_unreachable("Implement any new match types added!");
6725}
6726
/// ParseDirective parses the AArch64-specific assembler directives.
/// Returns false when the directive was recognized for the current object
/// format (the individual handlers report their own diagnostics; their
/// return values are ignored here), and true to hand an unrecognized
/// directive back to the generic parser.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Match directives case-insensitively: lower() produces a lower-cased
  // std::string copy of the identifier.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives accepted regardless of object file format.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O only: linker optimization hint (.loh) directives.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF only: Windows ARM64 SEH unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The two bool arguments select paired/writeback variants of the
    // "save any reg" unwind code: (Paired, Writeback).
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  return false;
}
6827
6828static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6829                            SmallVector<StringRef, 4> &RequestedExtensions) {
6830  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6831  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6832
6833  if (!NoCrypto && Crypto) {
6834    // Map 'generic' (and others) to sha2 and aes, because
6835    // that was the traditional meaning of crypto.
6836    if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6837        ArchInfo == AArch64::ARMV8_3A) {
6838      RequestedExtensions.push_back("sha2");
6839      RequestedExtensions.push_back("aes");
6840    }
6841    if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6842        ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6843        ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6844        ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6845        ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6846        ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6847      RequestedExtensions.push_back("sm4");
6848      RequestedExtensions.push_back("sha3");
6849      RequestedExtensions.push_back("sha2");
6850      RequestedExtensions.push_back("aes");
6851    }
6852  } else if (NoCrypto) {
6853    // Map 'generic' (and others) to sha2 and aes, because
6854    // that was the traditional meaning of crypto.
6855    if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6856        ArchInfo == AArch64::ARMV8_3A) {
6857      RequestedExtensions.push_back("nosha2");
6858      RequestedExtensions.push_back("noaes");
6859    }
6860    if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6861        ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6862        ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6863        ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6864        ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6865        ArchInfo == AArch64::ARMV9_4A) {
6866      RequestedExtensions.push_back("nosm4");
6867      RequestedExtensions.push_back("nosha3");
6868      RequestedExtensions.push_back("nosha2");
6869      RequestedExtensions.push_back("noaes");
6870    }
6871  }
6872}
6873
6874/// parseDirectiveArch
6875///   ::= .arch token
6876bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6877  SMLoc ArchLoc = getLoc();
6878
6879  StringRef Arch, ExtensionString;
6880  std::tie(Arch, ExtensionString) =
6881      getParser().parseStringToEndOfStatement().trim().split('+');
6882
6883  const AArch64::ArchInfo &ArchInfo = AArch64::parseArch(Arch);
6884  if (ArchInfo == AArch64::INVALID)
6885    return Error(ArchLoc, "unknown arch name");
6886
6887  if (parseToken(AsmToken::EndOfStatement))
6888    return true;
6889
6890  // Get the architecture and extension features.
6891  std::vector<StringRef> AArch64Features;
6892  AArch64Features.push_back(ArchInfo.ArchFeature);
6893  AArch64::getExtensionFeatures(
6894      AArch64::getDefaultExtensions("generic", ArchInfo), AArch64Features);
6895
6896  MCSubtargetInfo &STI = copySTI();
6897  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6898  STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6899                         join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6900
6901  SmallVector<StringRef, 4> RequestedExtensions;
6902  if (!ExtensionString.empty())
6903    ExtensionString.split(RequestedExtensions, '+');
6904
6905  ExpandCryptoAEK(ArchInfo, RequestedExtensions);
6906
6907  FeatureBitset Features = STI.getFeatureBits();
6908  for (auto Name : RequestedExtensions) {
6909    bool EnableFeature = true;
6910
6911    if (Name.startswith_insensitive("no")) {
6912      EnableFeature = false;
6913      Name = Name.substr(2);
6914    }
6915
6916    for (const auto &Extension : ExtensionMap) {
6917      if (Extension.Name != Name)
6918        continue;
6919
6920      if (Extension.Features.none())
6921        report_fatal_error("unsupported architectural extension: " + Name);
6922
6923      FeatureBitset ToggleFeatures =
6924          EnableFeature
6925              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6926              : STI.ToggleFeature(Features & Extension.Features);
6927      setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6928      break;
6929    }
6930  }
6931  return false;
6932}
6933
6934/// parseDirectiveArchExtension
6935///   ::= .arch_extension [no]feature
6936bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6937  SMLoc ExtLoc = getLoc();
6938
6939  StringRef Name = getParser().parseStringToEndOfStatement().trim();
6940
6941  if (parseEOL())
6942    return true;
6943
6944  bool EnableFeature = true;
6945  if (Name.startswith_insensitive("no")) {
6946    EnableFeature = false;
6947    Name = Name.substr(2);
6948  }
6949
6950  MCSubtargetInfo &STI = copySTI();
6951  FeatureBitset Features = STI.getFeatureBits();
6952  for (const auto &Extension : ExtensionMap) {
6953    if (Extension.Name != Name)
6954      continue;
6955
6956    if (Extension.Features.none())
6957      return Error(ExtLoc, "unsupported architectural extension: " + Name);
6958
6959    FeatureBitset ToggleFeatures =
6960        EnableFeature
6961            ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6962            : STI.ToggleFeature(Features & Extension.Features);
6963    setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6964    return false;
6965  }
6966
6967  return Error(ExtLoc, "unknown architectural extension: " + Name);
6968}
6969
6970static SMLoc incrementLoc(SMLoc L, int Offset) {
6971  return SMLoc::getFromPointer(L.getPointer() + Offset);
6972}
6973
/// parseDirectiveCPU
///   ::= .cpu id[+extension...]
///
/// Resets the subtarget to the named CPU's default features, then applies
/// each "+ext"/"+noext" modifier in order. CurLoc is advanced past the CPU
/// name and each modifier so diagnostics point at the offending token.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // Split "cortex-a75+crypto+nofp" into the CPU name and extension list.
  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
  // once that is tablegen'ed
  if (!getSTI().isCPUStringValid(CPU)) {
    Error(CurLoc, "unknown CPU name");
    // NOTE: deliberately returns false — the error is reported but the
    // directive is still treated as handled.
    return false;
  }

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  // Advance past the CPU name so diagnostics point at the extensions.
  CurLoc = incrementLoc(CurLoc, CPU.size());

  // Expand the "crypto"/"nocrypto" pseudo-extensions for this CPU's arch.
  ExpandCryptoAEK(llvm::AArch64::getArchForCpu(CPU), RequestedExtensions);

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    // A leading "no" disables the extension instead of enabling it.
    bool EnableFeature = true;

    if (Name.startswith_insensitive("no")) {
      EnableFeature = false;
      Name = Name.substr(2);
    }

    bool FoundExtension = false;
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      // An extension listed in the map with no backing features is a
      // hard (fatal) error rather than a diagnostic.
      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      // Re-read feature bits each iteration: earlier extensions in the
      // list may already have changed them.
      FeatureBitset Features = STI.getFeatureBits();
      FeatureBitset ToggleFeatures =
          EnableFeature
              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
              : STI.ToggleFeature(Features & Extension.Features);
      setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
      FoundExtension = true;

      break;
    }

    if (!FoundExtension)
      Error(CurLoc, "unsupported architectural extension");

    // NOTE(review): when a "no" prefix was stripped above, Name.size() is
    // two short of the token actually consumed, so CurLoc drifts by 2 for
    // subsequent diagnostics — confirm whether this is intended.
    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  return false;
}
7040
7041/// parseDirectiveInst
7042///  ::= .inst opcode [, ...]
7043bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7044  if (getLexer().is(AsmToken::EndOfStatement))
7045    return Error(Loc, "expected expression following '.inst' directive");
7046
7047  auto parseOp = [&]() -> bool {
7048    SMLoc L = getLoc();
7049    const MCExpr *Expr = nullptr;
7050    if (check(getParser().parseExpression(Expr), L, "expected expression"))
7051      return true;
7052    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7053    if (check(!Value, L, "expected constant expression"))
7054      return true;
7055    getTargetStreamer().emitInst(Value->getValue());
7056    return false;
7057  };
7058
7059  return parseMany(parseOp);
7060}
7061
7062// parseDirectiveTLSDescCall:
7063//   ::= .tlsdesccall symbol
7064bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7065  StringRef Name;
7066  if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7067      parseToken(AsmToken::EndOfStatement))
7068    return true;
7069
7070  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7071  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7072  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7073
7074  MCInst Inst;
7075  Inst.setOpcode(AArch64::TLSDESCCALL);
7076  Inst.addOperand(MCOperand::createExpr(Expr));
7077
7078  getParser().getStreamer().emitInstruction(Inst, getSTI());
7079  return false;
7080}
7081
7082/// ::= .loh <lohName | lohId> label1, ..., labelN
7083/// The number of arguments depends on the loh identifier.
7084bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7085  MCLOHType Kind;
7086  if (getTok().isNot(AsmToken::Identifier)) {
7087    if (getTok().isNot(AsmToken::Integer))
7088      return TokError("expected an identifier or a number in directive");
7089    // We successfully get a numeric value for the identifier.
7090    // Check if it is valid.
7091    int64_t Id = getTok().getIntVal();
7092    if (Id <= -1U && !isValidMCLOHType(Id))
7093      return TokError("invalid numeric identifier in directive");
7094    Kind = (MCLOHType)Id;
7095  } else {
7096    StringRef Name = getTok().getIdentifier();
7097    // We successfully parse an identifier.
7098    // Check if it is a recognized one.
7099    int Id = MCLOHNameToId(Name);
7100
7101    if (Id == -1)
7102      return TokError("invalid identifier in directive");
7103    Kind = (MCLOHType)Id;
7104  }
7105  // Consume the identifier.
7106  Lex();
7107  // Get the number of arguments of this LOH.
7108  int NbArgs = MCLOHIdToNbArgs(Kind);
7109
7110  assert(NbArgs != -1 && "Invalid number of arguments");
7111
7112  SmallVector<MCSymbol *, 3> Args;
7113  for (int Idx = 0; Idx < NbArgs; ++Idx) {
7114    StringRef Name;
7115    if (getParser().parseIdentifier(Name))
7116      return TokError("expected identifier in directive");
7117    Args.push_back(getContext().getOrCreateSymbol(Name));
7118
7119    if (Idx + 1 == NbArgs)
7120      break;
7121    if (parseComma())
7122      return true;
7123  }
7124  if (parseEOL())
7125    return true;
7126
7127  getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7128  return false;
7129}
7130
7131/// parseDirectiveLtorg
7132///  ::= .ltorg | .pool
7133bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7134  if (parseEOL())
7135    return true;
7136  getTargetStreamer().emitCurrentConstantPool();
7137  return false;
7138}
7139
7140/// parseDirectiveReq
7141///  ::= name .req registername
7142bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7143  Lex(); // Eat the '.req' token.
7144  SMLoc SRegLoc = getLoc();
7145  RegKind RegisterKind = RegKind::Scalar;
7146  MCRegister RegNum;
7147  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
7148
7149  if (ParseRes != MatchOperand_Success) {
7150    StringRef Kind;
7151    RegisterKind = RegKind::NeonVector;
7152    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7153
7154    if (ParseRes == MatchOperand_ParseFail)
7155      return true;
7156
7157    if (ParseRes == MatchOperand_Success && !Kind.empty())
7158      return Error(SRegLoc, "vector register without type specifier expected");
7159  }
7160
7161  if (ParseRes != MatchOperand_Success) {
7162    StringRef Kind;
7163    RegisterKind = RegKind::SVEDataVector;
7164    ParseRes =
7165        tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7166
7167    if (ParseRes == MatchOperand_ParseFail)
7168      return true;
7169
7170    if (ParseRes == MatchOperand_Success && !Kind.empty())
7171      return Error(SRegLoc,
7172                   "sve vector register without type specifier expected");
7173  }
7174
7175  if (ParseRes != MatchOperand_Success) {
7176    StringRef Kind;
7177    RegisterKind = RegKind::SVEPredicateVector;
7178    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7179
7180    if (ParseRes == MatchOperand_ParseFail)
7181      return true;
7182
7183    if (ParseRes == MatchOperand_Success && !Kind.empty())
7184      return Error(SRegLoc,
7185                   "sve predicate register without type specifier expected");
7186  }
7187
7188  if (ParseRes != MatchOperand_Success)
7189    return Error(SRegLoc, "register name or alias expected");
7190
7191  // Shouldn't be anything else.
7192  if (parseEOL())
7193    return true;
7194
7195  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7196  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7197    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7198
7199  return false;
7200}
7201
7202/// parseDirectiveUneq
7203///  ::= .unreq registername
7204bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7205  if (getTok().isNot(AsmToken::Identifier))
7206    return TokError("unexpected input in .unreq directive.");
7207  RegisterReqs.erase(getTok().getIdentifier().lower());
7208  Lex(); // Eat the identifier.
7209  return parseToken(AsmToken::EndOfStatement);
7210}
7211
7212bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7213  if (parseEOL())
7214    return true;
7215  getStreamer().emitCFINegateRAState();
7216  return false;
7217}
7218
7219/// parseDirectiveCFIBKeyFrame
7220/// ::= .cfi_b_key
7221bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7222  if (parseEOL())
7223    return true;
7224  getStreamer().emitCFIBKeyFrame();
7225  return false;
7226}
7227
7228/// parseDirectiveCFIMTETaggedFrame
7229/// ::= .cfi_mte_tagged_frame
7230bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7231  if (parseEOL())
7232    return true;
7233  getStreamer().emitCFIMTETaggedFrame();
7234  return false;
7235}
7236
7237/// parseDirectiveVariantPCS
7238/// ::= .variant_pcs symbolname
7239bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7240  StringRef Name;
7241  if (getParser().parseIdentifier(Name))
7242    return TokError("expected symbol name");
7243  if (parseEOL())
7244    return true;
7245  getTargetStreamer().emitDirectiveVariantPCS(
7246      getContext().getOrCreateSymbol(Name));
7247  return false;
7248}
7249
7250/// parseDirectiveSEHAllocStack
7251/// ::= .seh_stackalloc
7252bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7253  int64_t Size;
7254  if (parseImmExpr(Size))
7255    return true;
7256  getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7257  return false;
7258}
7259
7260/// parseDirectiveSEHPrologEnd
7261/// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7266
7267/// parseDirectiveSEHSaveR19R20X
7268/// ::= .seh_save_r19r20_x
7269bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7270  int64_t Offset;
7271  if (parseImmExpr(Offset))
7272    return true;
7273  getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7274  return false;
7275}
7276
7277/// parseDirectiveSEHSaveFPLR
7278/// ::= .seh_save_fplr
7279bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7280  int64_t Offset;
7281  if (parseImmExpr(Offset))
7282    return true;
7283  getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7284  return false;
7285}
7286
7287/// parseDirectiveSEHSaveFPLRX
7288/// ::= .seh_save_fplr_x
7289bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7290  int64_t Offset;
7291  if (parseImmExpr(Offset))
7292    return true;
7293  getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7294  return false;
7295}
7296
7297/// parseDirectiveSEHSaveReg
7298/// ::= .seh_save_reg
7299bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7300  unsigned Reg;
7301  int64_t Offset;
7302  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7303      parseComma() || parseImmExpr(Offset))
7304    return true;
7305  getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7306  return false;
7307}
7308
7309/// parseDirectiveSEHSaveRegX
7310/// ::= .seh_save_reg_x
7311bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7312  unsigned Reg;
7313  int64_t Offset;
7314  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7315      parseComma() || parseImmExpr(Offset))
7316    return true;
7317  getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7318  return false;
7319}
7320
7321/// parseDirectiveSEHSaveRegP
7322/// ::= .seh_save_regp
7323bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7324  unsigned Reg;
7325  int64_t Offset;
7326  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7327      parseComma() || parseImmExpr(Offset))
7328    return true;
7329  getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7330  return false;
7331}
7332
7333/// parseDirectiveSEHSaveRegPX
7334/// ::= .seh_save_regp_x
7335bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7336  unsigned Reg;
7337  int64_t Offset;
7338  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7339      parseComma() || parseImmExpr(Offset))
7340    return true;
7341  getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7342  return false;
7343}
7344
7345/// parseDirectiveSEHSaveLRPair
7346/// ::= .seh_save_lrpair
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Remember where the register operand begins so the even-offset diagnostic
  // points at it rather than at the directive.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  // The first register of the pair must be an even distance from x19
  // (x19, x21, ...). NOTE(review): this assumes Reg is the register number
  // relative to x0 — confirm against parseRegisterInRange.
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7360
7361/// parseDirectiveSEHSaveFReg
7362/// ::= .seh_save_freg
7363bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7364  unsigned Reg;
7365  int64_t Offset;
7366  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7367      parseComma() || parseImmExpr(Offset))
7368    return true;
7369  getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7370  return false;
7371}
7372
7373/// parseDirectiveSEHSaveFRegX
7374/// ::= .seh_save_freg_x
7375bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7376  unsigned Reg;
7377  int64_t Offset;
7378  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7379      parseComma() || parseImmExpr(Offset))
7380    return true;
7381  getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7382  return false;
7383}
7384
7385/// parseDirectiveSEHSaveFRegP
7386/// ::= .seh_save_fregp
7387bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7388  unsigned Reg;
7389  int64_t Offset;
7390  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7391      parseComma() || parseImmExpr(Offset))
7392    return true;
7393  getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7394  return false;
7395}
7396
7397/// parseDirectiveSEHSaveFRegPX
7398/// ::= .seh_save_fregp_x
7399bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7400  unsigned Reg;
7401  int64_t Offset;
7402  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7403      parseComma() || parseImmExpr(Offset))
7404    return true;
7405  getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7406  return false;
7407}
7408
7409/// parseDirectiveSEHSetFP
7410/// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7415
7416/// parseDirectiveSEHAddFP
7417/// ::= .seh_add_fp
7418bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7419  int64_t Size;
7420  if (parseImmExpr(Size))
7421    return true;
7422  getTargetStreamer().emitARM64WinCFIAddFP(Size);
7423  return false;
7424}
7425
7426/// parseDirectiveSEHNop
7427/// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7432
7433/// parseDirectiveSEHSaveNext
7434/// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7439
7440/// parseDirectiveSEHEpilogStart
7441/// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7446
7447/// parseDirectiveSEHEpilogEnd
7448/// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7453
7454/// parseDirectiveSEHTrapFrame
7455/// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
7460
7461/// parseDirectiveSEHMachineFrame
7462/// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
7467
7468/// parseDirectiveSEHContext
7469/// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
7474
7475/// parseDirectiveSEHClearUnwoundToCall
7476/// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
7481
7482/// parseDirectiveSEHPACSignLR
7483/// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // Nothing to parse; just record the unwind opcode in the target streamer.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
7488
7489/// parseDirectiveSEHSaveAnyReg
7490/// ::= .seh_save_any_reg
7491/// ::= .seh_save_any_reg_p
7492/// ::= .seh_save_any_reg_x
7493/// ::= .seh_save_any_reg_px
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  // Operands: any register, a comma, and an immediate stack offset.
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  // Integer registers: x0-x28 plus fp and lr (encoded as 29 and 30).
  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Offset must be non-negative; 16-byte aligned for paired/writeback
    // forms, 8-byte aligned otherwise.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // The highest register has no successor to pair with.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit floating-point registers d0-d31.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // The highest register has no successor to pair with.
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers q0-q31: always 16-byte aligned offsets.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // The highest register has no successor to pair with.
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
7566
/// Classify \p Expr as a symbol reference, extracting any AArch64 ELF
/// modifier (e.g. :lo12:), any Darwin variant kind, and a constant addend.
/// Returns true if the expression is a well-formed symbolic reference that
/// does not mix ELF and Darwin modifier syntax.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Defaults: no ELF modifier, no Darwin modifier, zero addend.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific modifier wrapper, if present, and classify
  // the sub-expression it wraps.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // Reject non-relocatable expressions and A-B symbol differences.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7608
7609/// Force static initialization.
7610extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
7611  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
7612  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
7613  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
7614  RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
7615  RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
7616}
7617
7618#define GET_REGISTER_MATCHER
7619#define GET_SUBTARGET_FEATURE_NAME
7620#define GET_MATCHER_IMPLEMENTATION
7621#define GET_MNEMONIC_SPELL_CHECKER
7622#include "AArch64GenAsmMatcher.inc"
7623
7624// Define this matcher function after the auto-generated include so we
7625// have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax. The MCK_* values come from the generated
  // AArch64GenAsmMatcher.inc included above.
  int64_t ExpectedVal;
  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK__HASH_0:
    ExpectedVal = 0;
    break;
  case MCK__HASH_1:
    ExpectedVal = 1;
    break;
  case MCK__HASH_12:
    ExpectedVal = 12;
    break;
  case MCK__HASH_16:
    ExpectedVal = 16;
    break;
  case MCK__HASH_2:
    ExpectedVal = 2;
    break;
  case MCK__HASH_24:
    ExpectedVal = 24;
    break;
  case MCK__HASH_3:
    ExpectedVal = 3;
    break;
  case MCK__HASH_32:
    ExpectedVal = 32;
    break;
  case MCK__HASH_4:
    ExpectedVal = 4;
    break;
  case MCK__HASH_48:
    ExpectedVal = 48;
    break;
  case MCK__HASH_6:
    ExpectedVal = 6;
    break;
  case MCK__HASH_64:
    ExpectedVal = 64;
    break;
  case MCK__HASH_8:
    ExpectedVal = 8;
    break;
  case MCK__HASH__MINUS_4:
    ExpectedVal = -4;
    break;
  case MCK__HASH__MINUS_8:
    ExpectedVal = -8;
    break;
  case MCK__HASH__MINUS_16:
    ExpectedVal = -16;
    break;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;
  }
  // The operand matches only if it is a constant immediate equal to the
  // value implied by the match class.
  if (!Op.isImm())
    return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
  if (!CE)
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
}
7702
/// Parse a consecutive even/odd pair of scalar GPRs of the same size
/// (e.g. "x0, x1" or "w2, w3") into a single sequence-pair register operand.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  MCRegister FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the size (w or x) the second must match.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's encoding must be even.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success) {
    Error(E, "expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // The second register must immediately follow the first and be the same
  // size.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E, "expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Fold the two scalars into the covering sequence-pair super-register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
7781
7782template <bool ParseShiftExtend, bool ParseSuffix>
7783OperandMatchResultTy
7784AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7785  const SMLoc S = getLoc();
7786  // Check for a SVE vector register specifier first.
7787  MCRegister RegNum;
7788  StringRef Kind;
7789
7790  OperandMatchResultTy Res =
7791      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7792
7793  if (Res != MatchOperand_Success)
7794    return Res;
7795
7796  if (ParseSuffix && Kind.empty())
7797    return MatchOperand_NoMatch;
7798
7799  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7800  if (!KindRes)
7801    return MatchOperand_NoMatch;
7802
7803  unsigned ElementWidth = KindRes->second;
7804
7805  // No shift/extend is the default.
7806  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7807    Operands.push_back(AArch64Operand::CreateVectorReg(
7808        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7809
7810    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
7811    if (Res == MatchOperand_ParseFail)
7812      return MatchOperand_ParseFail;
7813    return MatchOperand_Success;
7814  }
7815
7816  // Eat the comma
7817  Lex();
7818
7819  // Match the shift
7820  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
7821  Res = tryParseOptionalShiftExtend(ExtOpnd);
7822  if (Res != MatchOperand_Success)
7823    return Res;
7824
7825  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7826  Operands.push_back(AArch64Operand::CreateVectorReg(
7827      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7828      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7829      Ext->hasShiftExtendAmount()));
7830
7831  return MatchOperand_Success;
7832}
7833
/// Parse an SVE predicate pattern operand, either as an immediate
/// (e.g. "#31") or as a named pattern (e.g. "vl64", "all").
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  // Patterns are written either as "#<imm>" or as a bare identifier.
  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // The immediate must fold to a constant.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return MatchOperand_ParseFail;

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern, looking the identifier up in the generated table.
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return MatchOperand_NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return MatchOperand_Success;
}
7877
7878OperandMatchResultTy
7879AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
7880  int64_t Pattern;
7881  SMLoc SS = getLoc();
7882  const AsmToken &TokE = getTok();
7883  // Parse the pattern
7884  auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
7885      TokE.getString());
7886  if (!Pat)
7887    return MatchOperand_NoMatch;
7888
7889  Lex();
7890  Pattern = Pat->Encoding;
7891  assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
7892
7893  Operands.push_back(
7894      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7895                                SS, getLoc(), getContext()));
7896
7897  return MatchOperand_Success;
7898}
7899
7900OperandMatchResultTy
7901AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
7902  SMLoc SS = getLoc();
7903
7904  MCRegister XReg;
7905  if (tryParseScalarRegister(XReg) != MatchOperand_Success)
7906    return MatchOperand_NoMatch;
7907
7908  MCContext &ctx = getContext();
7909  const MCRegisterInfo *RI = ctx.getRegisterInfo();
7910  int X8Reg = RI->getMatchingSuperReg(
7911      XReg, AArch64::x8sub_0,
7912      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
7913  if (!X8Reg) {
7914    Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
7915    return MatchOperand_ParseFail;
7916  }
7917
7918  Operands.push_back(
7919      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
7920  return MatchOperand_Success;
7921}
7922
7923OperandMatchResultTy
7924AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
7925  SMLoc S = getLoc();
7926
7927  if (getTok().isNot(AsmToken::Integer))
7928    return MatchOperand_NoMatch;
7929
7930  if (getLexer().peekTok().isNot(AsmToken::Colon))
7931    return MatchOperand_NoMatch;
7932
7933  const MCExpr *ImmF;
7934  if (getParser().parseExpression(ImmF))
7935    return MatchOperand_NoMatch;
7936
7937  if (getTok().isNot(AsmToken::Colon))
7938    return MatchOperand_NoMatch;
7939
7940  Lex(); // Eat ':'
7941  if (getTok().isNot(AsmToken::Integer))
7942    return MatchOperand_NoMatch;
7943
7944  SMLoc E = getTok().getLoc();
7945  const MCExpr *ImmL;
7946  if (getParser().parseExpression(ImmL))
7947    return MatchOperand_NoMatch;
7948
7949  unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue();
7950  unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue();
7951
7952  Operands.push_back(
7953      AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
7954  return MatchOperand_Success;
7955}
7956