// AArch64AsmParser.cpp revision 360784
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64MCExpr.h"
11#include "MCTargetDesc/AArch64MCTargetDesc.h"
12#include "MCTargetDesc/AArch64TargetStreamer.h"
13#include "TargetInfo/AArch64TargetInfo.h"
14#include "AArch64InstrInfo.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCLinkerOptimizationHint.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCAsmParserExtension.h"
34#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35#include "llvm/MC/MCParser/MCTargetAsmParser.h"
36#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
39#include "llvm/MC/MCSymbol.h"
40#include "llvm/MC/MCTargetOptions.h"
41#include "llvm/MC/SubtargetFeature.h"
42#include "llvm/MC/MCValue.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/Compiler.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetParser.h"
49#include "llvm/Support/TargetRegistry.h"
50#include "llvm/Support/raw_ostream.h"
51#include <cassert>
52#include <cctype>
53#include <cstdint>
54#include <cstdio>
55#include <string>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62namespace {
63
// Kinds of register an operand may name: general/FP scalar registers,
// NEON vector registers, and the SVE data (Z) and predicate (P) register
// files, which use distinct syntax.
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

// How a register operand must relate to the register it is constrained
// against. EqualsReg is the common case; the Super/Sub variants let a
// register be written in a different width class (e.g. GPR64as32 or
// GPR32as64, see RegOp below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
76
/// Target assembly parser for AArch64: parses registers, operands and
/// AArch64-specific directives, and matches instructions using the
/// tablegen-generated matcher included from AArch64GenAsmMatcher.inc.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // Describes an SVE MOVPRFX instruction so the instruction that follows it
  // can be validated against the prefix (destination register and, for the
  // predicated forms, governing predicate and destructive element size).
  class PrefixInfo {
  public:
    // Decode Inst into a PrefixInfo if it is one of the MOVPRFX opcodes;
    // any other opcode yields an inactive (default) prefix. TSFlags carries
    // the destructive element size for the predicated forms.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Predicated (merging) form: the predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Predicated (zeroing) form: the predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // ElementSize and Pg are only meaningful for the predicated forms.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix; // Prefix state carried over to the next parsed instruction.

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the current lexer token, for diagnostics.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Handlers for AArch64-specific assembler directives.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Operand-parsing helpers; each recognises one operand form.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 ABI (set from -target-abi=ilp32).
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists before any directive is handled.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose Expr into an AArch64/Darwin relocation modifier plus addend.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
272
273/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
274/// instruction.
275class AArch64Operand : public MCParsedAsmOperand {
276private:
  // Discriminator selecting the active member of the payload union below.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range this operand was parsed from, for diagnostics.
  SMLoc StartLoc, EndLoc;

  // A literal token (mnemonic piece or punctuation).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // A vector register list (first register, count, and element geometry).
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // A lane index, e.g. the "[1]" in "v0.s[1]".
  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an explicit left-shift, e.g. "#1, lsl #12".
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;    // Barrier name (not owned).
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;    // System register name (not owned).
    unsigned Length;
    uint32_t MRSReg;     // Encoding when read via MRS.
    uint32_t MSRReg;     // Encoding when written via MSR.
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  // Payload storage; Kind (above) selects which member is active.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
427
public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy construction is done by hand because of the union: only the member
  // selected by the source operand's Kind may legally be read and copied.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
483
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Typed accessors for the operand payload. Each asserts that the union
  // member selected by Kind is the active one.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstitute the stored bit pattern as an IEEE double.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors accept either a standalone k_ShiftExtend
  // operand or a k_Register operand carrying an embedded shift/extend
  // (see RegOp::ShiftExtend).
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands never use the generic memory-operand kind.
  bool isMem() const override { return false; }
630
631  bool isUImm6() const {
632    if (!isImm())
633      return false;
634    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
635    if (!MCE)
636      return false;
637    int64_t Val = MCE->getValue();
638    return (Val >= 0 && Val < 64);
639  }
640
  // Unscaled signed immediate of the given bit width.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed immediate that is a multiple of Scale and, after dividing by
  // Scale, fits in Bits signed bits.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned counterpart of isSImmScaled.
  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
650
651  template <int Bits, int Scale>
652  DiagnosticPredicate isImmScaled(bool Signed) const {
653    if (!isImm())
654      return DiagnosticPredicateTy::NoMatch;
655
656    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
657    if (!MCE)
658      return DiagnosticPredicateTy::NoMatch;
659
660    int64_t MinVal, MaxVal;
661    if (Signed) {
662      int64_t Shift = Bits - 1;
663      MinVal = (int64_t(1) << Shift) * -Scale;
664      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
665    } else {
666      MinVal = 0;
667      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
668    }
669
670    int64_t Val = MCE->getValue();
671    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
672      return DiagnosticPredicateTy::Match;
673
674    return DiagnosticPredicateTy::NearMatch;
675  }
676
677  DiagnosticPredicate isSVEPattern() const {
678    if (!isImm())
679      return DiagnosticPredicateTy::NoMatch;
680    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
681    if (!MCE)
682      return DiagnosticPredicateTy::NoMatch;
683    int64_t Val = MCE->getValue();
684    if (Val >= 0 && Val < 32)
685      return DiagnosticPredicateTy::Match;
686    return DiagnosticPredicateTy::NearMatch;
687  }
688
689  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
690    AArch64MCExpr::VariantKind ELFRefKind;
691    MCSymbolRefExpr::VariantKind DarwinRefKind;
692    int64_t Addend;
693    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
694                                           Addend)) {
695      // If we don't understand the expression, assume the best and
696      // let the fixup and relocation code deal with it.
697      return true;
698    }
699
700    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
701        ELFRefKind == AArch64MCExpr::VK_LO12 ||
702        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
703        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
704        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
705        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
706        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
707        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
708        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
709        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
710        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
711      // Note that we don't range-check the addend. It's adjusted modulo page
712      // size when converted, so there is no "out of range" condition when using
713      // @pageoff.
714      return true;
715    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
716               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
717      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
718      return Addend == 0;
719    }
720
721    return false;
722  }
723
724  template <int Scale> bool isUImm12Offset() const {
725    if (!isImm())
726      return false;
727
728    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
729    if (!MCE)
730      return isSymbolicUImm12Offset(getImm());
731
732    int64_t Val = MCE->getValue();
733    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
734  }
735
736  template <int N, int M>
737  bool isImmInRange() const {
738    if (!isImm())
739      return false;
740    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
741    if (!MCE)
742      return false;
743    int64_t Val = MCE->getValue();
744    return (Val >= N && Val <= M);
745  }
746
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    // Accept the value only if truncating it to T's width loses nothing
    // under at least one of the signed/unsigned interpretations.
    int64_t Val = MCE->getValue();
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    // Check encodability as an AArch64 bitmask immediate at T's width.
    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
765
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already-shifted constant with the requested shift amount.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // If the low Width bits are clear (and the value is non-zero), it
        // can be expressed shifted; otherwise report it unshifted.
        // NOTE(review): `Val >> Width` on negative Val relies on an
        // arithmetic right shift — true on supported host compilers.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
788
789  bool isAddSubImm() const {
790    if (!isShiftedImm() && !isImm())
791      return false;
792
793    const MCExpr *Expr;
794
795    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
796    if (isShiftedImm()) {
797      unsigned Shift = ShiftedImm.ShiftAmount;
798      Expr = ShiftedImm.Val;
799      if (Shift != 0 && Shift != 12)
800        return false;
801    } else {
802      Expr = getImm();
803    }
804
805    AArch64MCExpr::VariantKind ELFRefKind;
806    MCSymbolRefExpr::VariantKind DarwinRefKind;
807    int64_t Addend;
808    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
809                                          DarwinRefKind, Addend)) {
810      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
811          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
812          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
813          || ELFRefKind == AArch64MCExpr::VK_LO12
814          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
815          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
816          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
817          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
818          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
819          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
820          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
821          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
822          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
823    }
824
825    // If it's a constant, it should be a real immediate in range.
826    if (auto ShiftedVal = getShiftedVal<12>())
827      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
828
829    // If it's an expression, we hope for the best and let the fixup/relocation
830    // code deal with it.
831    return true;
832  }
833
834  bool isAddSubImmNeg() const {
835    if (!isShiftedImm() && !isImm())
836      return false;
837
838    // Otherwise it should be a real negative immediate in range.
839    if (auto ShiftedVal = getShiftedVal<12>())
840      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
841
842    return false;
843  }
844
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' form (no room to shift), hence
    // the extra IsByte && shifted exclusion below.
    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Prefer the DUP/CPY alias only when the value is NOT also expressible as
  // a CPY immediate, so the two alias forms do not fight over a match.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
889
890  bool isCondCode() const { return Kind == k_CondCode; }
891
892  bool isSIMDImmType10() const {
893    if (!isImm())
894      return false;
895    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
896    if (!MCE)
897      return false;
898    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
899  }
900
901  template<int N>
902  bool isBranchTarget() const {
903    if (!isImm())
904      return false;
905    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
906    if (!MCE)
907      return true;
908    int64_t Val = MCE->getValue();
909    if (Val & 0x3)
910      return false;
911    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
912    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
913  }
914
915  bool
916  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
917    if (!isImm())
918      return false;
919
920    AArch64MCExpr::VariantKind ELFRefKind;
921    MCSymbolRefExpr::VariantKind DarwinRefKind;
922    int64_t Addend;
923    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
924                                             DarwinRefKind, Addend)) {
925      return false;
926    }
927    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
928      return false;
929
930    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
931      if (ELFRefKind == AllowedModifiers[i])
932        return true;
933    }
934
935    return false;
936  }
937
  // MOVW operand with a "G3"-group modifier (e.g. :abs_g3:).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  // MOVW operand with a "G2"-group modifier (e.g. :abs_g2:, :tprel_g2:).
  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  // MOVW operand with a "G1"-group modifier (e.g. :abs_g1:, :gottprel_g1:).
  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  // MOVW operand with a "G0"-group modifier (e.g. :abs_g0:, :dtprel_g0:).
  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
967
968  template<int RegWidth, int Shift>
969  bool isMOVZMovAlias() const {
970    if (!isImm()) return false;
971
972    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
973    if (!CE) return false;
974    uint64_t Value = CE->getValue();
975
976    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
977  }
978
979  template<int RegWidth, int Shift>
980  bool isMOVNMovAlias() const {
981    if (!isImm()) return false;
982
983    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
984    if (!CE) return false;
985    uint64_t Value = CE->getValue();
986
987    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
988  }
989
  // An FP immediate is only valid if it is representable in the 8-bit FMOV
  // encoding (getFP64Imm returns -1 for unrepresentable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
997
  // -1U is the sentinel for "not a valid register for this access kind".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState fields whose MSR immediate is restricted to 0 or 1.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // Any other valid PState field takes a 0..15 immediate; the two sets are
  // kept disjoint by excluding the Imm0_1 fields here.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1021
  bool isReg() const override {
    return Kind == k_Register;
  }

  // A plain scalar (GPR/FPR) register, as opposed to a vector register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the lower half of the register file
  // (FPR128_lo), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
1039
1040  template <unsigned Class> bool isSVEVectorReg() const {
1041    RegKind RK;
1042    switch (Class) {
1043    case AArch64::ZPRRegClassID:
1044    case AArch64::ZPR_3bRegClassID:
1045    case AArch64::ZPR_4bRegClassID:
1046      RK = RegKind::SVEDataVector;
1047      break;
1048    case AArch64::PPRRegClassID:
1049    case AArch64::PPR_3bRegClassID:
1050      RK = RegKind::SVEPredicateVector;
1051      break;
1052    default:
1053      llvm_unreachable("Unsupport register class");
1054    }
1055
1056    return (Kind == k_Register && Reg.Kind == RK) &&
1057           AArch64MCRegisterClasses[Class].contains(getReg());
1058  }
1059
  // A scalar FP register used where an SVE Z register is expected (the
  // FPR<->ZPR aliasing forms).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1064
  // SVE predicate register with a specific element width; NearMatch lets the
  // matcher emit a "wrong element size" diagnostic rather than "no match".
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Same as above, for SVE data (Z) registers.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1086
  // SVE data register combined with a shift/extend of the expected kind and
  // amount (used for scaled register offsets in SVE addressing modes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1109
  // A 64-bit GPR written where a 32-bit register is expected (the register
  // number is remapped when operands are added).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // A 32-bit GPR written where a 64-bit register is expected.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // First register of a W (32-bit) sequential register pair.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // First register of an X (64-bit) sequential register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1131
  // Rotation immediate for complex-number instructions: must be a constant
  // <= 270 that is congruent to Remainder modulo Angle.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1144
  // A scalar register contained in the given 64-bit register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR with an LSL whose amount matches log2(ExtWidth / 8), as used
  // for scaled register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1160
1161  /// Is this a vector list with the type implicit (presumably attached to the
1162  /// instruction itself)?
1163  template <RegKind VectorKind, unsigned NumRegs>
1164  bool isImplicitlyTypedVectorList() const {
1165    return Kind == k_VectorList && VectorList.Count == NumRegs &&
1166           VectorList.NumElements == 0 &&
1167           VectorList.RegisterKind == VectorKind;
1168  }
1169
1170  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1171            unsigned ElementWidth>
1172  bool isTypedVectorList() const {
1173    if (Kind != k_VectorList)
1174      return false;
1175    if (VectorList.Count != NumRegs)
1176      return false;
1177    if (VectorList.RegisterKind != VectorKind)
1178      return false;
1179    if (VectorList.ElementWidth != ElementWidth)
1180      return false;
1181    return VectorList.NumElements == NumElements;
1182  }
1183
  // Vector element index within the inclusive [Min, Max] range.
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1192
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shifter is a shift/extend operand whose operation is a plain shift
  // (LSL/LSR/ASR/ROR/MSL), excluding the register-extend operations.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1212
  // True when the FP immediate exactly equals the value named by ImmEnum in
  // the ExactFPImm table (no rounding was needed during parsing).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1235
  // Match if the immediate equals either candidate value. Note that when
  // neither matches, the result of the *ImmB* check is returned, so the
  // final diagnostic (NearMatch vs NoMatch) comes from the second candidate.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1245
  // A register-extend operand (UXTB..SXTX, or LSL as an alias for UXTX/UXTW)
  // with an extend amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1258
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend with a 64-bit source register: UXTX/SXTX, or LSL (alias of UXTX),
  // limited to an amount of at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1277
  // 64-bit register-offset memory extend: LSL or SXTX, with a shift amount of
  // either 0 or log2 of the access size in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // 32-bit register-offset memory extend: UXTW or SXTW, same amount rule.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1295
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1318
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1342
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    // (The LSL check is already implied by isLogicalVecShifter above.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1372
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1382
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // A constant ADRP offset must be page-aligned and fit in a signed
    // 21-bit field of 4K pages.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    // Non-constant expressions (labels) are resolved later.
    return true;
  }
1398
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // A constant ADR offset must fit in a signed 21-bit byte offset.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    // Non-constant expressions (labels) are resolved later.
    return true;
  }
1414
  // Append an expression operand to Inst, folding constants to plain
  // immediate operands where possible.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1424
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Remap a 64-bit GPR to the 32-bit register with the same encoding before
  // adding it (counterpart of isGPR32as64).
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Remap a 32-bit GPR to the 64-bit register with the same encoding before
  // adding it (counterpart of isGPR64as32).
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1453
  // Remap a scalar FP register (B/H/S/D/Q) to the Z register with the same
  // index before adding it (counterpart of isFPRasZPR).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1468
  // The parser stores NEON vector registers as Q registers; translate to the
  // corresponding D register when a 64-bit operand is required.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1487
  // Selects which row of FirstRegs below to use when translating a vector
  // list to its tuple register.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  // Translate the list's first (Q or Z) register to the register that names
  // the whole NumRegs-tuple, then add that single register operand.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Column 0 holds the base the parsed start register is relative to;
    // columns 1..4 hold the tuple register for lists of 1..4 registers.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1516
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Encode which of the two permitted exact FP values was written: 0 for the
  // ImmIs0 candidate, 1 for the ImmIs1 candidate.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1528
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  // Add the immediate and its shift amount as two operands. Prefers the
  // canonical (value, shift) split computed by getShiftedVal; falls back to
  // the explicitly parsed shifted-imm form, then to an unshifted immediate.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above but negates the value (used to match SUB aliases of ADD with a
  // negative immediate, and vice versa).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1561
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // Constant ADRP targets are encoded in 4K pages; labels are added as-is
  // and resolved during relocation.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1579
  // Scaled unsigned 12-bit offset; non-constant expressions are passed
  // through unscaled for later fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  // Constant immediate divided by the template scale factor.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1604
  // Encode a logical (bitmask) immediate for a T-sized operation.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same, but encodes the bitwise NOT of the value (for the inverted aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1622
  // Encode an AdvSIMD modified-immediate "type 10" value (see isSIMDImmType10).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1629
1630  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1631    // Branch operands don't encode the low bits, so shift them off
1632    // here. If it's a label, however, just put it on directly as there's
1633    // not enough information now to do anything.
1634    assert(N == 1 && "Invalid number of operands!");
1635    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1636    if (!MCE) {
1637      addExpr(Inst, getImm());
1638      return;
1639    }
1640    assert(MCE && "Invalid constant immediate operand!");
1641    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1642  }
1643
1644  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1645    // Branch operands don't encode the low bits, so shift them off
1646    // here. If it's a label, however, just put it on directly as there's
1647    // not enough information now to do anything.
1648    assert(N == 1 && "Invalid number of operands!");
1649    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1650    if (!MCE) {
1651      addExpr(Inst, getImm());
1652      return;
1653    }
1654    assert(MCE && "Invalid constant immediate operand!");
1655    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1656  }
1657
1658  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1659    // Branch operands don't encode the low bits, so shift them off
1660    // here. If it's a label, however, just put it on directly as there's
1661    // not enough information now to do anything.
1662    assert(N == 1 && "Invalid number of operands!");
1663    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1664    if (!MCE) {
1665      addExpr(Inst, getImm());
1666      return;
1667    }
1668    assert(MCE && "Invalid constant immediate operand!");
1669    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1670  }
1671
  // Add the 8-bit FMOV encoding of the FP immediate (isFPImm guarantees it
  // is representable).
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1682
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1706
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }

  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1726
  // Pack shift type and amount into the single shifter-operand immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1733
  // For a 32-bit source, a plain LSL is an alias of UXTW.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // For a 64-bit source, a plain LSL is an alias of UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1749
1750  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1751    assert(N == 2 && "Invalid number of operands!");
1752    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1753    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1754    Inst.addOperand(MCOperand::createImm(IsSigned));
1755    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1756  }
1757
1758  // For 8-bit load/store instructions with a register offset, both the
1759  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1760  // they're disambiguated by whether the shift was explicit or implicit rather
1761  // than its size.
1762  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1763    assert(N == 2 && "Invalid number of operands!");
1764    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1765    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1766    Inst.addOperand(MCOperand::createImm(IsSigned));
1767    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1768  }
1769
1770  template<int Shift>
1771  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 1 && "Invalid number of operands!");
1773
1774    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1775    uint64_t Value = CE->getValue();
1776    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1777  }
1778
1779  template<int Shift>
1780  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782
1783    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1784    uint64_t Value = CE->getValue();
1785    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1786  }
1787
1788  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 1 && "Invalid number of operands!");
1790    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1791    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1792  }
1793
1794  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 1 && "Invalid number of operands!");
1796    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1797    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1798  }
1799
1800  void print(raw_ostream &OS) const override;
1801
1802  static std::unique_ptr<AArch64Operand>
1803  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1804    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1805    Op->Tok.Data = Str.data();
1806    Op->Tok.Length = Str.size();
1807    Op->Tok.IsSuffix = IsSuffix;
1808    Op->StartLoc = S;
1809    Op->EndLoc = S;
1810    return Op;
1811  }
1812
1813  static std::unique_ptr<AArch64Operand>
1814  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1815            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1816            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1817            unsigned ShiftAmount = 0,
1818            unsigned HasExplicitAmount = false) {
1819    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1820    Op->Reg.RegNum = RegNum;
1821    Op->Reg.Kind = Kind;
1822    Op->Reg.ElementWidth = 0;
1823    Op->Reg.EqualityTy = EqTy;
1824    Op->Reg.ShiftExtend.Type = ExtTy;
1825    Op->Reg.ShiftExtend.Amount = ShiftAmount;
1826    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1827    Op->StartLoc = S;
1828    Op->EndLoc = E;
1829    return Op;
1830  }
1831
1832  static std::unique_ptr<AArch64Operand>
1833  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1834                  SMLoc S, SMLoc E, MCContext &Ctx,
1835                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1836                  unsigned ShiftAmount = 0,
1837                  unsigned HasExplicitAmount = false) {
1838    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1839            Kind == RegKind::SVEPredicateVector) &&
1840           "Invalid vector kind");
1841    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1842                        HasExplicitAmount);
1843    Op->Reg.ElementWidth = ElementWidth;
1844    return Op;
1845  }
1846
1847  static std::unique_ptr<AArch64Operand>
1848  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1849                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1850                   MCContext &Ctx) {
1851    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1852    Op->VectorList.RegNum = RegNum;
1853    Op->VectorList.Count = Count;
1854    Op->VectorList.NumElements = NumElements;
1855    Op->VectorList.ElementWidth = ElementWidth;
1856    Op->VectorList.RegisterKind = RegisterKind;
1857    Op->StartLoc = S;
1858    Op->EndLoc = E;
1859    return Op;
1860  }
1861
1862  static std::unique_ptr<AArch64Operand>
1863  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1864    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1865    Op->VectorIndex.Val = Idx;
1866    Op->StartLoc = S;
1867    Op->EndLoc = E;
1868    return Op;
1869  }
1870
1871  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1872                                                   SMLoc E, MCContext &Ctx) {
1873    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1874    Op->Imm.Val = Val;
1875    Op->StartLoc = S;
1876    Op->EndLoc = E;
1877    return Op;
1878  }
1879
1880  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1881                                                          unsigned ShiftAmount,
1882                                                          SMLoc S, SMLoc E,
1883                                                          MCContext &Ctx) {
1884    auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1885    Op->ShiftedImm .Val = Val;
1886    Op->ShiftedImm.ShiftAmount = ShiftAmount;
1887    Op->StartLoc = S;
1888    Op->EndLoc = E;
1889    return Op;
1890  }
1891
1892  static std::unique_ptr<AArch64Operand>
1893  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1894    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1895    Op->CondCode.Code = Code;
1896    Op->StartLoc = S;
1897    Op->EndLoc = E;
1898    return Op;
1899  }
1900
1901  static std::unique_ptr<AArch64Operand>
1902  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1903    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1904    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1905    Op->FPImm.IsExact = IsExact;
1906    Op->StartLoc = S;
1907    Op->EndLoc = S;
1908    return Op;
1909  }
1910
1911  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1912                                                       StringRef Str,
1913                                                       SMLoc S,
1914                                                       MCContext &Ctx) {
1915    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1916    Op->Barrier.Val = Val;
1917    Op->Barrier.Data = Str.data();
1918    Op->Barrier.Length = Str.size();
1919    Op->StartLoc = S;
1920    Op->EndLoc = S;
1921    return Op;
1922  }
1923
1924  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1925                                                      uint32_t MRSReg,
1926                                                      uint32_t MSRReg,
1927                                                      uint32_t PStateField,
1928                                                      MCContext &Ctx) {
1929    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1930    Op->SysReg.Data = Str.data();
1931    Op->SysReg.Length = Str.size();
1932    Op->SysReg.MRSReg = MRSReg;
1933    Op->SysReg.MSRReg = MSRReg;
1934    Op->SysReg.PStateField = PStateField;
1935    Op->StartLoc = S;
1936    Op->EndLoc = S;
1937    return Op;
1938  }
1939
1940  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1941                                                     SMLoc E, MCContext &Ctx) {
1942    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
1943    Op->SysCRImm.Val = Val;
1944    Op->StartLoc = S;
1945    Op->EndLoc = E;
1946    return Op;
1947  }
1948
1949  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1950                                                        StringRef Str,
1951                                                        SMLoc S,
1952                                                        MCContext &Ctx) {
1953    auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1954    Op->Prefetch.Val = Val;
1955    Op->Barrier.Data = Str.data();
1956    Op->Barrier.Length = Str.size();
1957    Op->StartLoc = S;
1958    Op->EndLoc = S;
1959    return Op;
1960  }
1961
1962  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1963                                                       StringRef Str,
1964                                                       SMLoc S,
1965                                                       MCContext &Ctx) {
1966    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
1967    Op->PSBHint.Val = Val;
1968    Op->PSBHint.Data = Str.data();
1969    Op->PSBHint.Length = Str.size();
1970    Op->StartLoc = S;
1971    Op->EndLoc = S;
1972    return Op;
1973  }
1974
1975  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1976                                                       StringRef Str,
1977                                                       SMLoc S,
1978                                                       MCContext &Ctx) {
1979    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
1980    Op->BTIHint.Val = Val << 1 | 32;
1981    Op->BTIHint.Data = Str.data();
1982    Op->BTIHint.Length = Str.size();
1983    Op->StartLoc = S;
1984    Op->EndLoc = S;
1985    return Op;
1986  }
1987
1988  static std::unique_ptr<AArch64Operand>
1989  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1990                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1991    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1992    Op->ShiftExtend.Type = ShOp;
1993    Op->ShiftExtend.Amount = Val;
1994    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995    Op->StartLoc = S;
1996    Op->EndLoc = E;
1997    return Op;
1998  }
1999};
2000
2001} // end anonymous namespace.
2002
2003void AArch64Operand::print(raw_ostream &OS) const {
2004  switch (Kind) {
2005  case k_FPImm:
2006    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2007    if (!getFPImmIsExact())
2008      OS << " (inexact)";
2009    OS << ">";
2010    break;
2011  case k_Barrier: {
2012    StringRef Name = getBarrierName();
2013    if (!Name.empty())
2014      OS << "<barrier " << Name << ">";
2015    else
2016      OS << "<barrier invalid #" << getBarrier() << ">";
2017    break;
2018  }
2019  case k_Immediate:
2020    OS << *getImm();
2021    break;
2022  case k_ShiftedImm: {
2023    unsigned Shift = getShiftedImmShift();
2024    OS << "<shiftedimm ";
2025    OS << *getShiftedImmVal();
2026    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2027    break;
2028  }
2029  case k_CondCode:
2030    OS << "<condcode " << getCondCode() << ">";
2031    break;
2032  case k_VectorList: {
2033    OS << "<vectorlist ";
2034    unsigned Reg = getVectorListStart();
2035    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2036      OS << Reg + i << " ";
2037    OS << ">";
2038    break;
2039  }
2040  case k_VectorIndex:
2041    OS << "<vectorindex " << getVectorIndex() << ">";
2042    break;
2043  case k_SysReg:
2044    OS << "<sysreg: " << getSysReg() << '>';
2045    break;
2046  case k_Token:
2047    OS << "'" << getToken() << "'";
2048    break;
2049  case k_SysCR:
2050    OS << "c" << getSysCR();
2051    break;
2052  case k_Prefetch: {
2053    StringRef Name = getPrefetchName();
2054    if (!Name.empty())
2055      OS << "<prfop " << Name << ">";
2056    else
2057      OS << "<prfop invalid #" << getPrefetch() << ">";
2058    break;
2059  }
2060  case k_PSBHint:
2061    OS << getPSBHintName();
2062    break;
2063  case k_Register:
2064    OS << "<register " << getReg() << ">";
2065    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2066      break;
2067    LLVM_FALLTHROUGH;
2068  case k_BTIHint:
2069    OS << getBTIHintName();
2070    break;
2071  case k_ShiftExtend:
2072    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2073       << getShiftExtendAmount();
2074    if (!hasShiftExtendAmount())
2075      OS << "<imp>";
2076    OS << '>';
2077    break;
2078  }
2079}
2080
2081/// @name Auto-generated Match Functions
2082/// {
2083
2084static unsigned MatchRegisterName(StringRef Name);
2085
2086/// }
2087
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
/// corresponding Q-register number, or 0 if the name is not recognized.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2124
2125/// Returns an optional pair of (#elements, element-width) if Suffix
2126/// is a valid vector kind. Where the number of elements in a vector
2127/// or the vector width is implicit or explicitly unknown (but still a
2128/// valid suffix kind), 0 is used.
2129static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2130                                                     RegKind VectorKind) {
2131  std::pair<int, int> Res = {-1, -1};
2132
2133  switch (VectorKind) {
2134  case RegKind::NeonVector:
2135    Res =
2136        StringSwitch<std::pair<int, int>>(Suffix.lower())
2137            .Case("", {0, 0})
2138            .Case(".1d", {1, 64})
2139            .Case(".1q", {1, 128})
2140            // '.2h' needed for fp16 scalar pairwise reductions
2141            .Case(".2h", {2, 16})
2142            .Case(".2s", {2, 32})
2143            .Case(".2d", {2, 64})
2144            // '.4b' is another special case for the ARMv8.2a dot product
2145            // operand
2146            .Case(".4b", {4, 8})
2147            .Case(".4h", {4, 16})
2148            .Case(".4s", {4, 32})
2149            .Case(".8b", {8, 8})
2150            .Case(".8h", {8, 16})
2151            .Case(".16b", {16, 8})
2152            // Accept the width neutral ones, too, for verbose syntax. If those
2153            // aren't used in the right places, the token operand won't match so
2154            // all will work out.
2155            .Case(".b", {0, 8})
2156            .Case(".h", {0, 16})
2157            .Case(".s", {0, 32})
2158            .Case(".d", {0, 64})
2159            .Default({-1, -1});
2160    break;
2161  case RegKind::SVEPredicateVector:
2162  case RegKind::SVEDataVector:
2163    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2164              .Case("", {0, 0})
2165              .Case(".b", {0, 8})
2166              .Case(".h", {0, 16})
2167              .Case(".s", {0, 32})
2168              .Case(".d", {0, 64})
2169              .Case(".q", {0, 128})
2170              .Default({-1, -1});
2171    break;
2172  default:
2173    llvm_unreachable("Unsupported RegKind");
2174  }
2175
2176  if (Res == std::make_pair(-1, -1))
2177    return Optional<std::pair<int, int>>();
2178
2179  return Optional<std::pair<int, int>>(Res);
2180}
2181
2182static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2183  return parseVectorKind(Suffix, VectorKind).hasValue();
2184}
2185
/// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
/// the corresponding Z-register number, or 0 if the name is not recognized.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2222
/// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
/// corresponding P-register number, or 0 if the name is not recognized.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2243
2244bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2245                                     SMLoc &EndLoc) {
2246  StartLoc = getLoc();
2247  auto Res = tryParseScalarRegister(RegNo);
2248  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2249  return Res != MatchOperand_Success;
2250}
2251
2252// Matches a register name or register alias previously defined by '.req'
2253unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2254                                                  RegKind Kind) {
2255  unsigned RegNum = 0;
2256  if ((RegNum = matchSVEDataVectorRegName(Name)))
2257    return Kind == RegKind::SVEDataVector ? RegNum : 0;
2258
2259  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2260    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2261
2262  if ((RegNum = MatchNeonVectorRegName(Name)))
2263    return Kind == RegKind::NeonVector ? RegNum : 0;
2264
2265  // The parsed register must be of RegKind Scalar
2266  if ((RegNum = MatchRegisterName(Name)))
2267    return Kind == RegKind::Scalar ? RegNum : 0;
2268
2269  if (!RegNum) {
2270    // Handle a few common aliases of registers.
2271    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2272                    .Case("fp", AArch64::FP)
2273                    .Case("lr",  AArch64::LR)
2274                    .Case("x31", AArch64::XZR)
2275                    .Case("w31", AArch64::WZR)
2276                    .Default(0))
2277      return Kind == RegKind::Scalar ? RegNum : 0;
2278
2279    // Check for aliases registered via .req. Canonicalize to lower case.
2280    // That's more consistent since register names are case insensitive, and
2281    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2282    auto Entry = RegisterReqs.find(Name.lower());
2283    if (Entry == RegisterReqs.end())
2284      return 0;
2285
2286    // set RegNum if the match is the right kind of register
2287    if (Kind == Entry->getValue().first)
2288      RegNum = Entry->getValue().second;
2289  }
2290  return RegNum;
2291}
2292
2293/// tryParseScalarRegister - Try to parse a register name. The token must be an
2294/// Identifier when called, and if it is a register name the token is eaten and
2295/// the register is added to the operand list.
2296OperandMatchResultTy
2297AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2298  MCAsmParser &Parser = getParser();
2299  const AsmToken &Tok = Parser.getTok();
2300  if (Tok.isNot(AsmToken::Identifier))
2301    return MatchOperand_NoMatch;
2302
2303  std::string lowerCase = Tok.getString().lower();
2304  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2305  if (Reg == 0)
2306    return MatchOperand_NoMatch;
2307
2308  RegNum = Reg;
2309  Parser.Lex(); // Eat identifier token.
2310  return MatchOperand_Success;
2311}
2312
2313/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2314OperandMatchResultTy
2315AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2316  MCAsmParser &Parser = getParser();
2317  SMLoc S = getLoc();
2318
2319  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2320    Error(S, "Expected cN operand where 0 <= N <= 15");
2321    return MatchOperand_ParseFail;
2322  }
2323
2324  StringRef Tok = Parser.getTok().getIdentifier();
2325  if (Tok[0] != 'c' && Tok[0] != 'C') {
2326    Error(S, "Expected cN operand where 0 <= N <= 15");
2327    return MatchOperand_ParseFail;
2328  }
2329
2330  uint32_t CRNum;
2331  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2332  if (BadNum || CRNum > 15) {
2333    Error(S, "Expected cN operand where 0 <= N <= 15");
2334    return MatchOperand_ParseFail;
2335  }
2336
2337  Parser.Lex(); // Eat identifier token.
2338  Operands.push_back(
2339      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2340  return MatchOperand_Success;
2341}
2342
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a symbolic prefetch-operation name or an immediate
/// (optionally '#'-prefixed). The immediate must fit the operation field:
/// [0,15] for SVE prefetches, [0,31] otherwise.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Forward lookup: symbolic name -> encoding, in the SVE or base table.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Reverse lookup: encoding -> name, so a numeric operand can still carry
  // its symbolic name in the created operand.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only constant expressions are meaningful for the prfop field.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2412
2413/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2414OperandMatchResultTy
2415AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2416  MCAsmParser &Parser = getParser();
2417  SMLoc S = getLoc();
2418  const AsmToken &Tok = Parser.getTok();
2419  if (Tok.isNot(AsmToken::Identifier)) {
2420    TokError("invalid operand for instruction");
2421    return MatchOperand_ParseFail;
2422  }
2423
2424  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2425  if (!PSB) {
2426    TokError("invalid operand for instruction");
2427    return MatchOperand_ParseFail;
2428  }
2429
2430  Parser.Lex(); // Eat identifier token.
2431  Operands.push_back(AArch64Operand::CreatePSBHint(
2432      PSB->Encoding, Tok.getString(), S, getContext()));
2433  return MatchOperand_Success;
2434}
2435
2436/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2437OperandMatchResultTy
2438AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2439  MCAsmParser &Parser = getParser();
2440  SMLoc S = getLoc();
2441  const AsmToken &Tok = Parser.getTok();
2442  if (Tok.isNot(AsmToken::Identifier)) {
2443    TokError("invalid operand for instruction");
2444    return MatchOperand_ParseFail;
2445  }
2446
2447  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2448  if (!BTI) {
2449    TokError("invalid operand for instruction");
2450    return MatchOperand_ParseFail;
2451  }
2452
2453  Parser.Lex(); // Eat identifier token.
2454  Operands.push_back(AArch64Operand::CreateBTIHint(
2455      BTI->Encoding, Tok.getString(), S, getContext()));
2456  return MatchOperand_Success;
2457}
2458
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Validate any symbol-reference modifier against what ADRP accepts.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2510
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // ADR accepts only an unmodified symbol reference; any explicit
  // @-modifier is rejected.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2547
/// tryParseFPImm - A floating point immediate expression operand.
///
/// With AddFPZeroAsLiteral set, an exact positive zero is pushed as the two
/// tokens "#0" and ".0" instead of an FP-immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#', this may simply be some other kind of operand.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // An encoded value must fit in 8 bits and cannot be negated.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2605
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
///
/// The immediate may be symbolic (parseSymbolicImmVal). Only 'lsl #N' with a
/// non-negative N is accepted as the shift; 'lsl #0' collapses back to a
/// plain immediate operand.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No shift suffix follows; this is just a plain immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2669
/// parseCondCodeString - Parse a Condition Code string.
///
/// Case-insensitively maps a condition-code spelling to its AArch64CC value.
/// When SVE is available, the SVE condition aliases (none/any/nlast/...) are
/// also accepted; each maps onto the base condition it is an alias for.
/// Returns AArch64CC::Invalid if the string is not recognized.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("eq", AArch64CC::EQ)
                    .Case("ne", AArch64CC::NE)
                    .Case("cs", AArch64CC::HS)
                    .Case("hs", AArch64CC::HS)
                    .Case("cc", AArch64CC::LO)
                    .Case("lo", AArch64CC::LO)
                    .Case("mi", AArch64CC::MI)
                    .Case("pl", AArch64CC::PL)
                    .Case("vs", AArch64CC::VS)
                    .Case("vc", AArch64CC::VC)
                    .Case("hi", AArch64CC::HI)
                    .Case("ls", AArch64CC::LS)
                    .Case("ge", AArch64CC::GE)
                    .Case("lt", AArch64CC::LT)
                    .Case("gt", AArch64CC::GT)
                    .Case("le", AArch64CC::LE)
                    .Case("al", AArch64CC::AL)
                    .Case("nv", AArch64CC::NV)
                    .Default(AArch64CC::Invalid);

  // Only consult the SVE alias table if the base table had no match.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE])
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

  return CC;
}
2710
2711/// parseCondCode - Parse a Condition Code operand.
2712bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2713                                     bool invertCondCode) {
2714  MCAsmParser &Parser = getParser();
2715  SMLoc S = getLoc();
2716  const AsmToken &Tok = Parser.getTok();
2717  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2718
2719  StringRef Cond = Tok.getString();
2720  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2721  if (CC == AArch64CC::Invalid)
2722    return TokError("invalid condition code");
2723  Parser.Lex(); // Eat identifier token.
2724
2725  if (invertCondCode) {
2726    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2727      return TokError("condition codes AL and NV are invalid for this instruction");
2728    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2729  }
2730
2731  Operands.push_back(
2732      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2733  return false;
2734}
2735
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognizes both shift operators (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend operators (uxtb/.../sxtx), for which the
/// amount defaults to an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift operators require an explicit amount; only the extend operators
    // may omit it.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  // The amount must fold to a constant at parse time.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2809
// Table mapping architecture-extension names to the subtarget features they
// imply. Entries with an empty feature set are recognized names that are not
// yet wired up to a feature bit.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2842
2843static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2844  if (FBS[AArch64::HasV8_1aOps])
2845    Str += "ARMv8.1a";
2846  else if (FBS[AArch64::HasV8_2aOps])
2847    Str += "ARMv8.2a";
2848  else if (FBS[AArch64::HasV8_3aOps])
2849    Str += "ARMv8.3a";
2850  else if (FBS[AArch64::HasV8_4aOps])
2851    Str += "ARMv8.4a";
2852  else if (FBS[AArch64::HasV8_5aOps])
2853    Str += "ARMv8.5a";
2854  else {
2855    auto ext = std::find_if(std::begin(ExtensionMap),
2856      std::end(ExtensionMap),
2857      [&](const Extension& e)
2858      // Use & in case multiple features are enabled
2859      { return (FBS & e.Features) != FeatureBitset(); }
2860    );
2861
2862    Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2863  }
2864}
2865
2866void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2867                                      SMLoc S) {
2868  const uint16_t Op2 = Encoding & 7;
2869  const uint16_t Cm = (Encoding & 0x78) >> 3;
2870  const uint16_t Cn = (Encoding & 0x780) >> 7;
2871  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2872
2873  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2874
2875  Operands.push_back(
2876      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2877  Operands.push_back(
2878      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2879  Operands.push_back(
2880      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2881  Expr = MCConstantExpr::create(Op2, getContext());
2882  Operands.push_back(
2883      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2884}
2885
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Also handles the cfp/dvp/cpp prediction-restriction aliases. The named
/// operand is looked up in the relevant table; if it exists but needs
/// features this subtarget lacks, the diagnostic lists the missing ones.
/// Returns true (with a diagnostic) on failure.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  // None of these aliases takes a dotted suffix on the mnemonic.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The PRCTX table only holds the upper encoding bits; op2 selects which
    // of the three prediction-restriction operations is meant.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose names contain "all" act globally and take no register operand.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
2983
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (e.g. DSB/DMB/ISB/TSB): either an immediate in [0, 15] or a named
/// barrier option. ISB only accepts the name 'sy'; TSB only 'csync'.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The barrier encoding is a 4-bit field.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name, if the encoding has one, for diagnostics
    // and printing.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3040
/// tryParseSysReg - Parse a system-register operand for MRS/MSR. A register
/// can match by name (when its required features are available), fall back
/// to the generic register spelling handled by parseGenericRegister, and/or
/// name a PState field; all three possibilities are recorded on the operand
/// and disambiguated later by the matcher.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    // A register may be readable, writeable, or both; -1 marks the
    // direction(s) in which it cannot be used.
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3069
/// tryParseNeonVectorRegister - Parse a vector register operand.
///
/// Returns true on failure, false on success. On success the register (and,
/// if present, its ".<kind>" qualifier and a "[index]" suffix) are pushed
/// onto Operands.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A trailing vector index is optional; only a malformed one is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3102
3103OperandMatchResultTy
3104AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3105  SMLoc SIdx = getLoc();
3106  if (parseOptionalToken(AsmToken::LBrac)) {
3107    const MCExpr *ImmVal;
3108    if (getParser().parseExpression(ImmVal))
3109      return MatchOperand_NoMatch;
3110    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3111    if (!MCE) {
3112      TokError("immediate value expected for vector index");
3113      return MatchOperand_ParseFail;;
3114    }
3115
3116    SMLoc E = getLoc();
3117
3118    if (parseToken(AsmToken::RBrac, "']' expected"))
3119      return MatchOperand_ParseFail;;
3120
3121    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3122                                                         E, getContext()));
3123    return MatchOperand_Success;
3124  }
3125
3126  return MatchOperand_NoMatch;
3127}
3128
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
//
// On success Reg holds the register number and Kind the ".<suffix>"
// (including the leading dot) or the empty string if no suffix was written.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Keep the '.' in Kind so it can be re-emitted as a literal token.
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Parser.Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3164
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Parses a predicate register with optional element-size suffix, followed
/// by an optional '/z' (zeroing) or '/m' (merging) qualifier. A predication
/// qualifier and a size suffix are mutually exclusive.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
    AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3217
3218/// parseRegister - Parse a register operand.
3219bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3220  // Try for a Neon vector register.
3221  if (!tryParseNeonVectorRegister(Operands))
3222    return false;
3223
3224  // Otherwise try for a scalar register.
3225  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3226    return false;
3227
3228  return true;
3229}
3230
/// parseSymbolicImmVal - Parse an immediate expression that may carry a
/// leading ":<relocation-specifier>:" modifier (e.g. ":lo12:symbol"). On
/// success ImmVal holds the parsed expression, wrapped in an AArch64MCExpr
/// when a modifier was present. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the modifier survives into relocation emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3308
/// Parse a vector register list operand of the form "{ vN.T, vM.T, ... }" or
/// "{ vN.T - vM.T }" for registers of kind \p VectorKind (NEON or SVE).
///
/// \param Operands   receives the resulting vector-list operand on success.
/// \param ExpectMatch if true, failing to parse the first register after '{'
///                   is reported as an error; if false it is a soft no-match
///                   so another list flavour can be tried.
/// \returns MatchOperand_Success, MatchOperand_NoMatch (lexer state restored),
///          or MatchOperand_ParseFail (diagnostic already emitted).
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function: parses one vector register and validates
  // its element-kind suffix; NoMatchIsError upgrades a no-match to an error
  // (used for every register after the first one).
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister only succeeds with a kind it can classify.
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{ vN.T - vM.T }".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap modulo 32, e.g. "{ v30 - v1 }" spans 3 regs.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vN.T, vN+1.T, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // An empty Kind (no ".T" suffix) leaves NumElements/ElementWidth as 0.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3429
3430/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3431bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3432  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3433  if (ParseRes != MatchOperand_Success)
3434    return true;
3435
3436  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3437}
3438
3439OperandMatchResultTy
3440AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3441  SMLoc StartLoc = getLoc();
3442
3443  unsigned RegNum;
3444  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3445  if (Res != MatchOperand_Success)
3446    return Res;
3447
3448  if (!parseOptionalToken(AsmToken::Comma)) {
3449    Operands.push_back(AArch64Operand::CreateReg(
3450        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3451    return MatchOperand_Success;
3452  }
3453
3454  parseOptionalToken(AsmToken::Hash);
3455
3456  if (getParser().getTok().isNot(AsmToken::Integer)) {
3457    Error(getLoc(), "index must be absent or #0");
3458    return MatchOperand_ParseFail;
3459  }
3460
3461  const MCExpr *ImmVal;
3462  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3463      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3464    Error(getLoc(), "index must be absent or #0");
3465    return MatchOperand_ParseFail;
3466  }
3467
3468  Operands.push_back(AArch64Operand::CreateReg(
3469      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3470  return MatchOperand_Success;
3471}
3472
/// Parse a scalar GPR operand, optionally followed by a shift/extend
/// modifier when \p ParseShiftExtend is set (e.g. "x0, lsl #3").
/// \p EqTy records the sub/super-register equality constraint attached to
/// the created register operand (used later by regsEqual for tied operands).
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single register operand rather than
  // pushing it separately.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3507
3508bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3509  MCAsmParser &Parser = getParser();
3510
3511  // Some SVE instructions have a decoration after the immediate, i.e.
3512  // "mul vl". We parse them here and add tokens, which must be present in the
3513  // asm string in the tablegen instruction.
3514  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3515  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3516  if (!Parser.getTok().getString().equals_lower("mul") ||
3517      !(NextIsVL || NextIsHash))
3518    return true;
3519
3520  Operands.push_back(
3521    AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3522  Parser.Lex(); // Eat the "mul"
3523
3524  if (NextIsVL) {
3525    Operands.push_back(
3526        AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3527    Parser.Lex(); // Eat the "vl"
3528    return false;
3529  }
3530
3531  if (NextIsHash) {
3532    Parser.Lex(); // Eat the #
3533    SMLoc S = getLoc();
3534
3535    // Parse immediate operand.
3536    const MCExpr *ImmVal;
3537    if (!Parser.parseExpression(ImmVal))
3538      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3539        Operands.push_back(AArch64Operand::CreateImm(
3540            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3541            getContext()));
3542        return MatchOperand_Success;
3543      }
3544  }
3545
3546  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3547}
3548
/// parseOperand - Parse an AArch64 instruction operand.  For now this parses
/// the operand regardless of the mnemonic.
///
/// \param isCondCode      the operand at this position is a condition code.
/// \param invertCondCode  the parsed condition code must be inverted (used
///                        for csel-style aliases such as cset/cinc).
/// \returns true on error (diagnostic emitted), false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First try the tablegen-generated custom operand parsers.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) symbolic immediate.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): returning GotShift implicitly converts the
    // OperandMatchResultTy enum to bool; it is correct only because
    // MatchOperand_Success == 0. Consider an explicit comparison.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as raw tokens, as the matcher expects.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the constant to a 16-bit chunk plus an LSL amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          // NOTE(review): S is never assigned on this path, so the
          // immediate's start location is a default-constructed SMLoc —
          // confirm whether Loc was intended here.
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3720
3721bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3722                                 const MCParsedAsmOperand &Op2) const {
3723  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3724  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3725  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3726      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3727    return MCTargetAsmParser::regsEqual(Op1, Op2);
3728
3729  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3730         "Testing equality of non-scalar registers not supported");
3731
3732  // Check if a registers match their sub/super register classes.
3733  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3734    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3735  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3736    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3737  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3738    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3739  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3740    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3741
3742  return false;
3743}
3744
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
/// Splits the mnemonic on '.' into tokens, recognizes condition-code branch
/// aliases and SYS aliases, then parses the comma-separated operand list.
/// \returns true on error (diagnostic emitted), false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize legacy un-dotted conditional branch spellings ("beq") to
  // the architectural "b.eq" form before splitting on '.'.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    // +1 skips the '.' so the suffix location points at the suffix text.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The operand position N determines
      // whether condition-code parsing (and inversion) applies.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3880
3881static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3882  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3883  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3884         (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3885         (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3886         (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3887         (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3888         (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3889}
3890
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Validates a fully-parsed MCInst: movprfx-prefix rules, unpredictable
// register combinations for loads/stores, and symbolic-immediate restrictions
// on ADD/SUB. Returns true (with a diagnostic) if the instruction is invalid.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it.  Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  // Writeback LDPs: base must not overlap either destination; also falls
  // through to the Rt==Rt2 check shared with the non-writeback forms.
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    LLVM_FALLTHROUGH;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  // Writeback FP/SIMD LDPs: the base is not a GPR so only Rt==Rt2 applies
  // (operand 0 is the writeback result, so Rt/Rt2 are operands 1 and 2).
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  // Writeback STPs: base must not overlap either source.
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  // Writeback single-register loads: base must not overlap the destination.
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  // Writeback single-register stores: base must not overlap the source.
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  // Exclusive stores: the status register must not overlap data or base
  // (base overlap is allowed when the base is SP).
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4168
// Forward declaration of the TableGen-generated mnemonic spell checker.  It
// returns a ", did you mean ...?"-style suggestion string (or an empty string)
// for an unrecognized mnemonic, filtered by the available feature bits.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
4172
// Translate an instruction-matcher failure code (one of the Match_* values
// produced by the TableGen-generated matcher) into a user-facing diagnostic
// emitted at \p Loc.  Always returns true (the result of Error()), signalling
// that matching failed.
//
// \param Loc       Source location the diagnostic is attached to.
// \param ErrCode   The Match_* code returned by the matcher.
// \param ErrorInfo Extra payload for codes that carry one; for
//                  Match_InvalidTiedOperand it is the index into \p Operands
//                  of the offending operand.
// \param Operands  The parsed operand list for the instruction.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // Tied-operand constraints can require the exact register or its
    // 32-bit/64-bit sub-/super-register form; pick the message from the
    // operand's own equality kind.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  // Generic matcher failures.
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-offset (index) diagnostics: each message spells out the required
  // multiple (the access scale) and the representable signed/unsigned range.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Register-offset addressing: W-register offsets take uxtw/sxtw, X-register
  // offsets take lsl/sxtx; the legal shift amount is fixed by the access size.
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range diagnostics.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE add/sub/cpy immediates: an 8-bit value with an optional 8-bit left
  // shift, so wider element types also accept multiples of 256.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector/SVE lane-index diagnostics.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: append a spelling suggestion computed from the first
    // token and the currently available subtarget features (the suggestion
    // string is empty if nothing is close enough).
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Scaled GPR64 offsets used by SVE addressing: the shift amount must match
  // the element size exactly (or be absent for byte accesses).
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE vector-offset (gather/scatter) addressing: Z register with a
  // type-dependent extend and shift.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  // Restricted (3-bit / 4-bit encoded) SVE vector registers.
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  // SVE predicate register diagnostics.
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  // SVE instructions that only accept one of two exact FP constants.
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    // Any code not handled above indicates a bug in the matcher tables.
    llvm_unreachable("unexpected error code!");
  }
}
4492
// Forward declaration of the TableGen-generated helper that maps a single
// subtarget feature bit value to its human-readable name (used when reporting
// which missing features an instruction requires).
static const char *getSubtargetFeatureName(uint64_t Val);
4494
4495bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4496                                               OperandVector &Operands,
4497                                               MCStreamer &Out,
4498                                               uint64_t &ErrorInfo,
4499                                               bool MatchingInlineAsm) {
4500  assert(!Operands.empty() && "Unexpect empty operand list!");
4501  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4502  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4503
4504  StringRef Tok = Op.getToken();
4505  unsigned NumOperands = Operands.size();
4506
4507  if (NumOperands == 4 && Tok == "lsl") {
4508    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4509    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4510    if (Op2.isScalarReg() && Op3.isImm()) {
4511      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4512      if (Op3CE) {
4513        uint64_t Op3Val = Op3CE->getValue();
4514        uint64_t NewOp3Val = 0;
4515        uint64_t NewOp4Val = 0;
4516        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4517                Op2.getReg())) {
4518          NewOp3Val = (32 - Op3Val) & 0x1f;
4519          NewOp4Val = 31 - Op3Val;
4520        } else {
4521          NewOp3Val = (64 - Op3Val) & 0x3f;
4522          NewOp4Val = 63 - Op3Val;
4523        }
4524
4525        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4526        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4527
4528        Operands[0] = AArch64Operand::CreateToken(
4529            "ubfm", false, Op.getStartLoc(), getContext());
4530        Operands.push_back(AArch64Operand::CreateImm(
4531            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4532        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4533                                                Op3.getEndLoc(), getContext());
4534      }
4535    }
4536  } else if (NumOperands == 4 && Tok == "bfc") {
4537    // FIXME: Horrible hack to handle BFC->BFM alias.
4538    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4539    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4540    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4541
4542    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4543      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4544      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4545
4546      if (LSBCE && WidthCE) {
4547        uint64_t LSB = LSBCE->getValue();
4548        uint64_t Width = WidthCE->getValue();
4549
4550        uint64_t RegWidth = 0;
4551        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4552                Op1.getReg()))
4553          RegWidth = 64;
4554        else
4555          RegWidth = 32;
4556
4557        if (LSB >= RegWidth)
4558          return Error(LSBOp.getStartLoc(),
4559                       "expected integer in range [0, 31]");
4560        if (Width < 1 || Width > RegWidth)
4561          return Error(WidthOp.getStartLoc(),
4562                       "expected integer in range [1, 32]");
4563
4564        uint64_t ImmR = 0;
4565        if (RegWidth == 32)
4566          ImmR = (32 - LSB) & 0x1f;
4567        else
4568          ImmR = (64 - LSB) & 0x3f;
4569
4570        uint64_t ImmS = Width - 1;
4571
4572        if (ImmR != 0 && ImmS >= ImmR)
4573          return Error(WidthOp.getStartLoc(),
4574                       "requested insert overflows register");
4575
4576        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4577        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4578        Operands[0] = AArch64Operand::CreateToken(
4579              "bfm", false, Op.getStartLoc(), getContext());
4580        Operands[2] = AArch64Operand::CreateReg(
4581            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4582            SMLoc(), SMLoc(), getContext());
4583        Operands[3] = AArch64Operand::CreateImm(
4584            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4585        Operands.emplace_back(
4586            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4587                                      WidthOp.getEndLoc(), getContext()));
4588      }
4589    }
4590  } else if (NumOperands == 5) {
4591    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4592    // UBFIZ -> UBFM aliases.
4593    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4594      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4595      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4596      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4597
4598      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4599        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4600        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4601
4602        if (Op3CE && Op4CE) {
4603          uint64_t Op3Val = Op3CE->getValue();
4604          uint64_t Op4Val = Op4CE->getValue();
4605
4606          uint64_t RegWidth = 0;
4607          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4608                  Op1.getReg()))
4609            RegWidth = 64;
4610          else
4611            RegWidth = 32;
4612
4613          if (Op3Val >= RegWidth)
4614            return Error(Op3.getStartLoc(),
4615                         "expected integer in range [0, 31]");
4616          if (Op4Val < 1 || Op4Val > RegWidth)
4617            return Error(Op4.getStartLoc(),
4618                         "expected integer in range [1, 32]");
4619
4620          uint64_t NewOp3Val = 0;
4621          if (RegWidth == 32)
4622            NewOp3Val = (32 - Op3Val) & 0x1f;
4623          else
4624            NewOp3Val = (64 - Op3Val) & 0x3f;
4625
4626          uint64_t NewOp4Val = Op4Val - 1;
4627
4628          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4629            return Error(Op4.getStartLoc(),
4630                         "requested insert overflows register");
4631
4632          const MCExpr *NewOp3 =
4633              MCConstantExpr::create(NewOp3Val, getContext());
4634          const MCExpr *NewOp4 =
4635              MCConstantExpr::create(NewOp4Val, getContext());
4636          Operands[3] = AArch64Operand::CreateImm(
4637              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4638          Operands[4] = AArch64Operand::CreateImm(
4639              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4640          if (Tok == "bfi")
4641            Operands[0] = AArch64Operand::CreateToken(
4642                "bfm", false, Op.getStartLoc(), getContext());
4643          else if (Tok == "sbfiz")
4644            Operands[0] = AArch64Operand::CreateToken(
4645                "sbfm", false, Op.getStartLoc(), getContext());
4646          else if (Tok == "ubfiz")
4647            Operands[0] = AArch64Operand::CreateToken(
4648                "ubfm", false, Op.getStartLoc(), getContext());
4649          else
4650            llvm_unreachable("No valid mnemonic for alias?");
4651        }
4652      }
4653
4654      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4655      // UBFX -> UBFM aliases.
4656    } else if (NumOperands == 5 &&
4657               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4658      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4659      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4660      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4661
4662      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4663        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4664        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4665
4666        if (Op3CE && Op4CE) {
4667          uint64_t Op3Val = Op3CE->getValue();
4668          uint64_t Op4Val = Op4CE->getValue();
4669
4670          uint64_t RegWidth = 0;
4671          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4672                  Op1.getReg()))
4673            RegWidth = 64;
4674          else
4675            RegWidth = 32;
4676
4677          if (Op3Val >= RegWidth)
4678            return Error(Op3.getStartLoc(),
4679                         "expected integer in range [0, 31]");
4680          if (Op4Val < 1 || Op4Val > RegWidth)
4681            return Error(Op4.getStartLoc(),
4682                         "expected integer in range [1, 32]");
4683
4684          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4685
4686          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4687            return Error(Op4.getStartLoc(),
4688                         "requested extract overflows register");
4689
4690          const MCExpr *NewOp4 =
4691              MCConstantExpr::create(NewOp4Val, getContext());
4692          Operands[4] = AArch64Operand::CreateImm(
4693              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4694          if (Tok == "bfxil")
4695            Operands[0] = AArch64Operand::CreateToken(
4696                "bfm", false, Op.getStartLoc(), getContext());
4697          else if (Tok == "sbfx")
4698            Operands[0] = AArch64Operand::CreateToken(
4699                "sbfm", false, Op.getStartLoc(), getContext());
4700          else if (Tok == "ubfx")
4701            Operands[0] = AArch64Operand::CreateToken(
4702                "ubfm", false, Op.getStartLoc(), getContext());
4703          else
4704            llvm_unreachable("No valid mnemonic for alias?");
4705        }
4706      }
4707    }
4708  }
4709
4710  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4711  // instruction for FP registers correctly in some rare circumstances. Convert
4712  // it to a safe instruction and warn (because silently changing someone's
4713  // assembly is rude).
4714  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4715      NumOperands == 4 && Tok == "movi") {
4716    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4717    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4718    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4719    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4720        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4721      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4722      if (Suffix.lower() == ".2d" &&
4723          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4724        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4725                " correctly on this CPU, converting to equivalent movi.16b");
4726        // Switch the suffix to .16b.
4727        unsigned Idx = Op1.isToken() ? 1 : 2;
4728        Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4729                                                  getContext());
4730      }
4731    }
4732  }
4733
4734  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4735  //        InstAlias can't quite handle this since the reg classes aren't
4736  //        subclasses.
4737  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4738    // The source register can be Wn here, but the matcher expects a
4739    // GPR64. Twiddle it here if necessary.
4740    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4741    if (Op.isScalarReg()) {
4742      unsigned Reg = getXRegFromWReg(Op.getReg());
4743      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4744                                              Op.getStartLoc(), Op.getEndLoc(),
4745                                              getContext());
4746    }
4747  }
4748  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4749  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4750    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4751    if (Op.isScalarReg() &&
4752        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4753            Op.getReg())) {
4754      // The source register can be Wn here, but the matcher expects a
4755      // GPR64. Twiddle it here if necessary.
4756      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4757      if (Op.isScalarReg()) {
4758        unsigned Reg = getXRegFromWReg(Op.getReg());
4759        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4760                                                Op.getStartLoc(),
4761                                                Op.getEndLoc(), getContext());
4762      }
4763    }
4764  }
4765  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4766  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4767    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4768    if (Op.isScalarReg() &&
4769        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4770            Op.getReg())) {
4771      // The source register can be Wn here, but the matcher expects a
4772      // GPR32. Twiddle it here if necessary.
4773      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4774      if (Op.isScalarReg()) {
4775        unsigned Reg = getWRegFromXReg(Op.getReg());
4776        Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4777                                                Op.getStartLoc(),
4778                                                Op.getEndLoc(), getContext());
4779      }
4780    }
4781  }
4782
4783  MCInst Inst;
4784  FeatureBitset MissingFeatures;
4785  // First try to match against the secondary set of tables containing the
4786  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4787  unsigned MatchResult =
4788      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4789                           MatchingInlineAsm, 1);
4790
4791  // If that fails, try against the alternate table containing long-form NEON:
4792  // "fadd v0.2s, v1.2s, v2.2s"
4793  if (MatchResult != Match_Success) {
4794    // But first, save the short-form match result: we can use it in case the
4795    // long-form match also fails.
4796    auto ShortFormNEONErrorInfo = ErrorInfo;
4797    auto ShortFormNEONMatchResult = MatchResult;
4798    auto ShortFormNEONMissingFeatures = MissingFeatures;
4799
4800    MatchResult =
4801        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4802                             MatchingInlineAsm, 0);
4803
4804    // Now, both matches failed, and the long-form match failed on the mnemonic
4805    // suffix token operand.  The short-form match failure is probably more
4806    // relevant: use it instead.
4807    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4808        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4809        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4810      MatchResult = ShortFormNEONMatchResult;
4811      ErrorInfo = ShortFormNEONErrorInfo;
4812      MissingFeatures = ShortFormNEONMissingFeatures;
4813    }
4814  }
4815
4816  switch (MatchResult) {
4817  case Match_Success: {
4818    // Perform range checking and other semantic validations
4819    SmallVector<SMLoc, 8> OperandLocs;
4820    NumOperands = Operands.size();
4821    for (unsigned i = 1; i < NumOperands; ++i)
4822      OperandLocs.push_back(Operands[i]->getStartLoc());
4823    if (validateInstruction(Inst, IDLoc, OperandLocs))
4824      return true;
4825
4826    Inst.setLoc(IDLoc);
4827    Out.EmitInstruction(Inst, getSTI());
4828    return false;
4829  }
4830  case Match_MissingFeature: {
4831    assert(MissingFeatures.any() && "Unknown missing feature!");
4832    // Special case the error message for the very common case where only
4833    // a single subtarget feature is missing (neon, e.g.).
4834    std::string Msg = "instruction requires:";
4835    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4836      if (MissingFeatures[i]) {
4837        Msg += " ";
4838        Msg += getSubtargetFeatureName(i);
4839      }
4840    }
4841    return Error(IDLoc, Msg);
4842  }
4843  case Match_MnemonicFail:
4844    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4845  case Match_InvalidOperand: {
4846    SMLoc ErrorLoc = IDLoc;
4847
4848    if (ErrorInfo != ~0ULL) {
4849      if (ErrorInfo >= Operands.size())
4850        return Error(IDLoc, "too few operands for instruction",
4851                     SMRange(IDLoc, getTok().getLoc()));
4852
4853      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4854      if (ErrorLoc == SMLoc())
4855        ErrorLoc = IDLoc;
4856    }
4857    // If the match failed on a suffix token operand, tweak the diagnostic
4858    // accordingly.
4859    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4860        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4861      MatchResult = Match_InvalidSuffix;
4862
4863    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4864  }
4865  case Match_InvalidTiedOperand:
4866  case Match_InvalidMemoryIndexed1:
4867  case Match_InvalidMemoryIndexed2:
4868  case Match_InvalidMemoryIndexed4:
4869  case Match_InvalidMemoryIndexed8:
4870  case Match_InvalidMemoryIndexed16:
4871  case Match_InvalidCondCode:
4872  case Match_AddSubRegExtendSmall:
4873  case Match_AddSubRegExtendLarge:
4874  case Match_AddSubSecondSource:
4875  case Match_LogicalSecondSource:
4876  case Match_AddSubRegShift32:
4877  case Match_AddSubRegShift64:
4878  case Match_InvalidMovImm32Shift:
4879  case Match_InvalidMovImm64Shift:
4880  case Match_InvalidFPImm:
4881  case Match_InvalidMemoryWExtend8:
4882  case Match_InvalidMemoryWExtend16:
4883  case Match_InvalidMemoryWExtend32:
4884  case Match_InvalidMemoryWExtend64:
4885  case Match_InvalidMemoryWExtend128:
4886  case Match_InvalidMemoryXExtend8:
4887  case Match_InvalidMemoryXExtend16:
4888  case Match_InvalidMemoryXExtend32:
4889  case Match_InvalidMemoryXExtend64:
4890  case Match_InvalidMemoryXExtend128:
4891  case Match_InvalidMemoryIndexed1SImm4:
4892  case Match_InvalidMemoryIndexed2SImm4:
4893  case Match_InvalidMemoryIndexed3SImm4:
4894  case Match_InvalidMemoryIndexed4SImm4:
4895  case Match_InvalidMemoryIndexed1SImm6:
4896  case Match_InvalidMemoryIndexed16SImm4:
4897  case Match_InvalidMemoryIndexed4SImm7:
4898  case Match_InvalidMemoryIndexed8SImm7:
4899  case Match_InvalidMemoryIndexed16SImm7:
4900  case Match_InvalidMemoryIndexed8UImm5:
4901  case Match_InvalidMemoryIndexed4UImm5:
4902  case Match_InvalidMemoryIndexed2UImm5:
4903  case Match_InvalidMemoryIndexed1UImm6:
4904  case Match_InvalidMemoryIndexed2UImm6:
4905  case Match_InvalidMemoryIndexed4UImm6:
4906  case Match_InvalidMemoryIndexed8UImm6:
4907  case Match_InvalidMemoryIndexed16UImm6:
4908  case Match_InvalidMemoryIndexedSImm6:
4909  case Match_InvalidMemoryIndexedSImm5:
4910  case Match_InvalidMemoryIndexedSImm8:
4911  case Match_InvalidMemoryIndexedSImm9:
4912  case Match_InvalidMemoryIndexed16SImm9:
4913  case Match_InvalidMemoryIndexed8SImm10:
4914  case Match_InvalidImm0_1:
4915  case Match_InvalidImm0_7:
4916  case Match_InvalidImm0_15:
4917  case Match_InvalidImm0_31:
4918  case Match_InvalidImm0_63:
4919  case Match_InvalidImm0_127:
4920  case Match_InvalidImm0_255:
4921  case Match_InvalidImm0_65535:
4922  case Match_InvalidImm1_8:
4923  case Match_InvalidImm1_16:
4924  case Match_InvalidImm1_32:
4925  case Match_InvalidImm1_64:
4926  case Match_InvalidSVEAddSubImm8:
4927  case Match_InvalidSVEAddSubImm16:
4928  case Match_InvalidSVEAddSubImm32:
4929  case Match_InvalidSVEAddSubImm64:
4930  case Match_InvalidSVECpyImm8:
4931  case Match_InvalidSVECpyImm16:
4932  case Match_InvalidSVECpyImm32:
4933  case Match_InvalidSVECpyImm64:
4934  case Match_InvalidIndexRange1_1:
4935  case Match_InvalidIndexRange0_15:
4936  case Match_InvalidIndexRange0_7:
4937  case Match_InvalidIndexRange0_3:
4938  case Match_InvalidIndexRange0_1:
4939  case Match_InvalidSVEIndexRange0_63:
4940  case Match_InvalidSVEIndexRange0_31:
4941  case Match_InvalidSVEIndexRange0_15:
4942  case Match_InvalidSVEIndexRange0_7:
4943  case Match_InvalidSVEIndexRange0_3:
4944  case Match_InvalidLabel:
4945  case Match_InvalidComplexRotationEven:
4946  case Match_InvalidComplexRotationOdd:
4947  case Match_InvalidGPR64shifted8:
4948  case Match_InvalidGPR64shifted16:
4949  case Match_InvalidGPR64shifted32:
4950  case Match_InvalidGPR64shifted64:
4951  case Match_InvalidGPR64NoXZRshifted8:
4952  case Match_InvalidGPR64NoXZRshifted16:
4953  case Match_InvalidGPR64NoXZRshifted32:
4954  case Match_InvalidGPR64NoXZRshifted64:
4955  case Match_InvalidZPR32UXTW8:
4956  case Match_InvalidZPR32UXTW16:
4957  case Match_InvalidZPR32UXTW32:
4958  case Match_InvalidZPR32UXTW64:
4959  case Match_InvalidZPR32SXTW8:
4960  case Match_InvalidZPR32SXTW16:
4961  case Match_InvalidZPR32SXTW32:
4962  case Match_InvalidZPR32SXTW64:
4963  case Match_InvalidZPR64UXTW8:
4964  case Match_InvalidZPR64SXTW8:
4965  case Match_InvalidZPR64UXTW16:
4966  case Match_InvalidZPR64SXTW16:
4967  case Match_InvalidZPR64UXTW32:
4968  case Match_InvalidZPR64SXTW32:
4969  case Match_InvalidZPR64UXTW64:
4970  case Match_InvalidZPR64SXTW64:
4971  case Match_InvalidZPR32LSL8:
4972  case Match_InvalidZPR32LSL16:
4973  case Match_InvalidZPR32LSL32:
4974  case Match_InvalidZPR32LSL64:
4975  case Match_InvalidZPR64LSL8:
4976  case Match_InvalidZPR64LSL16:
4977  case Match_InvalidZPR64LSL32:
4978  case Match_InvalidZPR64LSL64:
4979  case Match_InvalidZPR0:
4980  case Match_InvalidZPR8:
4981  case Match_InvalidZPR16:
4982  case Match_InvalidZPR32:
4983  case Match_InvalidZPR64:
4984  case Match_InvalidZPR128:
4985  case Match_InvalidZPR_3b8:
4986  case Match_InvalidZPR_3b16:
4987  case Match_InvalidZPR_3b32:
4988  case Match_InvalidZPR_4b16:
4989  case Match_InvalidZPR_4b32:
4990  case Match_InvalidZPR_4b64:
4991  case Match_InvalidSVEPredicateAnyReg:
4992  case Match_InvalidSVEPattern:
4993  case Match_InvalidSVEPredicateBReg:
4994  case Match_InvalidSVEPredicateHReg:
4995  case Match_InvalidSVEPredicateSReg:
4996  case Match_InvalidSVEPredicateDReg:
4997  case Match_InvalidSVEPredicate3bAnyReg:
4998  case Match_InvalidSVEPredicate3bBReg:
4999  case Match_InvalidSVEPredicate3bHReg:
5000  case Match_InvalidSVEPredicate3bSReg:
5001  case Match_InvalidSVEPredicate3bDReg:
5002  case Match_InvalidSVEExactFPImmOperandHalfOne:
5003  case Match_InvalidSVEExactFPImmOperandHalfTwo:
5004  case Match_InvalidSVEExactFPImmOperandZeroOne:
5005  case Match_MSR:
5006  case Match_MRS: {
5007    if (ErrorInfo >= Operands.size())
5008      return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5009    // Any time we get here, there's nothing fancy to do. Just get the
5010    // operand SMLoc and display the diagnostic.
5011    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5012    if (ErrorLoc == SMLoc())
5013      ErrorLoc = IDLoc;
5014    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5015  }
5016  }
5017
5018  llvm_unreachable("Implement any new match types added!");
5019}
5020
5021/// ParseDirective parses the arm specific directives
5022bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5023  const MCObjectFileInfo::Environment Format =
5024    getContext().getObjectFileInfo()->getObjectFileType();
5025  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5026
5027  StringRef IDVal = DirectiveID.getIdentifier();
5028  SMLoc Loc = DirectiveID.getLoc();
5029  if (IDVal == ".arch")
5030    parseDirectiveArch(Loc);
5031  else if (IDVal == ".cpu")
5032    parseDirectiveCPU(Loc);
5033  else if (IDVal == ".tlsdesccall")
5034    parseDirectiveTLSDescCall(Loc);
5035  else if (IDVal == ".ltorg" || IDVal == ".pool")
5036    parseDirectiveLtorg(Loc);
5037  else if (IDVal == ".unreq")
5038    parseDirectiveUnreq(Loc);
5039  else if (IDVal == ".inst")
5040    parseDirectiveInst(Loc);
5041  else if (IDVal == ".cfi_negate_ra_state")
5042    parseDirectiveCFINegateRAState();
5043  else if (IDVal == ".cfi_b_key_frame")
5044    parseDirectiveCFIBKeyFrame();
5045  else if (IDVal == ".arch_extension")
5046    parseDirectiveArchExtension(Loc);
5047  else if (IsMachO) {
5048    if (IDVal == MCLOHDirectiveName())
5049      parseDirectiveLOH(IDVal, Loc);
5050    else
5051      return true;
5052  } else
5053    return true;
5054  return false;
5055}
5056
5057static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5058                            SmallVector<StringRef, 4> &RequestedExtensions) {
5059  const bool NoCrypto =
5060      (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5061                 "nocrypto") != std::end(RequestedExtensions));
5062  const bool Crypto =
5063      (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5064                 "crypto") != std::end(RequestedExtensions));
5065
5066  if (!NoCrypto && Crypto) {
5067    switch (ArchKind) {
5068    default:
5069      // Map 'generic' (and others) to sha2 and aes, because
5070      // that was the traditional meaning of crypto.
5071    case AArch64::ArchKind::ARMV8_1A:
5072    case AArch64::ArchKind::ARMV8_2A:
5073    case AArch64::ArchKind::ARMV8_3A:
5074      RequestedExtensions.push_back("sha2");
5075      RequestedExtensions.push_back("aes");
5076      break;
5077    case AArch64::ArchKind::ARMV8_4A:
5078    case AArch64::ArchKind::ARMV8_5A:
5079      RequestedExtensions.push_back("sm4");
5080      RequestedExtensions.push_back("sha3");
5081      RequestedExtensions.push_back("sha2");
5082      RequestedExtensions.push_back("aes");
5083      break;
5084    }
5085  } else if (NoCrypto) {
5086    switch (ArchKind) {
5087    default:
5088      // Map 'generic' (and others) to sha2 and aes, because
5089      // that was the traditional meaning of crypto.
5090    case AArch64::ArchKind::ARMV8_1A:
5091    case AArch64::ArchKind::ARMV8_2A:
5092    case AArch64::ArchKind::ARMV8_3A:
5093      RequestedExtensions.push_back("nosha2");
5094      RequestedExtensions.push_back("noaes");
5095      break;
5096    case AArch64::ArchKind::ARMV8_4A:
5097    case AArch64::ArchKind::ARMV8_5A:
5098      RequestedExtensions.push_back("nosm4");
5099      RequestedExtensions.push_back("nosha3");
5100      RequestedExtensions.push_back("nosha2");
5101      RequestedExtensions.push_back("noaes");
5102      break;
5103    }
5104  }
5105}
5106
5107/// parseDirectiveArch
5108///   ::= .arch token
5109bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5110  SMLoc ArchLoc = getLoc();
5111
5112  StringRef Arch, ExtensionString;
5113  std::tie(Arch, ExtensionString) =
5114      getParser().parseStringToEndOfStatement().trim().split('+');
5115
5116  AArch64::ArchKind ID = AArch64::parseArch(Arch);
5117  if (ID == AArch64::ArchKind::INVALID)
5118    return Error(ArchLoc, "unknown arch name");
5119
5120  if (parseToken(AsmToken::EndOfStatement))
5121    return true;
5122
5123  // Get the architecture and extension features.
5124  std::vector<StringRef> AArch64Features;
5125  AArch64::getArchFeatures(ID, AArch64Features);
5126  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5127                                AArch64Features);
5128
5129  MCSubtargetInfo &STI = copySTI();
5130  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5131  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5132
5133  SmallVector<StringRef, 4> RequestedExtensions;
5134  if (!ExtensionString.empty())
5135    ExtensionString.split(RequestedExtensions, '+');
5136
5137  ExpandCryptoAEK(ID, RequestedExtensions);
5138
5139  FeatureBitset Features = STI.getFeatureBits();
5140  for (auto Name : RequestedExtensions) {
5141    bool EnableFeature = true;
5142
5143    if (Name.startswith_lower("no")) {
5144      EnableFeature = false;
5145      Name = Name.substr(2);
5146    }
5147
5148    for (const auto &Extension : ExtensionMap) {
5149      if (Extension.Name != Name)
5150        continue;
5151
5152      if (Extension.Features.none())
5153        report_fatal_error("unsupported architectural extension: " + Name);
5154
5155      FeatureBitset ToggleFeatures = EnableFeature
5156                                         ? (~Features & Extension.Features)
5157                                         : ( Features & Extension.Features);
5158      FeatureBitset Features =
5159          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5160      setAvailableFeatures(Features);
5161      break;
5162    }
5163  }
5164  return false;
5165}
5166
5167/// parseDirectiveArchExtension
5168///   ::= .arch_extension [no]feature
5169bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5170  SMLoc ExtLoc = getLoc();
5171
5172  StringRef Name = getParser().parseStringToEndOfStatement().trim();
5173
5174  if (parseToken(AsmToken::EndOfStatement,
5175                 "unexpected token in '.arch_extension' directive"))
5176    return true;
5177
5178  bool EnableFeature = true;
5179  if (Name.startswith_lower("no")) {
5180    EnableFeature = false;
5181    Name = Name.substr(2);
5182  }
5183
5184  MCSubtargetInfo &STI = copySTI();
5185  FeatureBitset Features = STI.getFeatureBits();
5186  for (const auto &Extension : ExtensionMap) {
5187    if (Extension.Name != Name)
5188      continue;
5189
5190    if (Extension.Features.none())
5191      return Error(ExtLoc, "unsupported architectural extension: " + Name);
5192
5193    FeatureBitset ToggleFeatures = EnableFeature
5194                                       ? (~Features & Extension.Features)
5195                                       : (Features & Extension.Features);
5196    FeatureBitset Features =
5197        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5198    setAvailableFeatures(Features);
5199    return false;
5200  }
5201
5202  return Error(ExtLoc, "unknown architectural extension: " + Name);
5203}
5204
5205static SMLoc incrementLoc(SMLoc L, int Offset) {
5206  return SMLoc::getFromPointer(L.getPointer() + Offset);
5207}
5208
5209/// parseDirectiveCPU
5210///   ::= .cpu id
5211bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5212  SMLoc CurLoc = getLoc();
5213
5214  StringRef CPU, ExtensionString;
5215  std::tie(CPU, ExtensionString) =
5216      getParser().parseStringToEndOfStatement().trim().split('+');
5217
5218  if (parseToken(AsmToken::EndOfStatement))
5219    return true;
5220
5221  SmallVector<StringRef, 4> RequestedExtensions;
5222  if (!ExtensionString.empty())
5223    ExtensionString.split(RequestedExtensions, '+');
5224
5225  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5226  // once that is tablegen'ed
5227  if (!getSTI().isCPUStringValid(CPU)) {
5228    Error(CurLoc, "unknown CPU name");
5229    return false;
5230  }
5231
5232  MCSubtargetInfo &STI = copySTI();
5233  STI.setDefaultFeatures(CPU, "");
5234  CurLoc = incrementLoc(CurLoc, CPU.size());
5235
5236  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5237
5238  FeatureBitset Features = STI.getFeatureBits();
5239  for (auto Name : RequestedExtensions) {
5240    // Advance source location past '+'.
5241    CurLoc = incrementLoc(CurLoc, 1);
5242
5243    bool EnableFeature = true;
5244
5245    if (Name.startswith_lower("no")) {
5246      EnableFeature = false;
5247      Name = Name.substr(2);
5248    }
5249
5250    bool FoundExtension = false;
5251    for (const auto &Extension : ExtensionMap) {
5252      if (Extension.Name != Name)
5253        continue;
5254
5255      if (Extension.Features.none())
5256        report_fatal_error("unsupported architectural extension: " + Name);
5257
5258      FeatureBitset ToggleFeatures = EnableFeature
5259                                         ? (~Features & Extension.Features)
5260                                         : ( Features & Extension.Features);
5261      FeatureBitset Features =
5262          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5263      setAvailableFeatures(Features);
5264      FoundExtension = true;
5265
5266      break;
5267    }
5268
5269    if (!FoundExtension)
5270      Error(CurLoc, "unsupported architectural extension");
5271
5272    CurLoc = incrementLoc(CurLoc, Name.size());
5273  }
5274  return false;
5275}
5276
5277/// parseDirectiveInst
5278///  ::= .inst opcode [, ...]
5279bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5280  if (getLexer().is(AsmToken::EndOfStatement))
5281    return Error(Loc, "expected expression following '.inst' directive");
5282
5283  auto parseOp = [&]() -> bool {
5284    SMLoc L = getLoc();
5285    const MCExpr *Expr = nullptr;
5286    if (check(getParser().parseExpression(Expr), L, "expected expression"))
5287      return true;
5288    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5289    if (check(!Value, L, "expected constant expression"))
5290      return true;
5291    getTargetStreamer().emitInst(Value->getValue());
5292    return false;
5293  };
5294
5295  if (parseMany(parseOp))
5296    return addErrorSuffix(" in '.inst' directive");
5297  return false;
5298}
5299
5300// parseDirectiveTLSDescCall:
5301//   ::= .tlsdesccall symbol
5302bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5303  StringRef Name;
5304  if (check(getParser().parseIdentifier(Name), L,
5305            "expected symbol after directive") ||
5306      parseToken(AsmToken::EndOfStatement))
5307    return true;
5308
5309  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5310  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5311  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5312
5313  MCInst Inst;
5314  Inst.setOpcode(AArch64::TLSDESCCALL);
5315  Inst.addOperand(MCOperand::createExpr(Expr));
5316
5317  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5318  return false;
5319}
5320
/// parseDirectiveLOH — parse a MachO linker-optimization-hint directive.
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): -1U converts to 4294967295 in this int64_t
    // comparison, so Ids above UINT32_MAX bypass the validity check
    // entirely — looks unintended; confirm against MCLOHType's range.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label arguments, creating (or
  // reusing) a symbol for each.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    // No trailing comma after the final argument.
    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
5371
5372/// parseDirectiveLtorg
5373///  ::= .ltorg | .pool
5374bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5375  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5376    return true;
5377  getTargetStreamer().emitCurrentConstantPool();
5378  return false;
5379}
5380
5381/// parseDirectiveReq
5382///  ::= name .req registername
5383bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5384  MCAsmParser &Parser = getParser();
5385  Parser.Lex(); // Eat the '.req' token.
5386  SMLoc SRegLoc = getLoc();
5387  RegKind RegisterKind = RegKind::Scalar;
5388  unsigned RegNum;
5389  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5390
5391  if (ParseRes != MatchOperand_Success) {
5392    StringRef Kind;
5393    RegisterKind = RegKind::NeonVector;
5394    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5395
5396    if (ParseRes == MatchOperand_ParseFail)
5397      return true;
5398
5399    if (ParseRes == MatchOperand_Success && !Kind.empty())
5400      return Error(SRegLoc, "vector register without type specifier expected");
5401  }
5402
5403  if (ParseRes != MatchOperand_Success) {
5404    StringRef Kind;
5405    RegisterKind = RegKind::SVEDataVector;
5406    ParseRes =
5407        tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5408
5409    if (ParseRes == MatchOperand_ParseFail)
5410      return true;
5411
5412    if (ParseRes == MatchOperand_Success && !Kind.empty())
5413      return Error(SRegLoc,
5414                   "sve vector register without type specifier expected");
5415  }
5416
5417  if (ParseRes != MatchOperand_Success) {
5418    StringRef Kind;
5419    RegisterKind = RegKind::SVEPredicateVector;
5420    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5421
5422    if (ParseRes == MatchOperand_ParseFail)
5423      return true;
5424
5425    if (ParseRes == MatchOperand_Success && !Kind.empty())
5426      return Error(SRegLoc,
5427                   "sve predicate register without type specifier expected");
5428  }
5429
5430  if (ParseRes != MatchOperand_Success)
5431    return Error(SRegLoc, "register name or alias expected");
5432
5433  // Shouldn't be anything else.
5434  if (parseToken(AsmToken::EndOfStatement,
5435                 "unexpected input in .req directive"))
5436    return true;
5437
5438  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5439  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5440    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5441
5442  return false;
5443}
5444
5445/// parseDirectiveUneq
5446///  ::= .unreq registername
5447bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5448  MCAsmParser &Parser = getParser();
5449  if (getTok().isNot(AsmToken::Identifier))
5450    return TokError("unexpected input in .unreq directive.");
5451  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5452  Parser.Lex(); // Eat the identifier.
5453  if (parseToken(AsmToken::EndOfStatement))
5454    return addErrorSuffix("in '.unreq' directive");
5455  return false;
5456}
5457
5458bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5459  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5460    return true;
5461  getStreamer().EmitCFINegateRAState();
5462  return false;
5463}
5464
5465/// parseDirectiveCFIBKeyFrame
5466/// ::= .cfi_b_key
5467bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5468  if (parseToken(AsmToken::EndOfStatement,
5469                 "unexpected token in '.cfi_b_key_frame'"))
5470    return true;
5471  getStreamer().EmitCFIBKeyFrame();
5472  return false;
5473}
5474
/// Classify a symbolic operand expression: extract any AArch64 ELF
/// modifier (e.g. ":lo12:"), any Darwin reference kind, and a constant
/// addend. Returns true if Expr is usable as a symbolic operand,
/// false otherwise.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Default outputs: no modifier, no addend.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific modifier wrapper, if present, and
  // classify what it wraps.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // A symbol-difference (SymB present) cannot be a symbolic operand.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
5516
5517/// Force static initialization.
5518extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
5519  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5520  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5521  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5522  RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
5523  RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
5524}
5525
5526#define GET_REGISTER_MATCHER
5527#define GET_SUBTARGET_FEATURE_NAME
5528#define GET_MATCHER_IMPLEMENTATION
5529#define GET_MNEMONIC_SPELL_CHECKER
5530#include "AArch64GenAsmMatcher.inc"
5531
5532// Define this matcher function after the auto-generated include so we
5533// have the match class enum definitions.
5534unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5535                                                      unsigned Kind) {
5536  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5537  // If the kind is a token for a literal immediate, check if our asm
5538  // operand matches. This is for InstAliases which have a fixed-value
5539  // immediate in the syntax.
5540  int64_t ExpectedVal;
5541  switch (Kind) {
5542  default:
5543    return Match_InvalidOperand;
5544  case MCK__HASH_0:
5545    ExpectedVal = 0;
5546    break;
5547  case MCK__HASH_1:
5548    ExpectedVal = 1;
5549    break;
5550  case MCK__HASH_12:
5551    ExpectedVal = 12;
5552    break;
5553  case MCK__HASH_16:
5554    ExpectedVal = 16;
5555    break;
5556  case MCK__HASH_2:
5557    ExpectedVal = 2;
5558    break;
5559  case MCK__HASH_24:
5560    ExpectedVal = 24;
5561    break;
5562  case MCK__HASH_3:
5563    ExpectedVal = 3;
5564    break;
5565  case MCK__HASH_32:
5566    ExpectedVal = 32;
5567    break;
5568  case MCK__HASH_4:
5569    ExpectedVal = 4;
5570    break;
5571  case MCK__HASH_48:
5572    ExpectedVal = 48;
5573    break;
5574  case MCK__HASH_6:
5575    ExpectedVal = 6;
5576    break;
5577  case MCK__HASH_64:
5578    ExpectedVal = 64;
5579    break;
5580  case MCK__HASH_8:
5581    ExpectedVal = 8;
5582    break;
5583  }
5584  if (!Op.isImm())
5585    return Match_InvalidOperand;
5586  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5587  if (!CE)
5588    return Match_InvalidOperand;
5589  if (CE->getValue() == ExpectedVal)
5590    return Match_Success;
5591  return Match_InvalidOperand;
5592}
5593
/// Parse a consecutive even/odd GPR pair (e.g. "x0, x1" or "w2, w3")
/// and push the corresponding sequential-pair super-register operand.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register decides the pair's width; it must be a W or X reg.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register of the pair must have an even encoding.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // The second register must be the next encoding up and the same width.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the even register onto the sequential-pair super-register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
5666
5667template <bool ParseShiftExtend, bool ParseSuffix>
5668OperandMatchResultTy
5669AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5670  const SMLoc S = getLoc();
5671  // Check for a SVE vector register specifier first.
5672  unsigned RegNum;
5673  StringRef Kind;
5674
5675  OperandMatchResultTy Res =
5676      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5677
5678  if (Res != MatchOperand_Success)
5679    return Res;
5680
5681  if (ParseSuffix && Kind.empty())
5682    return MatchOperand_NoMatch;
5683
5684  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5685  if (!KindRes)
5686    return MatchOperand_NoMatch;
5687
5688  unsigned ElementWidth = KindRes->second;
5689
5690  // No shift/extend is the default.
5691  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5692    Operands.push_back(AArch64Operand::CreateVectorReg(
5693        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5694
5695    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5696    if (Res == MatchOperand_ParseFail)
5697      return MatchOperand_ParseFail;
5698    return MatchOperand_Success;
5699  }
5700
5701  // Eat the comma
5702  getParser().Lex();
5703
5704  // Match the shift
5705  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5706  Res = tryParseOptionalShiftExtend(ExtOpnd);
5707  if (Res != MatchOperand_Success)
5708    return Res;
5709
5710  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5711  Operands.push_back(AArch64Operand::CreateVectorReg(
5712      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5713      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5714      Ext->hasShiftExtendAmount()));
5715
5716  return MatchOperand_Success;
5717}
5718
5719OperandMatchResultTy
5720AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5721  MCAsmParser &Parser = getParser();
5722
5723  SMLoc SS = getLoc();
5724  const AsmToken &TokE = Parser.getTok();
5725  bool IsHash = TokE.is(AsmToken::Hash);
5726
5727  if (!IsHash && TokE.isNot(AsmToken::Identifier))
5728    return MatchOperand_NoMatch;
5729
5730  int64_t Pattern;
5731  if (IsHash) {
5732    Parser.Lex(); // Eat hash
5733
5734    // Parse the immediate operand.
5735    const MCExpr *ImmVal;
5736    SS = getLoc();
5737    if (Parser.parseExpression(ImmVal))
5738      return MatchOperand_ParseFail;
5739
5740    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5741    if (!MCE)
5742      return MatchOperand_ParseFail;
5743
5744    Pattern = MCE->getValue();
5745  } else {
5746    // Parse the pattern
5747    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5748    if (!Pat)
5749      return MatchOperand_NoMatch;
5750
5751    Parser.Lex();
5752    Pattern = Pat->Encoding;
5753    assert(Pattern >= 0 && Pattern < 32);
5754  }
5755
5756  Operands.push_back(
5757      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5758                                SS, getLoc(), getContext()));
5759
5760  return MatchOperand_Success;
5761}
5762