//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
9 
#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
58 
59 using namespace llvm;
60 
61 namespace {
62 
/// The broad categories of registers the parser distinguishes while matching
/// register names and vector suffixes.
enum class RegKind {
  Scalar,             ///< General-purpose or FP/SIMD scalar register.
  NeonVector,         ///< Advanced SIMD (NEON) vector register.
  SVEDataVector,      ///< SVE Z data vector register.
  SVEPredicateVector  ///< SVE P predicate register.
};
69 
/// How a parsed register is allowed to relate to the register class the
/// matcher expects (e.g. a W register matching a GPR64as32 operand).
enum RegConstraintEqualityTy {
  EqualsReg,      ///< Register must match exactly.
  EqualsSuperReg, ///< Register's super-register satisfies the constraint.
  EqualsSubReg    ///< Register's sub-register satisfies the constraint.
};
75 
76 class AArch64AsmParser : public MCTargetAsmParser {
77 private:
78   StringRef Mnemonic; ///< Instruction mnemonic.
79 
80   // Map of register aliases registers via the .req directive.
81   StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
82 
83   class PrefixInfo {
84   public:
CreateFromInst(const MCInst & Inst,uint64_t TSFlags)85     static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
86       PrefixInfo Prefix;
87       switch (Inst.getOpcode()) {
88       case AArch64::MOVPRFX_ZZ:
89         Prefix.Active = true;
90         Prefix.Dst = Inst.getOperand(0).getReg();
91         break;
92       case AArch64::MOVPRFX_ZPmZ_B:
93       case AArch64::MOVPRFX_ZPmZ_H:
94       case AArch64::MOVPRFX_ZPmZ_S:
95       case AArch64::MOVPRFX_ZPmZ_D:
96         Prefix.Active = true;
97         Prefix.Predicated = true;
98         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
99         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
100                "No destructive element size set for movprfx");
101         Prefix.Dst = Inst.getOperand(0).getReg();
102         Prefix.Pg = Inst.getOperand(2).getReg();
103         break;
104       case AArch64::MOVPRFX_ZPzZ_B:
105       case AArch64::MOVPRFX_ZPzZ_H:
106       case AArch64::MOVPRFX_ZPzZ_S:
107       case AArch64::MOVPRFX_ZPzZ_D:
108         Prefix.Active = true;
109         Prefix.Predicated = true;
110         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
111         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
112                "No destructive element size set for movprfx");
113         Prefix.Dst = Inst.getOperand(0).getReg();
114         Prefix.Pg = Inst.getOperand(1).getReg();
115         break;
116       default:
117         break;
118       }
119 
120       return Prefix;
121     }
122 
PrefixInfo()123     PrefixInfo() : Active(false), Predicated(false) {}
isActive() const124     bool isActive() const { return Active; }
isPredicated() const125     bool isPredicated() const { return Predicated; }
getElementSize() const126     unsigned getElementSize() const {
127       assert(Predicated);
128       return ElementSize;
129     }
getDstReg() const130     unsigned getDstReg() const { return Dst; }
getPgReg() const131     unsigned getPgReg() const {
132       assert(Predicated);
133       return Pg;
134     }
135 
136   private:
137     bool Active;
138     bool Predicated;
139     unsigned ElementSize;
140     unsigned Dst;
141     unsigned Pg;
142   } NextPrefix;
143 
getTargetStreamer()144   AArch64TargetStreamer &getTargetStreamer() {
145     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
146     return static_cast<AArch64TargetStreamer &>(TS);
147   }
148 
getLoc() const149   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
150 
151   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
152   void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
153   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
154   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
155   unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
156   bool parseRegister(OperandVector &Operands);
157   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
158   bool parseNeonVectorList(OperandVector &Operands);
159   bool parseOptionalMulOperand(OperandVector &Operands);
160   bool parseOperand(OperandVector &Operands, bool isCondCode,
161                     bool invertCondCode);
162 
163   bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
164                       OperandVector &Operands);
165 
166   bool parseDirectiveArch(SMLoc L);
167   bool parseDirectiveCPU(SMLoc L);
168   bool parseDirectiveInst(SMLoc L);
169 
170   bool parseDirectiveTLSDescCall(SMLoc L);
171 
172   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
173   bool parseDirectiveLtorg(SMLoc L);
174 
175   bool parseDirectiveReq(StringRef Name, SMLoc L);
176   bool parseDirectiveUnreq(SMLoc L);
177 
178   bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
179                            SmallVectorImpl<SMLoc> &Loc);
180   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
181                                OperandVector &Operands, MCStreamer &Out,
182                                uint64_t &ErrorInfo,
183                                bool MatchingInlineAsm) override;
184 /// @name Auto-generated Match Functions
185 /// {
186 
187 #define GET_ASSEMBLER_HEADER
188 #include "AArch64GenAsmMatcher.inc"
189 
190   /// }
191 
192   OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
193   OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
194                                               RegKind MatchKind);
195   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
196   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
197   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
198   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
199   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
200   template <bool IsSVEPrefetch = false>
201   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
202   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
203   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
204   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
205   template<bool AddFPZeroAsLiteral>
206   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
207   OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
208   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
209   bool tryParseNeonVectorRegister(OperandVector &Operands);
210   OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
211   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
212   template <bool ParseShiftExtend,
213             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
214   OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
215   template <bool ParseShiftExtend, bool ParseSuffix>
216   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
217   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
218   template <RegKind VectorKind>
219   OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
220                                           bool ExpectMatch = false);
221   OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
222 
223 public:
224   enum AArch64MatchResultTy {
225     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
226 #define GET_OPERAND_DIAGNOSTIC_TYPES
227 #include "AArch64GenAsmMatcher.inc"
228   };
229   bool IsILP32;
230 
AArch64AsmParser(const MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)231   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
232                    const MCInstrInfo &MII, const MCTargetOptions &Options)
233     : MCTargetAsmParser(Options, STI, MII) {
234     IsILP32 = Options.getABIName() == "ilp32";
235     MCAsmParserExtension::Initialize(Parser);
236     MCStreamer &S = getParser().getStreamer();
237     if (S.getTargetStreamer() == nullptr)
238       new AArch64TargetStreamer(S);
239 
240     // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
241     // directives as they have the same form and semantics:
242     ///  ::= (.hword | .word | .xword ) [ expression (, expression)* ]
243     Parser.addAliasForDirective(".hword", ".2byte");
244     Parser.addAliasForDirective(".word", ".4byte");
245     Parser.addAliasForDirective(".xword", ".8byte");
246 
247     // Initialize the set of available features.
248     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
249   }
250 
251   bool regsEqual(const MCParsedAsmOperand &Op1,
252                  const MCParsedAsmOperand &Op2) const override;
253   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
254                         SMLoc NameLoc, OperandVector &Operands) override;
255   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
256   bool ParseDirective(AsmToken DirectiveID) override;
257   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
258                                       unsigned Kind) override;
259 
260   static bool classifySymbolRef(const MCExpr *Expr,
261                                 AArch64MCExpr::VariantKind &ELFRefKind,
262                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
263                                 int64_t &Addend);
264 };
265 
266 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
267 /// instruction.
268 class AArch64Operand : public MCParsedAsmOperand {
269 private:
270   enum KindTy {
271     k_Immediate,
272     k_ShiftedImm,
273     k_CondCode,
274     k_Register,
275     k_VectorList,
276     k_VectorIndex,
277     k_Token,
278     k_SysReg,
279     k_SysCR,
280     k_Prefetch,
281     k_ShiftExtend,
282     k_FPImm,
283     k_Barrier,
284     k_PSBHint,
285   } Kind;
286 
287   SMLoc StartLoc, EndLoc;
288 
289   struct TokOp {
290     const char *Data;
291     unsigned Length;
292     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
293   };
294 
295   // Separate shift/extend operand.
296   struct ShiftExtendOp {
297     AArch64_AM::ShiftExtendType Type;
298     unsigned Amount;
299     bool HasExplicitAmount;
300   };
301 
302   struct RegOp {
303     unsigned RegNum;
304     RegKind Kind;
305     int ElementWidth;
306 
307     // The register may be allowed as a different register class,
308     // e.g. for GPR64as32 or GPR32as64.
309     RegConstraintEqualityTy EqualityTy;
310 
311     // In some cases the shift/extend needs to be explicitly parsed together
312     // with the register, rather than as a separate operand. This is needed
313     // for addressing modes where the instruction as a whole dictates the
314     // scaling/extend, rather than specific bits in the instruction.
315     // By parsing them as a single operand, we avoid the need to pass an
316     // extra operand in all CodeGen patterns (because all operands need to
317     // have an associated value), and we avoid the need to update TableGen to
318     // accept operands that have no associated bits in the instruction.
319     //
320     // An added benefit of parsing them together is that the assembler
321     // can give a sensible diagnostic if the scaling is not correct.
322     //
323     // The default is 'lsl #0' (HasExplicitAmount = false) if no
324     // ShiftExtend is specified.
325     ShiftExtendOp ShiftExtend;
326   };
327 
328   struct VectorListOp {
329     unsigned RegNum;
330     unsigned Count;
331     unsigned NumElements;
332     unsigned ElementWidth;
333     RegKind  RegisterKind;
334   };
335 
336   struct VectorIndexOp {
337     unsigned Val;
338   };
339 
340   struct ImmOp {
341     const MCExpr *Val;
342   };
343 
344   struct ShiftedImmOp {
345     const MCExpr *Val;
346     unsigned ShiftAmount;
347   };
348 
349   struct CondCodeOp {
350     AArch64CC::CondCode Code;
351   };
352 
353   struct FPImmOp {
354     uint64_t Val; // APFloat value bitcasted to uint64_t.
355     bool IsExact; // describes whether parsed value was exact.
356   };
357 
358   struct BarrierOp {
359     const char *Data;
360     unsigned Length;
361     unsigned Val; // Not the enum since not all values have names.
362   };
363 
364   struct SysRegOp {
365     const char *Data;
366     unsigned Length;
367     uint32_t MRSReg;
368     uint32_t MSRReg;
369     uint32_t PStateField;
370   };
371 
372   struct SysCRImmOp {
373     unsigned Val;
374   };
375 
376   struct PrefetchOp {
377     const char *Data;
378     unsigned Length;
379     unsigned Val;
380   };
381 
382   struct PSBHintOp {
383     const char *Data;
384     unsigned Length;
385     unsigned Val;
386   };
387 
388   struct ExtendOp {
389     unsigned Val;
390   };
391 
392   union {
393     struct TokOp Tok;
394     struct RegOp Reg;
395     struct VectorListOp VectorList;
396     struct VectorIndexOp VectorIndex;
397     struct ImmOp Imm;
398     struct ShiftedImmOp ShiftedImm;
399     struct CondCodeOp CondCode;
400     struct FPImmOp FPImm;
401     struct BarrierOp Barrier;
402     struct SysRegOp SysReg;
403     struct SysCRImmOp SysCRImm;
404     struct PrefetchOp Prefetch;
405     struct PSBHintOp PSBHint;
406     struct ShiftExtendOp ShiftExtend;
407   };
408 
409   // Keep the MCContext around as the MCExprs may need manipulated during
410   // the add<>Operands() calls.
411   MCContext &Ctx;
412 
413 public:
AArch64Operand(KindTy K,MCContext & Ctx)414   AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
415 
AArch64Operand(const AArch64Operand & o)416   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
417     Kind = o.Kind;
418     StartLoc = o.StartLoc;
419     EndLoc = o.EndLoc;
420     switch (Kind) {
421     case k_Token:
422       Tok = o.Tok;
423       break;
424     case k_Immediate:
425       Imm = o.Imm;
426       break;
427     case k_ShiftedImm:
428       ShiftedImm = o.ShiftedImm;
429       break;
430     case k_CondCode:
431       CondCode = o.CondCode;
432       break;
433     case k_FPImm:
434       FPImm = o.FPImm;
435       break;
436     case k_Barrier:
437       Barrier = o.Barrier;
438       break;
439     case k_Register:
440       Reg = o.Reg;
441       break;
442     case k_VectorList:
443       VectorList = o.VectorList;
444       break;
445     case k_VectorIndex:
446       VectorIndex = o.VectorIndex;
447       break;
448     case k_SysReg:
449       SysReg = o.SysReg;
450       break;
451     case k_SysCR:
452       SysCRImm = o.SysCRImm;
453       break;
454     case k_Prefetch:
455       Prefetch = o.Prefetch;
456       break;
457     case k_PSBHint:
458       PSBHint = o.PSBHint;
459       break;
460     case k_ShiftExtend:
461       ShiftExtend = o.ShiftExtend;
462       break;
463     }
464   }
465 
466   /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const467   SMLoc getStartLoc() const override { return StartLoc; }
468   /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const469   SMLoc getEndLoc() const override { return EndLoc; }
470 
getToken() const471   StringRef getToken() const {
472     assert(Kind == k_Token && "Invalid access!");
473     return StringRef(Tok.Data, Tok.Length);
474   }
475 
isTokenSuffix() const476   bool isTokenSuffix() const {
477     assert(Kind == k_Token && "Invalid access!");
478     return Tok.IsSuffix;
479   }
480 
getImm() const481   const MCExpr *getImm() const {
482     assert(Kind == k_Immediate && "Invalid access!");
483     return Imm.Val;
484   }
485 
getShiftedImmVal() const486   const MCExpr *getShiftedImmVal() const {
487     assert(Kind == k_ShiftedImm && "Invalid access!");
488     return ShiftedImm.Val;
489   }
490 
getShiftedImmShift() const491   unsigned getShiftedImmShift() const {
492     assert(Kind == k_ShiftedImm && "Invalid access!");
493     return ShiftedImm.ShiftAmount;
494   }
495 
getCondCode() const496   AArch64CC::CondCode getCondCode() const {
497     assert(Kind == k_CondCode && "Invalid access!");
498     return CondCode.Code;
499   }
500 
getFPImm() const501   APFloat getFPImm() const {
502     assert (Kind == k_FPImm && "Invalid access!");
503     return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
504   }
505 
getFPImmIsExact() const506   bool getFPImmIsExact() const {
507     assert (Kind == k_FPImm && "Invalid access!");
508     return FPImm.IsExact;
509   }
510 
getBarrier() const511   unsigned getBarrier() const {
512     assert(Kind == k_Barrier && "Invalid access!");
513     return Barrier.Val;
514   }
515 
getBarrierName() const516   StringRef getBarrierName() const {
517     assert(Kind == k_Barrier && "Invalid access!");
518     return StringRef(Barrier.Data, Barrier.Length);
519   }
520 
getReg() const521   unsigned getReg() const override {
522     assert(Kind == k_Register && "Invalid access!");
523     return Reg.RegNum;
524   }
525 
getRegEqualityTy() const526   RegConstraintEqualityTy getRegEqualityTy() const {
527     assert(Kind == k_Register && "Invalid access!");
528     return Reg.EqualityTy;
529   }
530 
getVectorListStart() const531   unsigned getVectorListStart() const {
532     assert(Kind == k_VectorList && "Invalid access!");
533     return VectorList.RegNum;
534   }
535 
getVectorListCount() const536   unsigned getVectorListCount() const {
537     assert(Kind == k_VectorList && "Invalid access!");
538     return VectorList.Count;
539   }
540 
getVectorIndex() const541   unsigned getVectorIndex() const {
542     assert(Kind == k_VectorIndex && "Invalid access!");
543     return VectorIndex.Val;
544   }
545 
getSysReg() const546   StringRef getSysReg() const {
547     assert(Kind == k_SysReg && "Invalid access!");
548     return StringRef(SysReg.Data, SysReg.Length);
549   }
550 
getSysCR() const551   unsigned getSysCR() const {
552     assert(Kind == k_SysCR && "Invalid access!");
553     return SysCRImm.Val;
554   }
555 
getPrefetch() const556   unsigned getPrefetch() const {
557     assert(Kind == k_Prefetch && "Invalid access!");
558     return Prefetch.Val;
559   }
560 
getPSBHint() const561   unsigned getPSBHint() const {
562     assert(Kind == k_PSBHint && "Invalid access!");
563     return PSBHint.Val;
564   }
565 
getPSBHintName() const566   StringRef getPSBHintName() const {
567     assert(Kind == k_PSBHint && "Invalid access!");
568     return StringRef(PSBHint.Data, PSBHint.Length);
569   }
570 
getPrefetchName() const571   StringRef getPrefetchName() const {
572     assert(Kind == k_Prefetch && "Invalid access!");
573     return StringRef(Prefetch.Data, Prefetch.Length);
574   }
575 
getShiftExtendType() const576   AArch64_AM::ShiftExtendType getShiftExtendType() const {
577     if (Kind == k_ShiftExtend)
578       return ShiftExtend.Type;
579     if (Kind == k_Register)
580       return Reg.ShiftExtend.Type;
581     llvm_unreachable("Invalid access!");
582   }
583 
getShiftExtendAmount() const584   unsigned getShiftExtendAmount() const {
585     if (Kind == k_ShiftExtend)
586       return ShiftExtend.Amount;
587     if (Kind == k_Register)
588       return Reg.ShiftExtend.Amount;
589     llvm_unreachable("Invalid access!");
590   }
591 
hasShiftExtendAmount() const592   bool hasShiftExtendAmount() const {
593     if (Kind == k_ShiftExtend)
594       return ShiftExtend.HasExplicitAmount;
595     if (Kind == k_Register)
596       return Reg.ShiftExtend.HasExplicitAmount;
597     llvm_unreachable("Invalid access!");
598   }
599 
isImm() const600   bool isImm() const override { return Kind == k_Immediate; }
isMem() const601   bool isMem() const override { return false; }
602 
isUImm6() const603   bool isUImm6() const {
604     if (!isImm())
605       return false;
606     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607     if (!MCE)
608       return false;
609     int64_t Val = MCE->getValue();
610     return (Val >= 0 && Val < 64);
611   }
612 
isSImm() const613   template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
614 
isSImmScaled() const615   template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
616     return isImmScaled<Bits, Scale>(true);
617   }
618 
isUImmScaled() const619   template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
620     return isImmScaled<Bits, Scale>(false);
621   }
622 
623   template <int Bits, int Scale>
isImmScaled(bool Signed) const624   DiagnosticPredicate isImmScaled(bool Signed) const {
625     if (!isImm())
626       return DiagnosticPredicateTy::NoMatch;
627 
628     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
629     if (!MCE)
630       return DiagnosticPredicateTy::NoMatch;
631 
632     int64_t MinVal, MaxVal;
633     if (Signed) {
634       int64_t Shift = Bits - 1;
635       MinVal = (int64_t(1) << Shift) * -Scale;
636       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
637     } else {
638       MinVal = 0;
639       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
640     }
641 
642     int64_t Val = MCE->getValue();
643     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
644       return DiagnosticPredicateTy::Match;
645 
646     return DiagnosticPredicateTy::NearMatch;
647   }
648 
isSVEPattern() const649   DiagnosticPredicate isSVEPattern() const {
650     if (!isImm())
651       return DiagnosticPredicateTy::NoMatch;
652     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
653     if (!MCE)
654       return DiagnosticPredicateTy::NoMatch;
655     int64_t Val = MCE->getValue();
656     if (Val >= 0 && Val < 32)
657       return DiagnosticPredicateTy::Match;
658     return DiagnosticPredicateTy::NearMatch;
659   }
660 
isSymbolicUImm12Offset(const MCExpr * Expr,unsigned Scale) const661   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
662     AArch64MCExpr::VariantKind ELFRefKind;
663     MCSymbolRefExpr::VariantKind DarwinRefKind;
664     int64_t Addend;
665     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
666                                            Addend)) {
667       // If we don't understand the expression, assume the best and
668       // let the fixup and relocation code deal with it.
669       return true;
670     }
671 
672     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
673         ELFRefKind == AArch64MCExpr::VK_LO12 ||
674         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
675         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
676         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
677         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
678         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
679         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
680         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
681         ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
682         ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
683       // Note that we don't range-check the addend. It's adjusted modulo page
684       // size when converted, so there is no "out of range" condition when using
685       // @pageoff.
686       return Addend >= 0 && (Addend % Scale) == 0;
687     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
688                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
689       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
690       return Addend == 0;
691     }
692 
693     return false;
694   }
695 
isUImm12Offset() const696   template <int Scale> bool isUImm12Offset() const {
697     if (!isImm())
698       return false;
699 
700     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
701     if (!MCE)
702       return isSymbolicUImm12Offset(getImm(), Scale);
703 
704     int64_t Val = MCE->getValue();
705     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
706   }
707 
708   template <int N, int M>
isImmInRange() const709   bool isImmInRange() const {
710     if (!isImm())
711       return false;
712     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
713     if (!MCE)
714       return false;
715     int64_t Val = MCE->getValue();
716     return (Val >= N && Val <= M);
717   }
718 
719   // NOTE: Also used for isLogicalImmNot as anything that can be represented as
720   // a logical immediate can always be represented when inverted.
721   template <typename T>
isLogicalImm() const722   bool isLogicalImm() const {
723     if (!isImm())
724       return false;
725     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726     if (!MCE)
727       return false;
728 
729     int64_t Val = MCE->getValue();
730     int64_t SVal = typename std::make_signed<T>::type(Val);
731     int64_t UVal = typename std::make_unsigned<T>::type(Val);
732     if (Val != SVal && Val != UVal)
733       return false;
734 
735     return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
736   }
737 
isShiftedImm() const738   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
739 
740   /// Returns the immediate value as a pair of (imm, shift) if the immediate is
741   /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
742   /// immediate that can be shifted by 'Shift'.
743   template <unsigned Width>
getShiftedVal() const744   Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
745     if (isShiftedImm() && Width == getShiftedImmShift())
746       if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
747         return std::make_pair(CE->getValue(), Width);
748 
749     if (isImm())
750       if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
751         int64_t Val = CE->getValue();
752         if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
753           return std::make_pair(Val >> Width, Width);
754         else
755           return std::make_pair(Val, 0u);
756       }
757 
758     return {};
759   }
760 
isAddSubImm() const761   bool isAddSubImm() const {
762     if (!isShiftedImm() && !isImm())
763       return false;
764 
765     const MCExpr *Expr;
766 
767     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
768     if (isShiftedImm()) {
769       unsigned Shift = ShiftedImm.ShiftAmount;
770       Expr = ShiftedImm.Val;
771       if (Shift != 0 && Shift != 12)
772         return false;
773     } else {
774       Expr = getImm();
775     }
776 
777     AArch64MCExpr::VariantKind ELFRefKind;
778     MCSymbolRefExpr::VariantKind DarwinRefKind;
779     int64_t Addend;
780     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
781                                           DarwinRefKind, Addend)) {
782       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
783           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
784           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
785           || ELFRefKind == AArch64MCExpr::VK_LO12
786           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
787           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
788           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
789           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
790           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
791           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
792           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
793           || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
794           || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
795     }
796 
797     // If it's a constant, it should be a real immediate in range.
798     if (auto ShiftedVal = getShiftedVal<12>())
799       return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
800 
801     // If it's an expression, we hope for the best and let the fixup/relocation
802     // code deal with it.
803     return true;
804   }
805 
isAddSubImmNeg() const806   bool isAddSubImmNeg() const {
807     if (!isShiftedImm() && !isImm())
808       return false;
809 
810     // Otherwise it should be a real negative immediate in range.
811     if (auto ShiftedVal = getShiftedVal<12>())
812       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
813 
814     return false;
815   }
816 
817   // Signed value in the range -128 to +127. For element widths of
818   // 16 bits or higher it may also be a signed multiple of 256 in the
819   // range -32768 to +32512.
820   // For element-width of 8 bits a range of -128 to 255 is accepted,
821   // since a copy of a byte can be either signed/unsigned.
822   template <typename T>
isSVECpyImm() const823   DiagnosticPredicate isSVECpyImm() const {
824     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
825       return DiagnosticPredicateTy::NoMatch;
826 
827     bool IsByte =
828         std::is_same<int8_t, typename std::make_signed<T>::type>::value;
829     if (auto ShiftedImm = getShiftedVal<8>())
830       if (!(IsByte && ShiftedImm->second) &&
831           AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
832                                      << ShiftedImm->second))
833         return DiagnosticPredicateTy::Match;
834 
835     return DiagnosticPredicateTy::NearMatch;
836   }
837 
838   // Unsigned value in the range 0 to 255. For element widths of
839   // 16 bits or higher it may also be a signed multiple of 256 in the
840   // range 0 to 65280.
isSVEAddSubImm() const841   template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
842     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
843       return DiagnosticPredicateTy::NoMatch;
844 
845     bool IsByte =
846         std::is_same<int8_t, typename std::make_signed<T>::type>::value;
847     if (auto ShiftedImm = getShiftedVal<8>())
848       if (!(IsByte && ShiftedImm->second) &&
849           AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
850                                         << ShiftedImm->second))
851         return DiagnosticPredicateTy::Match;
852 
853     return DiagnosticPredicateTy::NearMatch;
854   }
855 
isSVEPreferredLogicalImm() const856   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
857     if (isLogicalImm<T>() && !isSVECpyImm<T>())
858       return DiagnosticPredicateTy::Match;
859     return DiagnosticPredicateTy::NoMatch;
860   }
861 
isCondCode() const862   bool isCondCode() const { return Kind == k_CondCode; }
863 
isSIMDImmType10() const864   bool isSIMDImmType10() const {
865     if (!isImm())
866       return false;
867     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
868     if (!MCE)
869       return false;
870     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
871   }
872 
  // Valid branch target for an instruction with an N-bit displacement
  // field (displacement is encoded in words, hence the <<2 scaling).
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    // Non-constant expressions (labels) are accepted here; a fixup
    // resolves them later.
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // The low two bits are not encoded, so the target must be word-aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
886 
887   bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const888   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
889     if (!isImm())
890       return false;
891 
892     AArch64MCExpr::VariantKind ELFRefKind;
893     MCSymbolRefExpr::VariantKind DarwinRefKind;
894     int64_t Addend;
895     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
896                                              DarwinRefKind, Addend)) {
897       return false;
898     }
899     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
900       return false;
901 
902     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
903       if (ELFRefKind == AllowedModifiers[i])
904         return Addend == 0;
905     }
906 
907     return false;
908   }
909 
  // MOVZ symbol operand with a group-3 (:abs_g3:) modifier.
  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }
913 
  // MOVZ symbol operand with any of the group-2 modifiers.
  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }
919 
  // MOVZ symbol operand with any of the group-1 modifiers.
  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }
927 
  // MOVZ symbol operand with any of the group-0 modifiers.
  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }
933 
  // MOVK symbol operand with a group-3 (:abs_g3:) modifier.
  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }
937 
  // MOVK symbol operand with the non-checking group-2 modifier.
  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }
941 
  // MOVK symbol operand with any non-checking group-1 modifier.
  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }
947 
  // MOVK symbol operand with any non-checking group-0 modifier.
  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
953 
954   template<int RegWidth, int Shift>
isMOVZMovAlias() const955   bool isMOVZMovAlias() const {
956     if (!isImm()) return false;
957 
958     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
959     if (!CE) return false;
960     uint64_t Value = CE->getValue();
961 
962     return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
963   }
964 
965   template<int RegWidth, int Shift>
isMOVNMovAlias() const966   bool isMOVNMovAlias() const {
967     if (!isImm()) return false;
968 
969     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
970     if (!CE) return false;
971     uint64_t Value = CE->getValue();
972 
973     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
974   }
975 
  // FP immediate representable in the 8-bit FMOV encoding
  // (getFP64Imm returns -1 for values it cannot encode).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }
980 
isBarrier() const981   bool isBarrier() const { return Kind == k_Barrier; }
isSysReg() const982   bool isSysReg() const { return Kind == k_SysReg; }
983 
isMRSSystemRegister() const984   bool isMRSSystemRegister() const {
985     if (!isSysReg()) return false;
986 
987     return SysReg.MRSReg != -1U;
988   }
989 
isMSRSystemRegister() const990   bool isMSRSystemRegister() const {
991     if (!isSysReg()) return false;
992     return SysReg.MSRReg != -1U;
993   }
994 
isSystemPStateFieldWithImm0_1() const995   bool isSystemPStateFieldWithImm0_1() const {
996     if (!isSysReg()) return false;
997     return (SysReg.PStateField == AArch64PState::PAN ||
998             SysReg.PStateField == AArch64PState::DIT ||
999             SysReg.PStateField == AArch64PState::UAO);
1000   }
1001 
  // Any other recognized PState field takes a 4-bit immediate (0-15).
  // The 0/1-restricted fields are excluded so the two classes are disjoint.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1006 
  // Any register operand, regardless of register kind.
  bool isReg() const override {
    return Kind == k_Register;
  }
1010 
  // Register parsed as a scalar (RegKind::Scalar), not a vector form.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }
1014 
  // Register parsed with NEON vector syntax.
  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }
1018 
  // NEON vector register restricted to the FPR128_lo class
  // (the low-numbered vector registers).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
1024 
isSVEVectorReg() const1025   template <unsigned Class> bool isSVEVectorReg() const {
1026     RegKind RK;
1027     switch (Class) {
1028     case AArch64::ZPRRegClassID:
1029     case AArch64::ZPR_3bRegClassID:
1030     case AArch64::ZPR_4bRegClassID:
1031       RK = RegKind::SVEDataVector;
1032       break;
1033     case AArch64::PPRRegClassID:
1034     case AArch64::PPR_3bRegClassID:
1035       RK = RegKind::SVEPredicateVector;
1036       break;
1037     default:
1038       llvm_unreachable("Unsupport register class");
1039     }
1040 
1041     return (Kind == k_Register && Reg.Kind == RK) &&
1042            AArch64MCRegisterClasses[Class].contains(getReg());
1043   }
1044 
  // Scalar FP register that can stand in for a Z register of class Class
  // (converted by addFPRasZPRRegOperands).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1049 
  // SVE predicate register of the given class whose parsed element width
  // matches ElementWidth; ElementWidth == 0 accepts any suffix.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() &&
           (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    // Right register kind but wrong class or element width.
    return DiagnosticPredicateTy::NearMatch;
  }
1061 
  // SVE data register of the given class whose parsed element width
  // matches ElementWidth; ElementWidth == 0 accepts any suffix.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() &&
           (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    // Right register kind but wrong class or element width.
    return DiagnosticPredicateTy::NearMatch;
  }
1073 
  // SVE data vector of the right width that also carries the expected
  // shift/extend operator whose amount matches log2 of the access width.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1096 
  // Operand written as a 64-bit GPR; addGPR32as64Operands emits the W
  // register with the same encoding.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }
1101 
  // Operand written as a 32-bit GPR; addGPR64as32Operands emits the X
  // register with the same encoding.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }
1106 
  // Register from the sequential W-register-pair class.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1112 
  // Register from the sequential X-register-pair class.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1118 
1119   template<int64_t Angle, int64_t Remainder>
isComplexRotation() const1120   DiagnosticPredicate isComplexRotation() const {
1121     if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1122 
1123     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1124     if (!CE) return DiagnosticPredicateTy::NoMatch;
1125     uint64_t Value = CE->getValue();
1126 
1127     if (Value % Angle == Remainder && Value <= 270)
1128       return DiagnosticPredicateTy::Match;
1129     return DiagnosticPredicateTy::NearMatch;
1130   }
1131 
  // Scalar register contained in the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }
1136 
  // 64-bit GPR followed by "lsl #log2(ExtWidth/8)" — the scaled-register
  // offset form.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1147 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    // NumElements == 0 means no element-type suffix was written.
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1156 
1157   template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1158             unsigned ElementWidth>
isTypedVectorList() const1159   bool isTypedVectorList() const {
1160     if (Kind != k_VectorList)
1161       return false;
1162     if (VectorList.Count != NumRegs)
1163       return false;
1164     if (VectorList.RegisterKind != VectorKind)
1165       return false;
1166     if (VectorList.ElementWidth != ElementWidth)
1167       return false;
1168     return VectorList.NumElements == NumElements;
1169   }
1170 
1171   template <int Min, int Max>
isVectorIndex() const1172   DiagnosticPredicate isVectorIndex() const {
1173     if (Kind != k_VectorIndex)
1174       return DiagnosticPredicateTy::NoMatch;
1175     if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1176       return DiagnosticPredicateTy::Match;
1177     return DiagnosticPredicateTy::NearMatch;
1178   }
1179 
isToken() const1180   bool isToken() const override { return Kind == k_Token; }
1181 
  // Token operand whose spelling is exactly Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
isSysCR() const1185   bool isSysCR() const { return Kind == k_SysCR; }
isPrefetch() const1186   bool isPrefetch() const { return Kind == k_Prefetch; }
isPSBHint() const1187   bool isPSBHint() const { return Kind == k_PSBHint; }
isShiftExtend() const1188   bool isShiftExtend() const { return Kind == k_ShiftExtend; }
isShifter() const1189   bool isShifter() const {
1190     if (!isShiftExtend())
1191       return false;
1192 
1193     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1194     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1195             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1196             ST == AArch64_AM::MSL);
1197   }
1198 
  // Parsed FP immediate is bit-for-bit equal to the value described by the
  // ImmEnum entry of the exact-FP-immediate table.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    // Only immediates whose textual form converted without rounding
    // qualify as "exact".
    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1220 
  // Accept either of two exact FP immediates; on failure, returns the
  // diagnostic from the second check.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1230 
isExtend() const1231   bool isExtend() const {
1232     if (!isShiftExtend())
1233       return false;
1234 
1235     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1236     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1237             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1238             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1239             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1240             ET == AArch64_AM::LSL) &&
1241            getShiftExtendAmount() <= 4;
1242   }
1243 
  // Extend valid for a 32-bit source register.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
1251 
  // Extend valid for a 64-bit source register: UXTX, SXTX, or plain LSL,
  // with an amount of at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1260 
  // Extend usable with an X-register offset of a Width-bit access:
  // LSL or SXTX, with amount 0 or log2 of the access width in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1269 
  // Extend usable with a W-register offset of a Width-bit access:
  // UXTW or SXTW, with amount 0 or log2 of the access width in bytes.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1278 
1279   template <unsigned width>
isArithmeticShifter() const1280   bool isArithmeticShifter() const {
1281     if (!isShifter())
1282       return false;
1283 
1284     // An arithmetic shifter is LSL, LSR, or ASR.
1285     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1286     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1287             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1288   }
1289 
1290   template <unsigned width>
isLogicalShifter() const1291   bool isLogicalShifter() const {
1292     if (!isShifter())
1293       return false;
1294 
1295     // A logical shifter is LSL, LSR, ASR or ROR.
1296     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1297     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1298             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1299            getShiftExtendAmount() < width;
1300   }
1301 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16. (The old comment described
    // the 64-bit form; the two comments were swapped.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1313 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (The old comment
    // described the 32-bit form; the two comments were swapped.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1325 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
1335 
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    // (The LSL check below is already implied by isLogicalVecShifter.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1345 
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16 (the previous
    // comment incorrectly called this a "logical vector shifter").
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1355 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    // In range for a 9-bit signed unscaled offset, but NOT for a scaled
    // 12-bit unsigned offset of the same access width.
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1365 
isAdrpLabel() const1366   bool isAdrpLabel() const {
1367     // Validation was handled during parsing, so we just sanity check that
1368     // something didn't go haywire.
1369     if (!isImm())
1370         return false;
1371 
1372     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1373       int64_t Val = CE->getValue();
1374       int64_t Min = - (4096 * (1LL << (21 - 1)));
1375       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1376       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1377     }
1378 
1379     return true;
1380   }
1381 
isAdrLabel() const1382   bool isAdrLabel() const {
1383     // Validation was handled during parsing, so we just sanity check that
1384     // something didn't go haywire.
1385     if (!isImm())
1386         return false;
1387 
1388     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1389       int64_t Val = CE->getValue();
1390       int64_t Min = - (1LL << (21 - 1));
1391       int64_t Max = ((1LL << (21 - 1)) - 1);
1392       return Val >= Min && Val <= Max;
1393     }
1394 
1395     return true;
1396   }
1397 
  // Append Expr to Inst, folding constant expressions to plain immediates.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1407 
  // Emit the register exactly as parsed.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1412 
  // The operand was written as an X register; emit the W register with
  // the same hardware encoding instead.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1424 
  // The operand was written as a W register; emit the X register with
  // the same hardware encoding instead.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1436 
  // Map a scalar FP register (B/H/S/D/Q, selected by Width) to the Z
  // register with the same index.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1451 
  // The operand was parsed as a Q register; emit the D register with the
  // same index.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1458 
  // Emit a 128-bit (Q) vector register as parsed.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1465 
  // Emit a low-half vector register as parsed.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1470 
  // Selects the row of the FirstRegs table in addVectorListOperands used
  // to translate a vector list into a list-register operand.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1476 
  // Translate a vector list (e.g. "{ v0.8b, v1.8b }") into the register
  // that represents the whole sequence. Column NumRegs of the table holds
  // the first N-tuple register; column 0 holds the base register class
  // the parsed start register is measured from.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1499 
  // Emit the vector lane index as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1504 
  // Encode which of the two candidate exact immediates was written:
  // 0 when it matched ImmIs0, 1 when it matched ImmIs1.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1511 
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    // NOTE(review): no adjustment is visible here — the expression is
    // added unchanged; confirm where the described addend fixup happens.
    addExpr(Inst, getImm());
  }
1519 
  // Emit an (immediate, shift-amount) pair. Constants are normalized by
  // getShiftedVal; symbolic expressions keep the parsed shift, or 0 when
  // no shift was written.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }
1534 
  // Like addImmWithOptionalShiftOperands but emits the negated value;
  // only reachable for operands already validated as shifted constants.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1544 
  // Emit the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1549 
  // Constant ADRP targets are encoded in 4KB pages (value >> 12);
  // symbolic targets are emitted as expressions for a later fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }
1558 
  // ADR targets are byte offsets, so no scaling is needed.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1562 
  // Scale a constant byte offset down to the encoded access-size units;
  // symbolic offsets are emitted unscaled for a later fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1574 
  // Emit a constant immediate; the operand was already validated, so a
  // non-constant expression here would be a matcher bug (hence cast<>).
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1580 
  // Emit a constant immediate divided by Scale (value validated earlier).
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1587 
  // Encode an already-validated logical immediate for an operand of
  // T's width into its instruction encoding.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1596 
  // As addLogicalImmOperands, but encodes the bitwise complement of the
  // written value.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1605 
  // Encode an AdvSIMD modified-immediate (type 10) operand.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1612 
addBranchTarget26Operands(MCInst & Inst,unsigned N) const1613   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1614     // Branch operands don't encode the low bits, so shift them off
1615     // here. If it's a label, however, just put it on directly as there's
1616     // not enough information now to do anything.
1617     assert(N == 1 && "Invalid number of operands!");
1618     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1619     if (!MCE) {
1620       addExpr(Inst, getImm());
1621       return;
1622     }
1623     assert(MCE && "Invalid constant immediate operand!");
1624     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1625   }
1626 
addPCRelLabel19Operands(MCInst & Inst,unsigned N) const1627   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1628     // Branch operands don't encode the low bits, so shift them off
1629     // here. If it's a label, however, just put it on directly as there's
1630     // not enough information now to do anything.
1631     assert(N == 1 && "Invalid number of operands!");
1632     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1633     if (!MCE) {
1634       addExpr(Inst, getImm());
1635       return;
1636     }
1637     assert(MCE && "Invalid constant immediate operand!");
1638     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1639   }
1640 
addBranchTarget14Operands(MCInst & Inst,unsigned N) const1641   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1642     // Branch operands don't encode the low bits, so shift them off
1643     // here. If it's a label, however, just put it on directly as there's
1644     // not enough information now to do anything.
1645     assert(N == 1 && "Invalid number of operands!");
1646     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1647     if (!MCE) {
1648       addExpr(Inst, getImm());
1649       return;
1650     }
1651     assert(MCE && "Invalid constant immediate operand!");
1652     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1653   }
1654 
addFPImmOperands(MCInst & Inst,unsigned N) const1655   void addFPImmOperands(MCInst &Inst, unsigned N) const {
1656     assert(N == 1 && "Invalid number of operands!");
1657     Inst.addOperand(MCOperand::createImm(
1658         AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1659   }
1660 
addBarrierOperands(MCInst & Inst,unsigned N) const1661   void addBarrierOperands(MCInst &Inst, unsigned N) const {
1662     assert(N == 1 && "Invalid number of operands!");
1663     Inst.addOperand(MCOperand::createImm(getBarrier()));
1664   }
1665 
addMRSSystemRegisterOperands(MCInst & Inst,unsigned N) const1666   void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1667     assert(N == 1 && "Invalid number of operands!");
1668 
1669     Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1670   }
1671 
addMSRSystemRegisterOperands(MCInst & Inst,unsigned N) const1672   void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1673     assert(N == 1 && "Invalid number of operands!");
1674 
1675     Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1676   }
1677 
addSystemPStateFieldWithImm0_1Operands(MCInst & Inst,unsigned N) const1678   void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1679     assert(N == 1 && "Invalid number of operands!");
1680 
1681     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1682   }
1683 
addSystemPStateFieldWithImm0_15Operands(MCInst & Inst,unsigned N) const1684   void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1685     assert(N == 1 && "Invalid number of operands!");
1686 
1687     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1688   }
1689 
addSysCROperands(MCInst & Inst,unsigned N) const1690   void addSysCROperands(MCInst &Inst, unsigned N) const {
1691     assert(N == 1 && "Invalid number of operands!");
1692     Inst.addOperand(MCOperand::createImm(getSysCR()));
1693   }
1694 
addPrefetchOperands(MCInst & Inst,unsigned N) const1695   void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1696     assert(N == 1 && "Invalid number of operands!");
1697     Inst.addOperand(MCOperand::createImm(getPrefetch()));
1698   }
1699 
addPSBHintOperands(MCInst & Inst,unsigned N) const1700   void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1701     assert(N == 1 && "Invalid number of operands!");
1702     Inst.addOperand(MCOperand::createImm(getPSBHint()));
1703   }
1704 
addShifterOperands(MCInst & Inst,unsigned N) const1705   void addShifterOperands(MCInst &Inst, unsigned N) const {
1706     assert(N == 1 && "Invalid number of operands!");
1707     unsigned Imm =
1708         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1709     Inst.addOperand(MCOperand::createImm(Imm));
1710   }
1711 
addExtendOperands(MCInst & Inst,unsigned N) const1712   void addExtendOperands(MCInst &Inst, unsigned N) const {
1713     assert(N == 1 && "Invalid number of operands!");
1714     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1715     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1716     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1717     Inst.addOperand(MCOperand::createImm(Imm));
1718   }
1719 
addExtend64Operands(MCInst & Inst,unsigned N) const1720   void addExtend64Operands(MCInst &Inst, unsigned N) const {
1721     assert(N == 1 && "Invalid number of operands!");
1722     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1723     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1724     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1725     Inst.addOperand(MCOperand::createImm(Imm));
1726   }
1727 
addMemExtendOperands(MCInst & Inst,unsigned N) const1728   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1729     assert(N == 2 && "Invalid number of operands!");
1730     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1731     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1732     Inst.addOperand(MCOperand::createImm(IsSigned));
1733     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1734   }
1735 
1736   // For 8-bit load/store instructions with a register offset, both the
1737   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1738   // they're disambiguated by whether the shift was explicit or implicit rather
1739   // than its size.
addMemExtend8Operands(MCInst & Inst,unsigned N) const1740   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1741     assert(N == 2 && "Invalid number of operands!");
1742     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1743     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1744     Inst.addOperand(MCOperand::createImm(IsSigned));
1745     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1746   }
1747 
1748   template<int Shift>
addMOVZMovAliasOperands(MCInst & Inst,unsigned N) const1749   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1750     assert(N == 1 && "Invalid number of operands!");
1751 
1752     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1753     uint64_t Value = CE->getValue();
1754     Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1755   }
1756 
1757   template<int Shift>
addMOVNMovAliasOperands(MCInst & Inst,unsigned N) const1758   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1759     assert(N == 1 && "Invalid number of operands!");
1760 
1761     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1762     uint64_t Value = CE->getValue();
1763     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1764   }
1765 
addComplexRotationEvenOperands(MCInst & Inst,unsigned N) const1766   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1767     assert(N == 1 && "Invalid number of operands!");
1768     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1769     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1770   }
1771 
addComplexRotationOddOperands(MCInst & Inst,unsigned N) const1772   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1773     assert(N == 1 && "Invalid number of operands!");
1774     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1775     Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1776   }
1777 
1778   void print(raw_ostream &OS) const override;
1779 
1780   static std::unique_ptr<AArch64Operand>
CreateToken(StringRef Str,bool IsSuffix,SMLoc S,MCContext & Ctx)1781   CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1782     auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1783     Op->Tok.Data = Str.data();
1784     Op->Tok.Length = Str.size();
1785     Op->Tok.IsSuffix = IsSuffix;
1786     Op->StartLoc = S;
1787     Op->EndLoc = S;
1788     return Op;
1789   }
1790 
1791   static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum,RegKind Kind,SMLoc S,SMLoc E,MCContext & Ctx,RegConstraintEqualityTy EqTy=RegConstraintEqualityTy::EqualsReg,AArch64_AM::ShiftExtendType ExtTy=AArch64_AM::LSL,unsigned ShiftAmount=0,unsigned HasExplicitAmount=false)1792   CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1793             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1794             AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1795             unsigned ShiftAmount = 0,
1796             unsigned HasExplicitAmount = false) {
1797     auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1798     Op->Reg.RegNum = RegNum;
1799     Op->Reg.Kind = Kind;
1800     Op->Reg.ElementWidth = 0;
1801     Op->Reg.EqualityTy = EqTy;
1802     Op->Reg.ShiftExtend.Type = ExtTy;
1803     Op->Reg.ShiftExtend.Amount = ShiftAmount;
1804     Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1805     Op->StartLoc = S;
1806     Op->EndLoc = E;
1807     return Op;
1808   }
1809 
1810   static std::unique_ptr<AArch64Operand>
CreateVectorReg(unsigned RegNum,RegKind Kind,unsigned ElementWidth,SMLoc S,SMLoc E,MCContext & Ctx,AArch64_AM::ShiftExtendType ExtTy=AArch64_AM::LSL,unsigned ShiftAmount=0,unsigned HasExplicitAmount=false)1811   CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1812                   SMLoc S, SMLoc E, MCContext &Ctx,
1813                   AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1814                   unsigned ShiftAmount = 0,
1815                   unsigned HasExplicitAmount = false) {
1816     assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1817             Kind == RegKind::SVEPredicateVector) &&
1818            "Invalid vector kind");
1819     auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1820                         HasExplicitAmount);
1821     Op->Reg.ElementWidth = ElementWidth;
1822     return Op;
1823   }
1824 
1825   static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum,unsigned Count,unsigned NumElements,unsigned ElementWidth,RegKind RegisterKind,SMLoc S,SMLoc E,MCContext & Ctx)1826   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1827                    unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1828                    MCContext &Ctx) {
1829     auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1830     Op->VectorList.RegNum = RegNum;
1831     Op->VectorList.Count = Count;
1832     Op->VectorList.NumElements = NumElements;
1833     Op->VectorList.ElementWidth = ElementWidth;
1834     Op->VectorList.RegisterKind = RegisterKind;
1835     Op->StartLoc = S;
1836     Op->EndLoc = E;
1837     return Op;
1838   }
1839 
1840   static std::unique_ptr<AArch64Operand>
CreateVectorIndex(unsigned Idx,SMLoc S,SMLoc E,MCContext & Ctx)1841   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1842     auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1843     Op->VectorIndex.Val = Idx;
1844     Op->StartLoc = S;
1845     Op->EndLoc = E;
1846     return Op;
1847   }
1848 
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E,MCContext & Ctx)1849   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1850                                                    SMLoc E, MCContext &Ctx) {
1851     auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1852     Op->Imm.Val = Val;
1853     Op->StartLoc = S;
1854     Op->EndLoc = E;
1855     return Op;
1856   }
1857 
CreateShiftedImm(const MCExpr * Val,unsigned ShiftAmount,SMLoc S,SMLoc E,MCContext & Ctx)1858   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1859                                                           unsigned ShiftAmount,
1860                                                           SMLoc S, SMLoc E,
1861                                                           MCContext &Ctx) {
1862     auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1863     Op->ShiftedImm .Val = Val;
1864     Op->ShiftedImm.ShiftAmount = ShiftAmount;
1865     Op->StartLoc = S;
1866     Op->EndLoc = E;
1867     return Op;
1868   }
1869 
1870   static std::unique_ptr<AArch64Operand>
CreateCondCode(AArch64CC::CondCode Code,SMLoc S,SMLoc E,MCContext & Ctx)1871   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1872     auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1873     Op->CondCode.Code = Code;
1874     Op->StartLoc = S;
1875     Op->EndLoc = E;
1876     return Op;
1877   }
1878 
1879   static std::unique_ptr<AArch64Operand>
CreateFPImm(APFloat Val,bool IsExact,SMLoc S,MCContext & Ctx)1880   CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1881     auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1882     Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1883     Op->FPImm.IsExact = IsExact;
1884     Op->StartLoc = S;
1885     Op->EndLoc = S;
1886     return Op;
1887   }
1888 
CreateBarrier(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)1889   static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1890                                                        StringRef Str,
1891                                                        SMLoc S,
1892                                                        MCContext &Ctx) {
1893     auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1894     Op->Barrier.Val = Val;
1895     Op->Barrier.Data = Str.data();
1896     Op->Barrier.Length = Str.size();
1897     Op->StartLoc = S;
1898     Op->EndLoc = S;
1899     return Op;
1900   }
1901 
CreateSysReg(StringRef Str,SMLoc S,uint32_t MRSReg,uint32_t MSRReg,uint32_t PStateField,MCContext & Ctx)1902   static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1903                                                       uint32_t MRSReg,
1904                                                       uint32_t MSRReg,
1905                                                       uint32_t PStateField,
1906                                                       MCContext &Ctx) {
1907     auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1908     Op->SysReg.Data = Str.data();
1909     Op->SysReg.Length = Str.size();
1910     Op->SysReg.MRSReg = MRSReg;
1911     Op->SysReg.MSRReg = MSRReg;
1912     Op->SysReg.PStateField = PStateField;
1913     Op->StartLoc = S;
1914     Op->EndLoc = S;
1915     return Op;
1916   }
1917 
CreateSysCR(unsigned Val,SMLoc S,SMLoc E,MCContext & Ctx)1918   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1919                                                      SMLoc E, MCContext &Ctx) {
1920     auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1921     Op->SysCRImm.Val = Val;
1922     Op->StartLoc = S;
1923     Op->EndLoc = E;
1924     return Op;
1925   }
1926 
CreatePrefetch(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)1927   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1928                                                         StringRef Str,
1929                                                         SMLoc S,
1930                                                         MCContext &Ctx) {
1931     auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1932     Op->Prefetch.Val = Val;
1933     Op->Barrier.Data = Str.data();
1934     Op->Barrier.Length = Str.size();
1935     Op->StartLoc = S;
1936     Op->EndLoc = S;
1937     return Op;
1938   }
1939 
CreatePSBHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)1940   static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1941                                                        StringRef Str,
1942                                                        SMLoc S,
1943                                                        MCContext &Ctx) {
1944     auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1945     Op->PSBHint.Val = Val;
1946     Op->PSBHint.Data = Str.data();
1947     Op->PSBHint.Length = Str.size();
1948     Op->StartLoc = S;
1949     Op->EndLoc = S;
1950     return Op;
1951   }
1952 
1953   static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,unsigned Val,bool HasExplicitAmount,SMLoc S,SMLoc E,MCContext & Ctx)1954   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1955                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1956     auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1957     Op->ShiftExtend.Type = ShOp;
1958     Op->ShiftExtend.Amount = Val;
1959     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1960     Op->StartLoc = S;
1961     Op->EndLoc = E;
1962     return Op;
1963   }
1964 };
1965 
1966 } // end anonymous namespace.
1967 
print(raw_ostream & OS) const1968 void AArch64Operand::print(raw_ostream &OS) const {
1969   switch (Kind) {
1970   case k_FPImm:
1971     OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
1972     if (!getFPImmIsExact())
1973       OS << " (inexact)";
1974     OS << ">";
1975     break;
1976   case k_Barrier: {
1977     StringRef Name = getBarrierName();
1978     if (!Name.empty())
1979       OS << "<barrier " << Name << ">";
1980     else
1981       OS << "<barrier invalid #" << getBarrier() << ">";
1982     break;
1983   }
1984   case k_Immediate:
1985     OS << *getImm();
1986     break;
1987   case k_ShiftedImm: {
1988     unsigned Shift = getShiftedImmShift();
1989     OS << "<shiftedimm ";
1990     OS << *getShiftedImmVal();
1991     OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1992     break;
1993   }
1994   case k_CondCode:
1995     OS << "<condcode " << getCondCode() << ">";
1996     break;
1997   case k_VectorList: {
1998     OS << "<vectorlist ";
1999     unsigned Reg = getVectorListStart();
2000     for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2001       OS << Reg + i << " ";
2002     OS << ">";
2003     break;
2004   }
2005   case k_VectorIndex:
2006     OS << "<vectorindex " << getVectorIndex() << ">";
2007     break;
2008   case k_SysReg:
2009     OS << "<sysreg: " << getSysReg() << '>';
2010     break;
2011   case k_Token:
2012     OS << "'" << getToken() << "'";
2013     break;
2014   case k_SysCR:
2015     OS << "c" << getSysCR();
2016     break;
2017   case k_Prefetch: {
2018     StringRef Name = getPrefetchName();
2019     if (!Name.empty())
2020       OS << "<prfop " << Name << ">";
2021     else
2022       OS << "<prfop invalid #" << getPrefetch() << ">";
2023     break;
2024   }
2025   case k_PSBHint:
2026     OS << getPSBHintName();
2027     break;
2028   case k_Register:
2029     OS << "<register " << getReg() << ">";
2030     if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2031       break;
2032     LLVM_FALLTHROUGH;
2033   case k_ShiftExtend:
2034     OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2035        << getShiftExtendAmount();
2036     if (!hasShiftExtendAmount())
2037       OS << "<imp>";
2038     OS << '>';
2039     break;
2040   }
2041 }
2042 
2043 /// @name Auto-generated Match Functions
2044 /// {
2045 
2046 static unsigned MatchRegisterName(StringRef Name);
2047 
2048 /// }
2049 
MatchNeonVectorRegName(StringRef Name)2050 static unsigned MatchNeonVectorRegName(StringRef Name) {
2051   return StringSwitch<unsigned>(Name.lower())
2052       .Case("v0", AArch64::Q0)
2053       .Case("v1", AArch64::Q1)
2054       .Case("v2", AArch64::Q2)
2055       .Case("v3", AArch64::Q3)
2056       .Case("v4", AArch64::Q4)
2057       .Case("v5", AArch64::Q5)
2058       .Case("v6", AArch64::Q6)
2059       .Case("v7", AArch64::Q7)
2060       .Case("v8", AArch64::Q8)
2061       .Case("v9", AArch64::Q9)
2062       .Case("v10", AArch64::Q10)
2063       .Case("v11", AArch64::Q11)
2064       .Case("v12", AArch64::Q12)
2065       .Case("v13", AArch64::Q13)
2066       .Case("v14", AArch64::Q14)
2067       .Case("v15", AArch64::Q15)
2068       .Case("v16", AArch64::Q16)
2069       .Case("v17", AArch64::Q17)
2070       .Case("v18", AArch64::Q18)
2071       .Case("v19", AArch64::Q19)
2072       .Case("v20", AArch64::Q20)
2073       .Case("v21", AArch64::Q21)
2074       .Case("v22", AArch64::Q22)
2075       .Case("v23", AArch64::Q23)
2076       .Case("v24", AArch64::Q24)
2077       .Case("v25", AArch64::Q25)
2078       .Case("v26", AArch64::Q26)
2079       .Case("v27", AArch64::Q27)
2080       .Case("v28", AArch64::Q28)
2081       .Case("v29", AArch64::Q29)
2082       .Case("v30", AArch64::Q30)
2083       .Case("v31", AArch64::Q31)
2084       .Default(0);
2085 }
2086 
2087 /// Returns an optional pair of (#elements, element-width) if Suffix
2088 /// is a valid vector kind. Where the number of elements in a vector
2089 /// or the vector width is implicit or explicitly unknown (but still a
2090 /// valid suffix kind), 0 is used.
parseVectorKind(StringRef Suffix,RegKind VectorKind)2091 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2092                                                      RegKind VectorKind) {
2093   std::pair<int, int> Res = {-1, -1};
2094 
2095   switch (VectorKind) {
2096   case RegKind::NeonVector:
2097     Res =
2098         StringSwitch<std::pair<int, int>>(Suffix.lower())
2099             .Case("", {0, 0})
2100             .Case(".1d", {1, 64})
2101             .Case(".1q", {1, 128})
2102             // '.2h' needed for fp16 scalar pairwise reductions
2103             .Case(".2h", {2, 16})
2104             .Case(".2s", {2, 32})
2105             .Case(".2d", {2, 64})
2106             // '.4b' is another special case for the ARMv8.2a dot product
2107             // operand
2108             .Case(".4b", {4, 8})
2109             .Case(".4h", {4, 16})
2110             .Case(".4s", {4, 32})
2111             .Case(".8b", {8, 8})
2112             .Case(".8h", {8, 16})
2113             .Case(".16b", {16, 8})
2114             // Accept the width neutral ones, too, for verbose syntax. If those
2115             // aren't used in the right places, the token operand won't match so
2116             // all will work out.
2117             .Case(".b", {0, 8})
2118             .Case(".h", {0, 16})
2119             .Case(".s", {0, 32})
2120             .Case(".d", {0, 64})
2121             .Default({-1, -1});
2122     break;
2123   case RegKind::SVEPredicateVector:
2124   case RegKind::SVEDataVector:
2125     Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2126               .Case("", {0, 0})
2127               .Case(".b", {0, 8})
2128               .Case(".h", {0, 16})
2129               .Case(".s", {0, 32})
2130               .Case(".d", {0, 64})
2131               .Case(".q", {0, 128})
2132               .Default({-1, -1});
2133     break;
2134   default:
2135     llvm_unreachable("Unsupported RegKind");
2136   }
2137 
2138   if (Res == std::make_pair(-1, -1))
2139     return Optional<std::pair<int, int>>();
2140 
2141   return Optional<std::pair<int, int>>(Res);
2142 }
2143 
isValidVectorKind(StringRef Suffix,RegKind VectorKind)2144 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2145   return parseVectorKind(Suffix, VectorKind).hasValue();
2146 }
2147 
matchSVEDataVectorRegName(StringRef Name)2148 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2149   return StringSwitch<unsigned>(Name.lower())
2150       .Case("z0", AArch64::Z0)
2151       .Case("z1", AArch64::Z1)
2152       .Case("z2", AArch64::Z2)
2153       .Case("z3", AArch64::Z3)
2154       .Case("z4", AArch64::Z4)
2155       .Case("z5", AArch64::Z5)
2156       .Case("z6", AArch64::Z6)
2157       .Case("z7", AArch64::Z7)
2158       .Case("z8", AArch64::Z8)
2159       .Case("z9", AArch64::Z9)
2160       .Case("z10", AArch64::Z10)
2161       .Case("z11", AArch64::Z11)
2162       .Case("z12", AArch64::Z12)
2163       .Case("z13", AArch64::Z13)
2164       .Case("z14", AArch64::Z14)
2165       .Case("z15", AArch64::Z15)
2166       .Case("z16", AArch64::Z16)
2167       .Case("z17", AArch64::Z17)
2168       .Case("z18", AArch64::Z18)
2169       .Case("z19", AArch64::Z19)
2170       .Case("z20", AArch64::Z20)
2171       .Case("z21", AArch64::Z21)
2172       .Case("z22", AArch64::Z22)
2173       .Case("z23", AArch64::Z23)
2174       .Case("z24", AArch64::Z24)
2175       .Case("z25", AArch64::Z25)
2176       .Case("z26", AArch64::Z26)
2177       .Case("z27", AArch64::Z27)
2178       .Case("z28", AArch64::Z28)
2179       .Case("z29", AArch64::Z29)
2180       .Case("z30", AArch64::Z30)
2181       .Case("z31", AArch64::Z31)
2182       .Default(0);
2183 }
2184 
matchSVEPredicateVectorRegName(StringRef Name)2185 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2186   return StringSwitch<unsigned>(Name.lower())
2187       .Case("p0", AArch64::P0)
2188       .Case("p1", AArch64::P1)
2189       .Case("p2", AArch64::P2)
2190       .Case("p3", AArch64::P3)
2191       .Case("p4", AArch64::P4)
2192       .Case("p5", AArch64::P5)
2193       .Case("p6", AArch64::P6)
2194       .Case("p7", AArch64::P7)
2195       .Case("p8", AArch64::P8)
2196       .Case("p9", AArch64::P9)
2197       .Case("p10", AArch64::P10)
2198       .Case("p11", AArch64::P11)
2199       .Case("p12", AArch64::P12)
2200       .Case("p13", AArch64::P13)
2201       .Case("p14", AArch64::P14)
2202       .Case("p15", AArch64::P15)
2203       .Default(0);
2204 }
2205 
ParseRegister(unsigned & RegNo,SMLoc & StartLoc,SMLoc & EndLoc)2206 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2207                                      SMLoc &EndLoc) {
2208   StartLoc = getLoc();
2209   auto Res = tryParseScalarRegister(RegNo);
2210   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2211   return Res != MatchOperand_Success;
2212 }
2213 
2214 // Matches a register name or register alias previously defined by '.req'
matchRegisterNameAlias(StringRef Name,RegKind Kind)2215 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2216                                                   RegKind Kind) {
2217   unsigned RegNum = 0;
2218   if ((RegNum = matchSVEDataVectorRegName(Name)))
2219     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2220 
2221   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2222     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2223 
2224   if ((RegNum = MatchNeonVectorRegName(Name)))
2225     return Kind == RegKind::NeonVector ? RegNum : 0;
2226 
2227   // The parsed register must be of RegKind Scalar
2228   if ((RegNum = MatchRegisterName(Name)))
2229     return Kind == RegKind::Scalar ? RegNum : 0;
2230 
2231   if (!RegNum) {
2232     // Handle a few common aliases of registers.
2233     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2234                     .Case("fp", AArch64::FP)
2235                     .Case("lr",  AArch64::LR)
2236                     .Case("x31", AArch64::XZR)
2237                     .Case("w31", AArch64::WZR)
2238                     .Default(0))
2239       return Kind == RegKind::Scalar ? RegNum : 0;
2240 
2241     // Check for aliases registered via .req. Canonicalize to lower case.
2242     // That's more consistent since register names are case insensitive, and
2243     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2244     auto Entry = RegisterReqs.find(Name.lower());
2245     if (Entry == RegisterReqs.end())
2246       return 0;
2247 
2248     // set RegNum if the match is the right kind of register
2249     if (Kind == Entry->getValue().first)
2250       RegNum = Entry->getValue().second;
2251   }
2252   return RegNum;
2253 }
2254 
2255 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2256 /// Identifier when called, and if it is a register name the token is eaten and
2257 /// the register is added to the operand list.
2258 OperandMatchResultTy
tryParseScalarRegister(unsigned & RegNum)2259 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2260   MCAsmParser &Parser = getParser();
2261   const AsmToken &Tok = Parser.getTok();
2262   if (Tok.isNot(AsmToken::Identifier))
2263     return MatchOperand_NoMatch;
2264 
2265   std::string lowerCase = Tok.getString().lower();
2266   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2267   if (Reg == 0)
2268     return MatchOperand_NoMatch;
2269 
2270   RegNum = Reg;
2271   Parser.Lex(); // Eat identifier token.
2272   return MatchOperand_Success;
2273 }
2274 
2275 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2276 OperandMatchResultTy
tryParseSysCROperand(OperandVector & Operands)2277 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2278   MCAsmParser &Parser = getParser();
2279   SMLoc S = getLoc();
2280 
2281   if (Parser.getTok().isNot(AsmToken::Identifier)) {
2282     Error(S, "Expected cN operand where 0 <= N <= 15");
2283     return MatchOperand_ParseFail;
2284   }
2285 
2286   StringRef Tok = Parser.getTok().getIdentifier();
2287   if (Tok[0] != 'c' && Tok[0] != 'C') {
2288     Error(S, "Expected cN operand where 0 <= N <= 15");
2289     return MatchOperand_ParseFail;
2290   }
2291 
2292   uint32_t CRNum;
2293   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2294   if (BadNum || CRNum > 15) {
2295     Error(S, "Expected cN operand where 0 <= N <= 15");
2296     return MatchOperand_ParseFail;
2297   }
2298 
2299   Parser.Lex(); // Eat identifier token.
2300   Operands.push_back(
2301       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2302   return MatchOperand_Success;
2303 }
2304 
2305 /// tryParsePrefetch - Try to parse a prefetch operand.
2306 template <bool IsSVEPrefetch>
2307 OperandMatchResultTy
tryParsePrefetch(OperandVector & Operands)2308 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2309   MCAsmParser &Parser = getParser();
2310   SMLoc S = getLoc();
2311   const AsmToken &Tok = Parser.getTok();
2312 
2313   auto LookupByName = [](StringRef N) {
2314     if (IsSVEPrefetch) {
2315       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2316         return Optional<unsigned>(Res->Encoding);
2317     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2318       return Optional<unsigned>(Res->Encoding);
2319     return Optional<unsigned>();
2320   };
2321 
2322   auto LookupByEncoding = [](unsigned E) {
2323     if (IsSVEPrefetch) {
2324       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2325         return Optional<StringRef>(Res->Name);
2326     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2327       return Optional<StringRef>(Res->Name);
2328     return Optional<StringRef>();
2329   };
2330   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2331 
2332   // Either an identifier for named values or a 5-bit immediate.
2333   // Eat optional hash.
2334   if (parseOptionalToken(AsmToken::Hash) ||
2335       Tok.is(AsmToken::Integer)) {
2336     const MCExpr *ImmVal;
2337     if (getParser().parseExpression(ImmVal))
2338       return MatchOperand_ParseFail;
2339 
2340     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2341     if (!MCE) {
2342       TokError("immediate value expected for prefetch operand");
2343       return MatchOperand_ParseFail;
2344     }
2345     unsigned prfop = MCE->getValue();
2346     if (prfop > MaxVal) {
2347       TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2348                "] expected");
2349       return MatchOperand_ParseFail;
2350     }
2351 
2352     auto PRFM = LookupByEncoding(MCE->getValue());
2353     Operands.push_back(AArch64Operand::CreatePrefetch(
2354         prfop, PRFM.getValueOr(""), S, getContext()));
2355     return MatchOperand_Success;
2356   }
2357 
2358   if (Tok.isNot(AsmToken::Identifier)) {
2359     TokError("prefetch hint expected");
2360     return MatchOperand_ParseFail;
2361   }
2362 
2363   auto PRFM = LookupByName(Tok.getString());
2364   if (!PRFM) {
2365     TokError("prefetch hint expected");
2366     return MatchOperand_ParseFail;
2367   }
2368 
2369   Parser.Lex(); // Eat identifier token.
2370   Operands.push_back(AArch64Operand::CreatePrefetch(
2371       *PRFM, Tok.getString(), S, getContext()));
2372   return MatchOperand_Success;
2373 }
2374 
2375 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2376 OperandMatchResultTy
tryParsePSBHint(OperandVector & Operands)2377 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2378   MCAsmParser &Parser = getParser();
2379   SMLoc S = getLoc();
2380   const AsmToken &Tok = Parser.getTok();
2381   if (Tok.isNot(AsmToken::Identifier)) {
2382     TokError("invalid operand for instruction");
2383     return MatchOperand_ParseFail;
2384   }
2385 
2386   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2387   if (!PSB) {
2388     TokError("invalid operand for instruction");
2389     return MatchOperand_ParseFail;
2390   }
2391 
2392   Parser.Lex(); // Eat identifier token.
2393   Operands.push_back(AArch64Operand::CreatePSBHint(
2394       PSB->Encoding, Tok.getString(), S, getContext()));
2395   return MatchOperand_Success;
2396 }
2397 
2398 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2399 /// instruction.
2400 OperandMatchResultTy
tryParseAdrpLabel(OperandVector & Operands)2401 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2402   MCAsmParser &Parser = getParser();
2403   SMLoc S = getLoc();
2404   const MCExpr *Expr;
2405 
2406   if (Parser.getTok().is(AsmToken::Hash)) {
2407     Parser.Lex(); // Eat hash token.
2408   }
2409 
2410   if (parseSymbolicImmVal(Expr))
2411     return MatchOperand_ParseFail;
2412 
2413   AArch64MCExpr::VariantKind ELFRefKind;
2414   MCSymbolRefExpr::VariantKind DarwinRefKind;
2415   int64_t Addend;
2416   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2417     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2418         ELFRefKind == AArch64MCExpr::VK_INVALID) {
2419       // No modifier was specified at all; this is the syntax for an ELF basic
2420       // ADRP relocation (unfortunately).
2421       Expr =
2422           AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2423     } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2424                 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2425                Addend != 0) {
2426       Error(S, "gotpage label reference not allowed an addend");
2427       return MatchOperand_ParseFail;
2428     } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2429                DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2430                DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2431                ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2432                ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2433                ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2434       // The operand must be an @page or @gotpage qualified symbolref.
2435       Error(S, "page or gotpage label reference expected");
2436       return MatchOperand_ParseFail;
2437     }
2438   }
2439 
2440   // We have either a label reference possibly with addend or an immediate. The
2441   // addend is a raw value here. The linker will adjust it to only reference the
2442   // page.
2443   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2444   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2445 
2446   return MatchOperand_Success;
2447 }
2448 
2449 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2450 /// instruction.
2451 OperandMatchResultTy
tryParseAdrLabel(OperandVector & Operands)2452 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2453   SMLoc S = getLoc();
2454   const MCExpr *Expr;
2455 
2456   const AsmToken &Tok = getParser().getTok();
2457   if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2458     if (getParser().parseExpression(Expr))
2459       return MatchOperand_ParseFail;
2460 
2461     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2462     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2463 
2464     return MatchOperand_Success;
2465   }
2466   return MatchOperand_NoMatch;
2467 }
2468 
2469 /// tryParseFPImm - A floating point immediate expression operand.
2470 template<bool AddFPZeroAsLiteral>
2471 OperandMatchResultTy
tryParseFPImm(OperandVector & Operands)2472 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2473   MCAsmParser &Parser = getParser();
2474   SMLoc S = getLoc();
2475 
2476   bool Hash = parseOptionalToken(AsmToken::Hash);
2477 
2478   // Handle negation, as that still comes through as a separate token.
2479   bool isNegative = parseOptionalToken(AsmToken::Minus);
2480 
2481   const AsmToken &Tok = Parser.getTok();
2482   if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2483     if (!Hash)
2484       return MatchOperand_NoMatch;
2485     TokError("invalid floating point immediate");
2486     return MatchOperand_ParseFail;
2487   }
2488 
2489   // Parse hexadecimal representation.
2490   if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2491     if (Tok.getIntVal() > 255 || isNegative) {
2492       TokError("encoded floating point value out of range");
2493       return MatchOperand_ParseFail;
2494     }
2495 
2496     APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2497     Operands.push_back(
2498         AArch64Operand::CreateFPImm(F, true, S, getContext()));
2499   } else {
2500     // Parse FP representation.
2501     APFloat RealVal(APFloat::IEEEdouble());
2502     auto Status =
2503         RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2504     if (isNegative)
2505       RealVal.changeSign();
2506 
2507     if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2508       Operands.push_back(
2509           AArch64Operand::CreateToken("#0", false, S, getContext()));
2510       Operands.push_back(
2511           AArch64Operand::CreateToken(".0", false, S, getContext()));
2512     } else
2513       Operands.push_back(AArch64Operand::CreateFPImm(
2514           RealVal, Status == APFloat::opOK, S, getContext()));
2515   }
2516 
2517   Parser.Lex(); // Eat the token.
2518 
2519   return MatchOperand_Success;
2520 }
2521 
2522 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2523 /// a shift suffix, for example '#1, lsl #12'.
2524 OperandMatchResultTy
tryParseImmWithOptionalShift(OperandVector & Operands)2525 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2526   MCAsmParser &Parser = getParser();
2527   SMLoc S = getLoc();
2528 
2529   if (Parser.getTok().is(AsmToken::Hash))
2530     Parser.Lex(); // Eat '#'
2531   else if (Parser.getTok().isNot(AsmToken::Integer))
2532     // Operand should start from # or should be integer, emit error otherwise.
2533     return MatchOperand_NoMatch;
2534 
2535   const MCExpr *Imm;
2536   if (parseSymbolicImmVal(Imm))
2537     return MatchOperand_ParseFail;
2538   else if (Parser.getTok().isNot(AsmToken::Comma)) {
2539     SMLoc E = Parser.getTok().getLoc();
2540     Operands.push_back(
2541         AArch64Operand::CreateImm(Imm, S, E, getContext()));
2542     return MatchOperand_Success;
2543   }
2544 
2545   // Eat ','
2546   Parser.Lex();
2547 
2548   // The optional operand must be "lsl #N" where N is non-negative.
2549   if (!Parser.getTok().is(AsmToken::Identifier) ||
2550       !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2551     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2552     return MatchOperand_ParseFail;
2553   }
2554 
2555   // Eat 'lsl'
2556   Parser.Lex();
2557 
2558   parseOptionalToken(AsmToken::Hash);
2559 
2560   if (Parser.getTok().isNot(AsmToken::Integer)) {
2561     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2562     return MatchOperand_ParseFail;
2563   }
2564 
2565   int64_t ShiftAmount = Parser.getTok().getIntVal();
2566 
2567   if (ShiftAmount < 0) {
2568     Error(Parser.getTok().getLoc(), "positive shift amount required");
2569     return MatchOperand_ParseFail;
2570   }
2571   Parser.Lex(); // Eat the number
2572 
2573   // Just in case the optional lsl #0 is used for immediates other than zero.
2574   if (ShiftAmount == 0 && Imm != 0) {
2575     SMLoc E = Parser.getTok().getLoc();
2576     Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2577     return MatchOperand_Success;
2578   }
2579 
2580   SMLoc E = Parser.getTok().getLoc();
2581   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2582                                                       S, E, getContext()));
2583   return MatchOperand_Success;
2584 }
2585 
2586 /// parseCondCodeString - Parse a Condition Code string.
parseCondCodeString(StringRef Cond)2587 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2588   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2589                     .Case("eq", AArch64CC::EQ)
2590                     .Case("ne", AArch64CC::NE)
2591                     .Case("cs", AArch64CC::HS)
2592                     .Case("hs", AArch64CC::HS)
2593                     .Case("cc", AArch64CC::LO)
2594                     .Case("lo", AArch64CC::LO)
2595                     .Case("mi", AArch64CC::MI)
2596                     .Case("pl", AArch64CC::PL)
2597                     .Case("vs", AArch64CC::VS)
2598                     .Case("vc", AArch64CC::VC)
2599                     .Case("hi", AArch64CC::HI)
2600                     .Case("ls", AArch64CC::LS)
2601                     .Case("ge", AArch64CC::GE)
2602                     .Case("lt", AArch64CC::LT)
2603                     .Case("gt", AArch64CC::GT)
2604                     .Case("le", AArch64CC::LE)
2605                     .Case("al", AArch64CC::AL)
2606                     .Case("nv", AArch64CC::NV)
2607                     .Default(AArch64CC::Invalid);
2608 
2609   if (CC == AArch64CC::Invalid &&
2610       getSTI().getFeatureBits()[AArch64::FeatureSVE])
2611     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2612                     .Case("none",  AArch64CC::EQ)
2613                     .Case("any",   AArch64CC::NE)
2614                     .Case("nlast", AArch64CC::HS)
2615                     .Case("last",  AArch64CC::LO)
2616                     .Case("first", AArch64CC::MI)
2617                     .Case("nfrst", AArch64CC::PL)
2618                     .Case("pmore", AArch64CC::HI)
2619                     .Case("plast", AArch64CC::LS)
2620                     .Case("tcont", AArch64CC::GE)
2621                     .Case("tstop", AArch64CC::LT)
2622                     .Default(AArch64CC::Invalid);
2623 
2624   return CC;
2625 }
2626 
2627 /// parseCondCode - Parse a Condition Code operand.
parseCondCode(OperandVector & Operands,bool invertCondCode)2628 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2629                                      bool invertCondCode) {
2630   MCAsmParser &Parser = getParser();
2631   SMLoc S = getLoc();
2632   const AsmToken &Tok = Parser.getTok();
2633   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2634 
2635   StringRef Cond = Tok.getString();
2636   AArch64CC::CondCode CC = parseCondCodeString(Cond);
2637   if (CC == AArch64CC::Invalid)
2638     return TokError("invalid condition code");
2639   Parser.Lex(); // Eat identifier token.
2640 
2641   if (invertCondCode) {
2642     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2643       return TokError("condition codes AL and NV are invalid for this instruction");
2644     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2645   }
2646 
2647   Operands.push_back(
2648       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2649   return false;
2650 }
2651 
2652 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2653 /// them if present.
2654 OperandMatchResultTy
tryParseOptionalShiftExtend(OperandVector & Operands)2655 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2656   MCAsmParser &Parser = getParser();
2657   const AsmToken &Tok = Parser.getTok();
2658   std::string LowerID = Tok.getString().lower();
2659   AArch64_AM::ShiftExtendType ShOp =
2660       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2661           .Case("lsl", AArch64_AM::LSL)
2662           .Case("lsr", AArch64_AM::LSR)
2663           .Case("asr", AArch64_AM::ASR)
2664           .Case("ror", AArch64_AM::ROR)
2665           .Case("msl", AArch64_AM::MSL)
2666           .Case("uxtb", AArch64_AM::UXTB)
2667           .Case("uxth", AArch64_AM::UXTH)
2668           .Case("uxtw", AArch64_AM::UXTW)
2669           .Case("uxtx", AArch64_AM::UXTX)
2670           .Case("sxtb", AArch64_AM::SXTB)
2671           .Case("sxth", AArch64_AM::SXTH)
2672           .Case("sxtw", AArch64_AM::SXTW)
2673           .Case("sxtx", AArch64_AM::SXTX)
2674           .Default(AArch64_AM::InvalidShiftExtend);
2675 
2676   if (ShOp == AArch64_AM::InvalidShiftExtend)
2677     return MatchOperand_NoMatch;
2678 
2679   SMLoc S = Tok.getLoc();
2680   Parser.Lex();
2681 
2682   bool Hash = parseOptionalToken(AsmToken::Hash);
2683 
2684   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2685     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2686         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2687         ShOp == AArch64_AM::MSL) {
2688       // We expect a number here.
2689       TokError("expected #imm after shift specifier");
2690       return MatchOperand_ParseFail;
2691     }
2692 
2693     // "extend" type operations don't need an immediate, #0 is implicit.
2694     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2695     Operands.push_back(
2696         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2697     return MatchOperand_Success;
2698   }
2699 
2700   // Make sure we do actually have a number, identifier or a parenthesized
2701   // expression.
2702   SMLoc E = Parser.getTok().getLoc();
2703   if (!Parser.getTok().is(AsmToken::Integer) &&
2704       !Parser.getTok().is(AsmToken::LParen) &&
2705       !Parser.getTok().is(AsmToken::Identifier)) {
2706     Error(E, "expected integer shift amount");
2707     return MatchOperand_ParseFail;
2708   }
2709 
2710   const MCExpr *ImmVal;
2711   if (getParser().parseExpression(ImmVal))
2712     return MatchOperand_ParseFail;
2713 
2714   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2715   if (!MCE) {
2716     Error(E, "expected constant '#imm' after shift specifier");
2717     return MatchOperand_ParseFail;
2718   }
2719 
2720   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2721   Operands.push_back(AArch64Operand::CreateShiftExtend(
2722       ShOp, MCE->getValue(), true, S, E, getContext()));
2723   return MatchOperand_Success;
2724 }
2725 
setRequiredFeatureString(FeatureBitset FBS,std::string & Str)2726 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2727   if (FBS[AArch64::HasV8_1aOps])
2728     Str += "ARMv8.1a";
2729   else if (FBS[AArch64::HasV8_2aOps])
2730     Str += "ARMv8.2a";
2731   else if (FBS[AArch64::HasV8_3aOps])
2732     Str += "ARMv8.3a";
2733   else if (FBS[AArch64::HasV8_4aOps])
2734     Str += "ARMv8.4a";
2735   else
2736     Str += "(unknown)";
2737 }
2738 
createSysAlias(uint16_t Encoding,OperandVector & Operands,SMLoc S)2739 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2740                                       SMLoc S) {
2741   const uint16_t Op2 = Encoding & 7;
2742   const uint16_t Cm = (Encoding & 0x78) >> 3;
2743   const uint16_t Cn = (Encoding & 0x780) >> 7;
2744   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2745 
2746   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2747 
2748   Operands.push_back(
2749       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2750   Operands.push_back(
2751       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2752   Operands.push_back(
2753       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2754   Expr = MCConstantExpr::create(Op2, getContext());
2755   Operands.push_back(
2756       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2757 }
2758 
2759 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2760 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
parseSysAlias(StringRef Name,SMLoc NameLoc,OperandVector & Operands)2761 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2762                                    OperandVector &Operands) {
2763   if (Name.find('.') != StringRef::npos)
2764     return TokError("invalid operand");
2765 
2766   Mnemonic = Name;
2767   Operands.push_back(
2768       AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2769 
2770   MCAsmParser &Parser = getParser();
2771   const AsmToken &Tok = Parser.getTok();
2772   StringRef Op = Tok.getString();
2773   SMLoc S = Tok.getLoc();
2774 
2775   if (Mnemonic == "ic") {
2776     const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2777     if (!IC)
2778       return TokError("invalid operand for IC instruction");
2779     else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2780       std::string Str("IC " + std::string(IC->Name) + " requires ");
2781       setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2782       return TokError(Str.c_str());
2783     }
2784     createSysAlias(IC->Encoding, Operands, S);
2785   } else if (Mnemonic == "dc") {
2786     const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2787     if (!DC)
2788       return TokError("invalid operand for DC instruction");
2789     else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2790       std::string Str("DC " + std::string(DC->Name) + " requires ");
2791       setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2792       return TokError(Str.c_str());
2793     }
2794     createSysAlias(DC->Encoding, Operands, S);
2795   } else if (Mnemonic == "at") {
2796     const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2797     if (!AT)
2798       return TokError("invalid operand for AT instruction");
2799     else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2800       std::string Str("AT " + std::string(AT->Name) + " requires ");
2801       setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2802       return TokError(Str.c_str());
2803     }
2804     createSysAlias(AT->Encoding, Operands, S);
2805   } else if (Mnemonic == "tlbi") {
2806     const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2807     if (!TLBI)
2808       return TokError("invalid operand for TLBI instruction");
2809     else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2810       std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2811       setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2812       return TokError(Str.c_str());
2813     }
2814     createSysAlias(TLBI->Encoding, Operands, S);
2815   }
2816 
2817   Parser.Lex(); // Eat operand.
2818 
2819   bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2820   bool HasRegister = false;
2821 
2822   // Check for the optional register operand.
2823   if (parseOptionalToken(AsmToken::Comma)) {
2824     if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2825       return TokError("expected register operand");
2826     HasRegister = true;
2827   }
2828 
2829   if (ExpectRegister && !HasRegister)
2830     return TokError("specified " + Mnemonic + " op requires a register");
2831   else if (!ExpectRegister && HasRegister)
2832     return TokError("specified " + Mnemonic + " op does not use a register");
2833 
2834   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2835     return true;
2836 
2837   return false;
2838 }
2839 
2840 OperandMatchResultTy
tryParseBarrierOperand(OperandVector & Operands)2841 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2842   MCAsmParser &Parser = getParser();
2843   const AsmToken &Tok = Parser.getTok();
2844 
2845   if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2846     TokError("'csync' operand expected");
2847     return MatchOperand_ParseFail;
2848   // Can be either a #imm style literal or an option name
2849   } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2850     // Immediate operand.
2851     const MCExpr *ImmVal;
2852     SMLoc ExprLoc = getLoc();
2853     if (getParser().parseExpression(ImmVal))
2854       return MatchOperand_ParseFail;
2855     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2856     if (!MCE) {
2857       Error(ExprLoc, "immediate value expected for barrier operand");
2858       return MatchOperand_ParseFail;
2859     }
2860     if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2861       Error(ExprLoc, "barrier operand out of range");
2862       return MatchOperand_ParseFail;
2863     }
2864     auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2865     Operands.push_back(AArch64Operand::CreateBarrier(
2866         MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2867     return MatchOperand_Success;
2868   }
2869 
2870   if (Tok.isNot(AsmToken::Identifier)) {
2871     TokError("invalid operand for instruction");
2872     return MatchOperand_ParseFail;
2873   }
2874 
2875   auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
2876   // The only valid named option for ISB is 'sy'
2877   auto DB = AArch64DB::lookupDBByName(Tok.getString());
2878   if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2879     TokError("'sy' or #imm operand expected");
2880     return MatchOperand_ParseFail;
2881   // The only valid named option for TSB is 'csync'
2882   } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
2883     TokError("'csync' operand expected");
2884     return MatchOperand_ParseFail;
2885   } else if (!DB && !TSB) {
2886     TokError("invalid barrier option name");
2887     return MatchOperand_ParseFail;
2888   }
2889 
2890   Operands.push_back(AArch64Operand::CreateBarrier(
2891       DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
2892   Parser.Lex(); // Consume the option
2893 
2894   return MatchOperand_Success;
2895 }
2896 
2897 OperandMatchResultTy
tryParseSysReg(OperandVector & Operands)2898 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2899   MCAsmParser &Parser = getParser();
2900   const AsmToken &Tok = Parser.getTok();
2901 
2902   if (Tok.isNot(AsmToken::Identifier))
2903     return MatchOperand_NoMatch;
2904 
2905   int MRSReg, MSRReg;
2906   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2907   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2908     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2909     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2910   } else
2911     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2912 
2913   auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2914   unsigned PStateImm = -1;
2915   if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2916     PStateImm = PState->Encoding;
2917 
2918   Operands.push_back(
2919       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2920                                    PStateImm, getContext()));
2921   Parser.Lex(); // Eat identifier
2922 
2923   return MatchOperand_Success;
2924 }
2925 
2926 /// tryParseNeonVectorRegister - Parse a vector register operand.
tryParseNeonVectorRegister(OperandVector & Operands)2927 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2928   MCAsmParser &Parser = getParser();
2929   if (Parser.getTok().isNot(AsmToken::Identifier))
2930     return true;
2931 
2932   SMLoc S = getLoc();
2933   // Check for a vector register specifier first.
2934   StringRef Kind;
2935   unsigned Reg;
2936   OperandMatchResultTy Res =
2937       tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
2938   if (Res != MatchOperand_Success)
2939     return true;
2940 
2941   const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
2942   if (!KindRes)
2943     return true;
2944 
2945   unsigned ElementWidth = KindRes->second;
2946   Operands.push_back(
2947       AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
2948                                       S, getLoc(), getContext()));
2949 
2950   // If there was an explicit qualifier, that goes on as a literal text
2951   // operand.
2952   if (!Kind.empty())
2953     Operands.push_back(
2954         AArch64Operand::CreateToken(Kind, false, S, getContext()));
2955 
2956   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
2957 }
2958 
2959 OperandMatchResultTy
tryParseVectorIndex(OperandVector & Operands)2960 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
2961   SMLoc SIdx = getLoc();
2962   if (parseOptionalToken(AsmToken::LBrac)) {
2963     const MCExpr *ImmVal;
2964     if (getParser().parseExpression(ImmVal))
2965       return MatchOperand_NoMatch;
2966     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2967     if (!MCE) {
2968       TokError("immediate value expected for vector index");
2969       return MatchOperand_ParseFail;;
2970     }
2971 
2972     SMLoc E = getLoc();
2973 
2974     if (parseToken(AsmToken::RBrac, "']' expected"))
2975       return MatchOperand_ParseFail;;
2976 
2977     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2978                                                          E, getContext()));
2979     return MatchOperand_Success;
2980   }
2981 
2982   return MatchOperand_NoMatch;
2983 }
2984 
2985 // tryParseVectorRegister - Try to parse a vector register name with
2986 // optional kind specifier. If it is a register specifier, eat the token
2987 // and return it.
2988 OperandMatchResultTy
tryParseVectorRegister(unsigned & Reg,StringRef & Kind,RegKind MatchKind)2989 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
2990                                          RegKind MatchKind) {
2991   MCAsmParser &Parser = getParser();
2992   const AsmToken &Tok = Parser.getTok();
2993 
2994   if (Tok.isNot(AsmToken::Identifier))
2995     return MatchOperand_NoMatch;
2996 
2997   StringRef Name = Tok.getString();
2998   // If there is a kind specifier, it's separated from the register name by
2999   // a '.'.
3000   size_t Start = 0, Next = Name.find('.');
3001   StringRef Head = Name.slice(Start, Next);
3002   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3003 
3004   if (RegNum) {
3005     if (Next != StringRef::npos) {
3006       Kind = Name.slice(Next, StringRef::npos);
3007       if (!isValidVectorKind(Kind, MatchKind)) {
3008         TokError("invalid vector kind qualifier");
3009         return MatchOperand_ParseFail;
3010       }
3011     }
3012     Parser.Lex(); // Eat the register token.
3013 
3014     Reg = RegNum;
3015     return MatchOperand_Success;
3016   }
3017 
3018   return MatchOperand_NoMatch;
3019 }
3020 
3021 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3022 OperandMatchResultTy
tryParseSVEPredicateVector(OperandVector & Operands)3023 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3024   // Check for a SVE predicate register specifier first.
3025   const SMLoc S = getLoc();
3026   StringRef Kind;
3027   unsigned RegNum;
3028   auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3029   if (Res != MatchOperand_Success)
3030     return Res;
3031 
3032   const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3033   if (!KindRes)
3034     return MatchOperand_NoMatch;
3035 
3036   unsigned ElementWidth = KindRes->second;
3037   Operands.push_back(AArch64Operand::CreateVectorReg(
3038       RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3039       getLoc(), getContext()));
3040 
3041   // Not all predicates are followed by a '/m' or '/z'.
3042   MCAsmParser &Parser = getParser();
3043   if (Parser.getTok().isNot(AsmToken::Slash))
3044     return MatchOperand_Success;
3045 
3046   // But when they do they shouldn't have an element type suffix.
3047   if (!Kind.empty()) {
3048     Error(S, "not expecting size suffix");
3049     return MatchOperand_ParseFail;
3050   }
3051 
3052   // Add a literal slash as operand
3053   Operands.push_back(
3054       AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3055 
3056   Parser.Lex(); // Eat the slash.
3057 
3058   // Zeroing or merging?
3059   auto Pred = Parser.getTok().getString().lower();
3060   if (Pred != "z" && Pred != "m") {
3061     Error(getLoc(), "expecting 'm' or 'z' predication");
3062     return MatchOperand_ParseFail;
3063   }
3064 
3065   // Add zero/merge token.
3066   const char *ZM = Pred == "z" ? "z" : "m";
3067   Operands.push_back(
3068     AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3069 
3070   Parser.Lex(); // Eat zero/merge token.
3071   return MatchOperand_Success;
3072 }
3073 
3074 /// parseRegister - Parse a register operand.
parseRegister(OperandVector & Operands)3075 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3076   // Try for a Neon vector register.
3077   if (!tryParseNeonVectorRegister(Operands))
3078     return false;
3079 
3080   // Otherwise try for a scalar register.
3081   if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3082     return false;
3083 
3084   return true;
3085 }
3086 
/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed
/// by an ELF relocation specifier of the form ":specifier:", e.g.
/// ":lo12:symbol". On success \p ImmVal holds the parsed expression; when a
/// specifier was present it is wrapped in an AArch64MCExpr carrying the
/// matching variant kind. Returns true on failure (a diagnostic has already
/// been emitted).
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  // Note: "gottprel_lo12" deliberately maps to the _NC kind.
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind travels with it.
  // RefKind is only read on this path, where it was assigned above.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3156 
/// Parse a vector register list of the form "{ v0.8b, v1.8b, ... }" or the
/// range form "{ v0.8b - v3.8b }" for register kind \p VectorKind.
///
/// When \p ExpectMatch is false and the list is not of this kind, the
/// consumed '{' is pushed back to the lexer and MatchOperand_NoMatch is
/// returned so a different list parser (e.g. SVE vs. Neon) can try.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function: parses one vector register and decides
  // whether a non-match is diagnosed (NoMatchIsError) or tolerated.
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{ vA.K - vB.K }".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance between the endpoints, allowing wraparound past register 31.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vA.K, vB.K, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // At most four registers per list.
  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // A non-empty Kind (e.g. ".8b") determines element count and width; an
  // empty Kind leaves both zero.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3277 
3278 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
parseNeonVectorList(OperandVector & Operands)3279 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3280   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3281   if (ParseRes != MatchOperand_Success)
3282     return true;
3283 
3284   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3285 }
3286 
3287 OperandMatchResultTy
tryParseGPR64sp0Operand(OperandVector & Operands)3288 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3289   SMLoc StartLoc = getLoc();
3290 
3291   unsigned RegNum;
3292   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3293   if (Res != MatchOperand_Success)
3294     return Res;
3295 
3296   if (!parseOptionalToken(AsmToken::Comma)) {
3297     Operands.push_back(AArch64Operand::CreateReg(
3298         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3299     return MatchOperand_Success;
3300   }
3301 
3302   parseOptionalToken(AsmToken::Hash);
3303 
3304   if (getParser().getTok().isNot(AsmToken::Integer)) {
3305     Error(getLoc(), "index must be absent or #0");
3306     return MatchOperand_ParseFail;
3307   }
3308 
3309   const MCExpr *ImmVal;
3310   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3311       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3312     Error(getLoc(), "index must be absent or #0");
3313     return MatchOperand_ParseFail;
3314   }
3315 
3316   Operands.push_back(AArch64Operand::CreateReg(
3317       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3318   return MatchOperand_Success;
3319 }
3320 
/// Parse a scalar GPR operand, optionally followed by a shift/extend
/// decoration (e.g. "x0, lsl #3") when \p ParseShiftExtend is set.
/// \p EqTy records how this register compares against tied operands
/// (exact register, sub-register, or super-register).
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the register operand itself rather
  // than emitting it as a separate operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3355 
parseOptionalMulOperand(OperandVector & Operands)3356 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3357   MCAsmParser &Parser = getParser();
3358 
3359   // Some SVE instructions have a decoration after the immediate, i.e.
3360   // "mul vl". We parse them here and add tokens, which must be present in the
3361   // asm string in the tablegen instruction.
3362   bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3363   bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3364   if (!Parser.getTok().getString().equals_lower("mul") ||
3365       !(NextIsVL || NextIsHash))
3366     return true;
3367 
3368   Operands.push_back(
3369     AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3370   Parser.Lex(); // Eat the "mul"
3371 
3372   if (NextIsVL) {
3373     Operands.push_back(
3374         AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3375     Parser.Lex(); // Eat the "vl"
3376     return false;
3377   }
3378 
3379   if (NextIsHash) {
3380     Parser.Lex(); // Eat the #
3381     SMLoc S = getLoc();
3382 
3383     // Parse immediate operand.
3384     const MCExpr *ImmVal;
3385     if (!Parser.parseExpression(ImmVal))
3386       if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3387         Operands.push_back(AArch64Operand::CreateImm(
3388             MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3389             getContext()));
3390         return MatchOperand_Success;
3391       }
3392   }
3393 
3394   return Error(getLoc(), "expected 'vl' or '#<imm>'");
3395 }
3396 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// \p isCondCode requests that an identifier be parsed as a condition code;
/// \p invertCondCode additionally inverts it (used for cinc/cinv/cneg-style
/// aliases). Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocation-decorated)
    // expression immediate.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit the literal as the two raw tokens the matcher expects.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo: materialize the value with a mov when it fits,
    // otherwise place it in the constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // NOTE(review): 'S' is never assigned on this path, so the CreateImm
    // calls below receive a default-constructed (invalid) start location —
    // looks unintentional; confirm against diagnostics expectations.
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Strip trailing zero halfwords so the value fits a movz-with-shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3568 
regsEqual(const MCParsedAsmOperand & Op1,const MCParsedAsmOperand & Op2) const3569 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3570                                  const MCParsedAsmOperand &Op2) const {
3571   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3572   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3573   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3574       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3575     return MCTargetAsmParser::regsEqual(Op1, Op2);
3576 
3577   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3578          "Testing equality of non-scalar registers not supported");
3579 
3580   // Check if a registers match their sub/super register classes.
3581   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3582     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3583   if (AOp1.getRegEqualityTy() == EqualsSubReg)
3584     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3585   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3586     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3587   if (AOp2.getRegEqualityTy() == EqualsSubReg)
3588     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3589 
3590   return false;
3591 }
3592 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Handles bcond aliases (e.g. "beq" -> "b.eq"), the ".req" directive, the
/// ic/dc/at/tlbi SYS aliases, and splits dotted mnemonics into separate
/// tokens before reading the comma-separated operand list.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize the undotted conditional-branch spellings.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      return true;
    }

    // N counts operand position (1-based, starting from the second operand)
    // so the condition-code flags above can be applied at the right slot.
    unsigned N = 2;
    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3730 
isMatchingOrAlias(unsigned ZReg,unsigned Reg)3731 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3732   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3733   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3734          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3735          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3736          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3737          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3738          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3739 }
3740 
3741 // FIXME: This entire function is a giant hack to provide us with decent
3742 // operand range validation/diagnostics until TableGen/MC can be extended
3743 // to support autogeneration of this kind of validation.
validateInstruction(MCInst & Inst,SMLoc & IDLoc,SmallVectorImpl<SMLoc> & Loc)3744 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3745                                            SmallVectorImpl<SMLoc> &Loc) {
3746   const MCRegisterInfo *RI = getContext().getRegisterInfo();
3747   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3748 
3749   // A prefix only applies to the instruction following it.  Here we extract
3750   // prefix information for the next instruction before validating the current
3751   // one so that in the case of failure we don't erronously continue using the
3752   // current prefix.
3753   PrefixInfo Prefix = NextPrefix;
3754   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3755 
3756   // Before validating the instruction in isolation we run through the rules
3757   // applicable when it follows a prefix instruction.
3758   // NOTE: brk & hlt can be prefixed but require no additional validation.
3759   if (Prefix.isActive() &&
3760       (Inst.getOpcode() != AArch64::BRK) &&
3761       (Inst.getOpcode() != AArch64::HLT)) {
3762 
3763     // Prefixed intructions must have a destructive operand.
3764     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3765         AArch64::NotDestructive)
3766       return Error(IDLoc, "instruction is unpredictable when following a"
3767                    " movprfx, suggest replacing movprfx with mov");
3768 
3769     // Destination operands must match.
3770     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3771       return Error(Loc[0], "instruction is unpredictable when following a"
3772                    " movprfx writing to a different destination");
3773 
3774     // Destination operand must not be used in any other location.
3775     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3776       if (Inst.getOperand(i).isReg() &&
3777           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3778           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3779         return Error(Loc[0], "instruction is unpredictable when following a"
3780                      " movprfx and destination also used as non-destructive"
3781                      " source");
3782     }
3783 
3784     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3785     if (Prefix.isPredicated()) {
3786       int PgIdx = -1;
3787 
3788       // Find the instructions general predicate.
3789       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3790         if (Inst.getOperand(i).isReg() &&
3791             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3792           PgIdx = i;
3793           break;
3794         }
3795 
3796       // Instruction must be predicated if the movprfx is predicated.
3797       if (PgIdx == -1 ||
3798           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3799         return Error(IDLoc, "instruction is unpredictable when following a"
3800                      " predicated movprfx, suggest using unpredicated movprfx");
3801 
3802       // Instruction must use same general predicate as the movprfx.
3803       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3804         return Error(IDLoc, "instruction is unpredictable when following a"
3805                      " predicated movprfx using a different general predicate");
3806 
3807       // Instruction element type must match the movprfx.
3808       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3809         return Error(IDLoc, "instruction is unpredictable when following a"
3810                      " predicated movprfx with a different element size");
3811     }
3812   }
3813 
3814   // Check for indexed addressing modes w/ the base register being the
3815   // same as a destination/source register or pair load where
3816   // the Rt == Rt2. All of those are undefined behaviour.
3817   switch (Inst.getOpcode()) {
3818   case AArch64::LDPSWpre:
3819   case AArch64::LDPWpost:
3820   case AArch64::LDPWpre:
3821   case AArch64::LDPXpost:
3822   case AArch64::LDPXpre: {
3823     unsigned Rt = Inst.getOperand(1).getReg();
3824     unsigned Rt2 = Inst.getOperand(2).getReg();
3825     unsigned Rn = Inst.getOperand(3).getReg();
3826     if (RI->isSubRegisterEq(Rn, Rt))
3827       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3828                            "is also a destination");
3829     if (RI->isSubRegisterEq(Rn, Rt2))
3830       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3831                            "is also a destination");
3832     LLVM_FALLTHROUGH;
3833   }
3834   case AArch64::LDPDi:
3835   case AArch64::LDPQi:
3836   case AArch64::LDPSi:
3837   case AArch64::LDPSWi:
3838   case AArch64::LDPWi:
3839   case AArch64::LDPXi: {
3840     unsigned Rt = Inst.getOperand(0).getReg();
3841     unsigned Rt2 = Inst.getOperand(1).getReg();
3842     if (Rt == Rt2)
3843       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3844     break;
3845   }
3846   case AArch64::LDPDpost:
3847   case AArch64::LDPDpre:
3848   case AArch64::LDPQpost:
3849   case AArch64::LDPQpre:
3850   case AArch64::LDPSpost:
3851   case AArch64::LDPSpre:
3852   case AArch64::LDPSWpost: {
3853     unsigned Rt = Inst.getOperand(1).getReg();
3854     unsigned Rt2 = Inst.getOperand(2).getReg();
3855     if (Rt == Rt2)
3856       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3857     break;
3858   }
3859   case AArch64::STPDpost:
3860   case AArch64::STPDpre:
3861   case AArch64::STPQpost:
3862   case AArch64::STPQpre:
3863   case AArch64::STPSpost:
3864   case AArch64::STPSpre:
3865   case AArch64::STPWpost:
3866   case AArch64::STPWpre:
3867   case AArch64::STPXpost:
3868   case AArch64::STPXpre: {
3869     unsigned Rt = Inst.getOperand(1).getReg();
3870     unsigned Rt2 = Inst.getOperand(2).getReg();
3871     unsigned Rn = Inst.getOperand(3).getReg();
3872     if (RI->isSubRegisterEq(Rn, Rt))
3873       return Error(Loc[0], "unpredictable STP instruction, writeback base "
3874                            "is also a source");
3875     if (RI->isSubRegisterEq(Rn, Rt2))
3876       return Error(Loc[1], "unpredictable STP instruction, writeback base "
3877                            "is also a source");
3878     break;
3879   }
3880   case AArch64::LDRBBpre:
3881   case AArch64::LDRBpre:
3882   case AArch64::LDRHHpre:
3883   case AArch64::LDRHpre:
3884   case AArch64::LDRSBWpre:
3885   case AArch64::LDRSBXpre:
3886   case AArch64::LDRSHWpre:
3887   case AArch64::LDRSHXpre:
3888   case AArch64::LDRSWpre:
3889   case AArch64::LDRWpre:
3890   case AArch64::LDRXpre:
3891   case AArch64::LDRBBpost:
3892   case AArch64::LDRBpost:
3893   case AArch64::LDRHHpost:
3894   case AArch64::LDRHpost:
3895   case AArch64::LDRSBWpost:
3896   case AArch64::LDRSBXpost:
3897   case AArch64::LDRSHWpost:
3898   case AArch64::LDRSHXpost:
3899   case AArch64::LDRSWpost:
3900   case AArch64::LDRWpost:
3901   case AArch64::LDRXpost: {
3902     unsigned Rt = Inst.getOperand(1).getReg();
3903     unsigned Rn = Inst.getOperand(2).getReg();
3904     if (RI->isSubRegisterEq(Rn, Rt))
3905       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3906                            "is also a source");
3907     break;
3908   }
3909   case AArch64::STRBBpost:
3910   case AArch64::STRBpost:
3911   case AArch64::STRHHpost:
3912   case AArch64::STRHpost:
3913   case AArch64::STRWpost:
3914   case AArch64::STRXpost:
3915   case AArch64::STRBBpre:
3916   case AArch64::STRBpre:
3917   case AArch64::STRHHpre:
3918   case AArch64::STRHpre:
3919   case AArch64::STRWpre:
3920   case AArch64::STRXpre: {
3921     unsigned Rt = Inst.getOperand(1).getReg();
3922     unsigned Rn = Inst.getOperand(2).getReg();
3923     if (RI->isSubRegisterEq(Rn, Rt))
3924       return Error(Loc[0], "unpredictable STR instruction, writeback base "
3925                            "is also a source");
3926     break;
3927   }
3928   case AArch64::STXRB:
3929   case AArch64::STXRH:
3930   case AArch64::STXRW:
3931   case AArch64::STXRX:
3932   case AArch64::STLXRB:
3933   case AArch64::STLXRH:
3934   case AArch64::STLXRW:
3935   case AArch64::STLXRX: {
3936     unsigned Rs = Inst.getOperand(0).getReg();
3937     unsigned Rt = Inst.getOperand(1).getReg();
3938     unsigned Rn = Inst.getOperand(2).getReg();
3939     if (RI->isSubRegisterEq(Rt, Rs) ||
3940         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3941       return Error(Loc[0],
3942                    "unpredictable STXR instruction, status is also a source");
3943     break;
3944   }
3945   case AArch64::STXPW:
3946   case AArch64::STXPX:
3947   case AArch64::STLXPW:
3948   case AArch64::STLXPX: {
3949     unsigned Rs = Inst.getOperand(0).getReg();
3950     unsigned Rt1 = Inst.getOperand(1).getReg();
3951     unsigned Rt2 = Inst.getOperand(2).getReg();
3952     unsigned Rn = Inst.getOperand(3).getReg();
3953     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
3954         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3955       return Error(Loc[0],
3956                    "unpredictable STXP instruction, status is also a source");
3957     break;
3958   }
3959   }
3960 
3961 
3962   // Now check immediate ranges. Separate from the above as there is overlap
3963   // in the instructions being checked and this keeps the nested conditionals
3964   // to a minimum.
3965   switch (Inst.getOpcode()) {
3966   case AArch64::ADDSWri:
3967   case AArch64::ADDSXri:
3968   case AArch64::ADDWri:
3969   case AArch64::ADDXri:
3970   case AArch64::SUBSWri:
3971   case AArch64::SUBSXri:
3972   case AArch64::SUBWri:
3973   case AArch64::SUBXri: {
3974     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3975     // some slight duplication here.
3976     if (Inst.getOperand(2).isExpr()) {
3977       const MCExpr *Expr = Inst.getOperand(2).getExpr();
3978       AArch64MCExpr::VariantKind ELFRefKind;
3979       MCSymbolRefExpr::VariantKind DarwinRefKind;
3980       int64_t Addend;
3981       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3982 
3983         // Only allow these with ADDXri.
3984         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3985              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3986             Inst.getOpcode() == AArch64::ADDXri)
3987           return false;
3988 
3989         // Only allow these with ADDXri/ADDWri
3990         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3991              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3992              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3993              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3994              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3995              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3996              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3997              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
3998              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
3999              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4000             (Inst.getOpcode() == AArch64::ADDXri ||
4001              Inst.getOpcode() == AArch64::ADDWri))
4002           return false;
4003 
4004         // Don't allow symbol refs in the immediate field otherwise
4005         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4006         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4007         // 'cmp w0, 'borked')
4008         return Error(Loc.back(), "invalid immediate expression");
4009       }
4010       // We don't validate more complex expressions here
4011     }
4012     return false;
4013   }
4014   default:
4015     return false;
4016   }
4017 }
4018 
4019 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4020                                              unsigned VariantID = 0);
4021 
showMatchError(SMLoc Loc,unsigned ErrCode,uint64_t ErrorInfo,OperandVector & Operands)4022 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4023                                       uint64_t ErrorInfo,
4024                                       OperandVector &Operands) {
4025   switch (ErrCode) {
4026   case Match_InvalidTiedOperand: {
4027     RegConstraintEqualityTy EqTy =
4028         static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4029             .getRegEqualityTy();
4030     switch (EqTy) {
4031     case RegConstraintEqualityTy::EqualsSubReg:
4032       return Error(Loc, "operand must be 64-bit form of destination register");
4033     case RegConstraintEqualityTy::EqualsSuperReg:
4034       return Error(Loc, "operand must be 32-bit form of destination register");
4035     case RegConstraintEqualityTy::EqualsReg:
4036       return Error(Loc, "operand must match destination register");
4037     }
4038     llvm_unreachable("Unknown RegConstraintEqualityTy");
4039   }
4040   case Match_MissingFeature:
4041     return Error(Loc,
4042                  "instruction requires a CPU feature not currently enabled");
4043   case Match_InvalidOperand:
4044     return Error(Loc, "invalid operand for instruction");
4045   case Match_InvalidSuffix:
4046     return Error(Loc, "invalid type suffix for instruction");
4047   case Match_InvalidCondCode:
4048     return Error(Loc, "expected AArch64 condition code");
4049   case Match_AddSubRegExtendSmall:
4050     return Error(Loc,
4051       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4052   case Match_AddSubRegExtendLarge:
4053     return Error(Loc,
4054       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4055   case Match_AddSubSecondSource:
4056     return Error(Loc,
4057       "expected compatible register, symbol or integer in range [0, 4095]");
4058   case Match_LogicalSecondSource:
4059     return Error(Loc, "expected compatible register or logical immediate");
4060   case Match_InvalidMovImm32Shift:
4061     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4062   case Match_InvalidMovImm64Shift:
4063     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4064   case Match_AddSubRegShift32:
4065     return Error(Loc,
4066        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4067   case Match_AddSubRegShift64:
4068     return Error(Loc,
4069        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4070   case Match_InvalidFPImm:
4071     return Error(Loc,
4072                  "expected compatible register or floating-point constant");
4073   case Match_InvalidMemoryIndexedSImm6:
4074     return Error(Loc, "index must be an integer in range [-32, 31].");
4075   case Match_InvalidMemoryIndexedSImm5:
4076     return Error(Loc, "index must be an integer in range [-16, 15].");
4077   case Match_InvalidMemoryIndexed1SImm4:
4078     return Error(Loc, "index must be an integer in range [-8, 7].");
4079   case Match_InvalidMemoryIndexed2SImm4:
4080     return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4081   case Match_InvalidMemoryIndexed3SImm4:
4082     return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4083   case Match_InvalidMemoryIndexed4SImm4:
4084     return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4085   case Match_InvalidMemoryIndexed16SImm4:
4086     return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4087   case Match_InvalidMemoryIndexed1SImm6:
4088     return Error(Loc, "index must be an integer in range [-32, 31].");
4089   case Match_InvalidMemoryIndexedSImm8:
4090     return Error(Loc, "index must be an integer in range [-128, 127].");
4091   case Match_InvalidMemoryIndexedSImm9:
4092     return Error(Loc, "index must be an integer in range [-256, 255].");
4093   case Match_InvalidMemoryIndexed8SImm10:
4094     return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4095   case Match_InvalidMemoryIndexed4SImm7:
4096     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4097   case Match_InvalidMemoryIndexed8SImm7:
4098     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4099   case Match_InvalidMemoryIndexed16SImm7:
4100     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4101   case Match_InvalidMemoryIndexed8UImm5:
4102     return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4103   case Match_InvalidMemoryIndexed4UImm5:
4104     return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4105   case Match_InvalidMemoryIndexed2UImm5:
4106     return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4107   case Match_InvalidMemoryIndexed8UImm6:
4108     return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4109   case Match_InvalidMemoryIndexed4UImm6:
4110     return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4111   case Match_InvalidMemoryIndexed2UImm6:
4112     return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4113   case Match_InvalidMemoryIndexed1UImm6:
4114     return Error(Loc, "index must be in range [0, 63].");
4115   case Match_InvalidMemoryWExtend8:
4116     return Error(Loc,
4117                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4118   case Match_InvalidMemoryWExtend16:
4119     return Error(Loc,
4120                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4121   case Match_InvalidMemoryWExtend32:
4122     return Error(Loc,
4123                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4124   case Match_InvalidMemoryWExtend64:
4125     return Error(Loc,
4126                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4127   case Match_InvalidMemoryWExtend128:
4128     return Error(Loc,
4129                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4130   case Match_InvalidMemoryXExtend8:
4131     return Error(Loc,
4132                  "expected 'lsl' or 'sxtx' with optional shift of #0");
4133   case Match_InvalidMemoryXExtend16:
4134     return Error(Loc,
4135                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4136   case Match_InvalidMemoryXExtend32:
4137     return Error(Loc,
4138                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4139   case Match_InvalidMemoryXExtend64:
4140     return Error(Loc,
4141                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4142   case Match_InvalidMemoryXExtend128:
4143     return Error(Loc,
4144                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4145   case Match_InvalidMemoryIndexed1:
4146     return Error(Loc, "index must be an integer in range [0, 4095].");
4147   case Match_InvalidMemoryIndexed2:
4148     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4149   case Match_InvalidMemoryIndexed4:
4150     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4151   case Match_InvalidMemoryIndexed8:
4152     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4153   case Match_InvalidMemoryIndexed16:
4154     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4155   case Match_InvalidImm0_1:
4156     return Error(Loc, "immediate must be an integer in range [0, 1].");
4157   case Match_InvalidImm0_7:
4158     return Error(Loc, "immediate must be an integer in range [0, 7].");
4159   case Match_InvalidImm0_15:
4160     return Error(Loc, "immediate must be an integer in range [0, 15].");
4161   case Match_InvalidImm0_31:
4162     return Error(Loc, "immediate must be an integer in range [0, 31].");
4163   case Match_InvalidImm0_63:
4164     return Error(Loc, "immediate must be an integer in range [0, 63].");
4165   case Match_InvalidImm0_127:
4166     return Error(Loc, "immediate must be an integer in range [0, 127].");
4167   case Match_InvalidImm0_255:
4168     return Error(Loc, "immediate must be an integer in range [0, 255].");
4169   case Match_InvalidImm0_65535:
4170     return Error(Loc, "immediate must be an integer in range [0, 65535].");
4171   case Match_InvalidImm1_8:
4172     return Error(Loc, "immediate must be an integer in range [1, 8].");
4173   case Match_InvalidImm1_16:
4174     return Error(Loc, "immediate must be an integer in range [1, 16].");
4175   case Match_InvalidImm1_32:
4176     return Error(Loc, "immediate must be an integer in range [1, 32].");
4177   case Match_InvalidImm1_64:
4178     return Error(Loc, "immediate must be an integer in range [1, 64].");
4179   case Match_InvalidSVEAddSubImm8:
4180     return Error(Loc, "immediate must be an integer in range [0, 255]"
4181                       " with a shift amount of 0");
4182   case Match_InvalidSVEAddSubImm16:
4183   case Match_InvalidSVEAddSubImm32:
4184   case Match_InvalidSVEAddSubImm64:
4185     return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4186                       "multiple of 256 in range [256, 65280]");
4187   case Match_InvalidSVECpyImm8:
4188     return Error(Loc, "immediate must be an integer in range [-128, 255]"
4189                       " with a shift amount of 0");
4190   case Match_InvalidSVECpyImm16:
4191     return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4192                       "multiple of 256 in range [-32768, 65280]");
4193   case Match_InvalidSVECpyImm32:
4194   case Match_InvalidSVECpyImm64:
4195     return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4196                       "multiple of 256 in range [-32768, 32512]");
4197   case Match_InvalidIndexRange1_1:
4198     return Error(Loc, "expected lane specifier '[1]'");
4199   case Match_InvalidIndexRange0_15:
4200     return Error(Loc, "vector lane must be an integer in range [0, 15].");
4201   case Match_InvalidIndexRange0_7:
4202     return Error(Loc, "vector lane must be an integer in range [0, 7].");
4203   case Match_InvalidIndexRange0_3:
4204     return Error(Loc, "vector lane must be an integer in range [0, 3].");
4205   case Match_InvalidIndexRange0_1:
4206     return Error(Loc, "vector lane must be an integer in range [0, 1].");
4207   case Match_InvalidSVEIndexRange0_63:
4208     return Error(Loc, "vector lane must be an integer in range [0, 63].");
4209   case Match_InvalidSVEIndexRange0_31:
4210     return Error(Loc, "vector lane must be an integer in range [0, 31].");
4211   case Match_InvalidSVEIndexRange0_15:
4212     return Error(Loc, "vector lane must be an integer in range [0, 15].");
4213   case Match_InvalidSVEIndexRange0_7:
4214     return Error(Loc, "vector lane must be an integer in range [0, 7].");
4215   case Match_InvalidSVEIndexRange0_3:
4216     return Error(Loc, "vector lane must be an integer in range [0, 3].");
4217   case Match_InvalidLabel:
4218     return Error(Loc, "expected label or encodable integer pc offset");
4219   case Match_MRS:
4220     return Error(Loc, "expected readable system register");
4221   case Match_MSR:
4222     return Error(Loc, "expected writable system register or pstate");
4223   case Match_InvalidComplexRotationEven:
4224     return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4225   case Match_InvalidComplexRotationOdd:
4226     return Error(Loc, "complex rotation must be 90 or 270.");
4227   case Match_MnemonicFail: {
4228     std::string Suggestion = AArch64MnemonicSpellCheck(
4229         ((AArch64Operand &)*Operands[0]).getToken(),
4230         ComputeAvailableFeatures(STI->getFeatureBits()));
4231     return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4232   }
4233   case Match_InvalidGPR64shifted8:
4234     return Error(Loc, "register must be x0..x30 or xzr, without shift");
4235   case Match_InvalidGPR64shifted16:
4236     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4237   case Match_InvalidGPR64shifted32:
4238     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4239   case Match_InvalidGPR64shifted64:
4240     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4241   case Match_InvalidGPR64NoXZRshifted8:
4242     return Error(Loc, "register must be x0..x30 without shift");
4243   case Match_InvalidGPR64NoXZRshifted16:
4244     return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4245   case Match_InvalidGPR64NoXZRshifted32:
4246     return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4247   case Match_InvalidGPR64NoXZRshifted64:
4248     return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4249   case Match_InvalidZPR32UXTW8:
4250   case Match_InvalidZPR32SXTW8:
4251     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4252   case Match_InvalidZPR32UXTW16:
4253   case Match_InvalidZPR32SXTW16:
4254     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4255   case Match_InvalidZPR32UXTW32:
4256   case Match_InvalidZPR32SXTW32:
4257     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4258   case Match_InvalidZPR32UXTW64:
4259   case Match_InvalidZPR32SXTW64:
4260     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4261   case Match_InvalidZPR64UXTW8:
4262   case Match_InvalidZPR64SXTW8:
4263     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4264   case Match_InvalidZPR64UXTW16:
4265   case Match_InvalidZPR64SXTW16:
4266     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4267   case Match_InvalidZPR64UXTW32:
4268   case Match_InvalidZPR64SXTW32:
4269     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4270   case Match_InvalidZPR64UXTW64:
4271   case Match_InvalidZPR64SXTW64:
4272     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4273   case Match_InvalidZPR32LSL8:
4274     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4275   case Match_InvalidZPR32LSL16:
4276     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4277   case Match_InvalidZPR32LSL32:
4278     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4279   case Match_InvalidZPR32LSL64:
4280     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4281   case Match_InvalidZPR64LSL8:
4282     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4283   case Match_InvalidZPR64LSL16:
4284     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4285   case Match_InvalidZPR64LSL32:
4286     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4287   case Match_InvalidZPR64LSL64:
4288     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4289   case Match_InvalidZPR0:
4290     return Error(Loc, "expected register without element width sufix");
4291   case Match_InvalidZPR8:
4292   case Match_InvalidZPR16:
4293   case Match_InvalidZPR32:
4294   case Match_InvalidZPR64:
4295   case Match_InvalidZPR128:
4296     return Error(Loc, "invalid element width");
4297   case Match_InvalidZPR_3b8:
4298     return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4299   case Match_InvalidZPR_3b16:
4300     return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4301   case Match_InvalidZPR_3b32:
4302     return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4303   case Match_InvalidZPR_4b16:
4304     return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4305   case Match_InvalidZPR_4b32:
4306     return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4307   case Match_InvalidZPR_4b64:
4308     return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4309   case Match_InvalidSVEPattern:
4310     return Error(Loc, "invalid predicate pattern");
4311   case Match_InvalidSVEPredicateAnyReg:
4312   case Match_InvalidSVEPredicateBReg:
4313   case Match_InvalidSVEPredicateHReg:
4314   case Match_InvalidSVEPredicateSReg:
4315   case Match_InvalidSVEPredicateDReg:
4316     return Error(Loc, "invalid predicate register.");
4317   case Match_InvalidSVEPredicate3bAnyReg:
4318   case Match_InvalidSVEPredicate3bBReg:
4319   case Match_InvalidSVEPredicate3bHReg:
4320   case Match_InvalidSVEPredicate3bSReg:
4321   case Match_InvalidSVEPredicate3bDReg:
4322     return Error(Loc, "restricted predicate has range [0, 7].");
4323   case Match_InvalidSVEExactFPImmOperandHalfOne:
4324     return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4325   case Match_InvalidSVEExactFPImmOperandHalfTwo:
4326     return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4327   case Match_InvalidSVEExactFPImmOperandZeroOne:
4328     return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4329   default:
4330     llvm_unreachable("unexpected error code!");
4331   }
4332 }
4333 
4334 static const char *getSubtargetFeatureName(uint64_t Val);
4335 
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm)4336 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4337                                                OperandVector &Operands,
4338                                                MCStreamer &Out,
4339                                                uint64_t &ErrorInfo,
4340                                                bool MatchingInlineAsm) {
4341   assert(!Operands.empty() && "Unexpect empty operand list!");
4342   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4343   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4344 
4345   StringRef Tok = Op.getToken();
4346   unsigned NumOperands = Operands.size();
4347 
4348   if (NumOperands == 4 && Tok == "lsl") {
4349     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4350     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4351     if (Op2.isScalarReg() && Op3.isImm()) {
4352       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4353       if (Op3CE) {
4354         uint64_t Op3Val = Op3CE->getValue();
4355         uint64_t NewOp3Val = 0;
4356         uint64_t NewOp4Val = 0;
4357         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4358                 Op2.getReg())) {
4359           NewOp3Val = (32 - Op3Val) & 0x1f;
4360           NewOp4Val = 31 - Op3Val;
4361         } else {
4362           NewOp3Val = (64 - Op3Val) & 0x3f;
4363           NewOp4Val = 63 - Op3Val;
4364         }
4365 
4366         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4367         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4368 
4369         Operands[0] = AArch64Operand::CreateToken(
4370             "ubfm", false, Op.getStartLoc(), getContext());
4371         Operands.push_back(AArch64Operand::CreateImm(
4372             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4373         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4374                                                 Op3.getEndLoc(), getContext());
4375       }
4376     }
4377   } else if (NumOperands == 4 && Tok == "bfc") {
4378     // FIXME: Horrible hack to handle BFC->BFM alias.
4379     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4380     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4381     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4382 
4383     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4384       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4385       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4386 
4387       if (LSBCE && WidthCE) {
4388         uint64_t LSB = LSBCE->getValue();
4389         uint64_t Width = WidthCE->getValue();
4390 
4391         uint64_t RegWidth = 0;
4392         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4393                 Op1.getReg()))
4394           RegWidth = 64;
4395         else
4396           RegWidth = 32;
4397 
4398         if (LSB >= RegWidth)
4399           return Error(LSBOp.getStartLoc(),
4400                        "expected integer in range [0, 31]");
4401         if (Width < 1 || Width > RegWidth)
4402           return Error(WidthOp.getStartLoc(),
4403                        "expected integer in range [1, 32]");
4404 
4405         uint64_t ImmR = 0;
4406         if (RegWidth == 32)
4407           ImmR = (32 - LSB) & 0x1f;
4408         else
4409           ImmR = (64 - LSB) & 0x3f;
4410 
4411         uint64_t ImmS = Width - 1;
4412 
4413         if (ImmR != 0 && ImmS >= ImmR)
4414           return Error(WidthOp.getStartLoc(),
4415                        "requested insert overflows register");
4416 
4417         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4418         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4419         Operands[0] = AArch64Operand::CreateToken(
4420               "bfm", false, Op.getStartLoc(), getContext());
4421         Operands[2] = AArch64Operand::CreateReg(
4422             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4423             SMLoc(), SMLoc(), getContext());
4424         Operands[3] = AArch64Operand::CreateImm(
4425             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4426         Operands.emplace_back(
4427             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4428                                       WidthOp.getEndLoc(), getContext()));
4429       }
4430     }
4431   } else if (NumOperands == 5) {
4432     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4433     // UBFIZ -> UBFM aliases.
4434     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4435       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4436       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4437       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4438 
4439       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4440         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4441         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4442 
4443         if (Op3CE && Op4CE) {
4444           uint64_t Op3Val = Op3CE->getValue();
4445           uint64_t Op4Val = Op4CE->getValue();
4446 
4447           uint64_t RegWidth = 0;
4448           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4449                   Op1.getReg()))
4450             RegWidth = 64;
4451           else
4452             RegWidth = 32;
4453 
4454           if (Op3Val >= RegWidth)
4455             return Error(Op3.getStartLoc(),
4456                          "expected integer in range [0, 31]");
4457           if (Op4Val < 1 || Op4Val > RegWidth)
4458             return Error(Op4.getStartLoc(),
4459                          "expected integer in range [1, 32]");
4460 
4461           uint64_t NewOp3Val = 0;
4462           if (RegWidth == 32)
4463             NewOp3Val = (32 - Op3Val) & 0x1f;
4464           else
4465             NewOp3Val = (64 - Op3Val) & 0x3f;
4466 
4467           uint64_t NewOp4Val = Op4Val - 1;
4468 
4469           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4470             return Error(Op4.getStartLoc(),
4471                          "requested insert overflows register");
4472 
4473           const MCExpr *NewOp3 =
4474               MCConstantExpr::create(NewOp3Val, getContext());
4475           const MCExpr *NewOp4 =
4476               MCConstantExpr::create(NewOp4Val, getContext());
4477           Operands[3] = AArch64Operand::CreateImm(
4478               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4479           Operands[4] = AArch64Operand::CreateImm(
4480               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4481           if (Tok == "bfi")
4482             Operands[0] = AArch64Operand::CreateToken(
4483                 "bfm", false, Op.getStartLoc(), getContext());
4484           else if (Tok == "sbfiz")
4485             Operands[0] = AArch64Operand::CreateToken(
4486                 "sbfm", false, Op.getStartLoc(), getContext());
4487           else if (Tok == "ubfiz")
4488             Operands[0] = AArch64Operand::CreateToken(
4489                 "ubfm", false, Op.getStartLoc(), getContext());
4490           else
4491             llvm_unreachable("No valid mnemonic for alias?");
4492         }
4493       }
4494 
4495       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4496       // UBFX -> UBFM aliases.
4497     } else if (NumOperands == 5 &&
4498                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4499       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4500       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4501       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4502 
4503       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4504         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4505         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4506 
4507         if (Op3CE && Op4CE) {
4508           uint64_t Op3Val = Op3CE->getValue();
4509           uint64_t Op4Val = Op4CE->getValue();
4510 
4511           uint64_t RegWidth = 0;
4512           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4513                   Op1.getReg()))
4514             RegWidth = 64;
4515           else
4516             RegWidth = 32;
4517 
4518           if (Op3Val >= RegWidth)
4519             return Error(Op3.getStartLoc(),
4520                          "expected integer in range [0, 31]");
4521           if (Op4Val < 1 || Op4Val > RegWidth)
4522             return Error(Op4.getStartLoc(),
4523                          "expected integer in range [1, 32]");
4524 
4525           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4526 
4527           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4528             return Error(Op4.getStartLoc(),
4529                          "requested extract overflows register");
4530 
4531           const MCExpr *NewOp4 =
4532               MCConstantExpr::create(NewOp4Val, getContext());
4533           Operands[4] = AArch64Operand::CreateImm(
4534               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4535           if (Tok == "bfxil")
4536             Operands[0] = AArch64Operand::CreateToken(
4537                 "bfm", false, Op.getStartLoc(), getContext());
4538           else if (Tok == "sbfx")
4539             Operands[0] = AArch64Operand::CreateToken(
4540                 "sbfm", false, Op.getStartLoc(), getContext());
4541           else if (Tok == "ubfx")
4542             Operands[0] = AArch64Operand::CreateToken(
4543                 "ubfm", false, Op.getStartLoc(), getContext());
4544           else
4545             llvm_unreachable("No valid mnemonic for alias?");
4546         }
4547       }
4548     }
4549   }
4550 
4551   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4552   // instruction for FP registers correctly in some rare circumstances. Convert
4553   // it to a safe instruction and warn (because silently changing someone's
4554   // assembly is rude).
4555   if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4556       NumOperands == 4 && Tok == "movi") {
4557     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4558     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4559     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4560     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4561         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4562       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4563       if (Suffix.lower() == ".2d" &&
4564           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4565         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4566                 " correctly on this CPU, converting to equivalent movi.16b");
4567         // Switch the suffix to .16b.
4568         unsigned Idx = Op1.isToken() ? 1 : 2;
4569         Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4570                                                   getContext());
4571       }
4572     }
4573   }
4574 
4575   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4576   //        InstAlias can't quite handle this since the reg classes aren't
4577   //        subclasses.
4578   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4579     // The source register can be Wn here, but the matcher expects a
4580     // GPR64. Twiddle it here if necessary.
4581     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4582     if (Op.isScalarReg()) {
4583       unsigned Reg = getXRegFromWReg(Op.getReg());
4584       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4585                                               Op.getStartLoc(), Op.getEndLoc(),
4586                                               getContext());
4587     }
4588   }
4589   // FIXME: Likewise for sxt[bh] with a Xd dst operand
4590   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4591     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4592     if (Op.isScalarReg() &&
4593         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4594             Op.getReg())) {
4595       // The source register can be Wn here, but the matcher expects a
4596       // GPR64. Twiddle it here if necessary.
4597       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4598       if (Op.isScalarReg()) {
4599         unsigned Reg = getXRegFromWReg(Op.getReg());
4600         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4601                                                 Op.getStartLoc(),
4602                                                 Op.getEndLoc(), getContext());
4603       }
4604     }
4605   }
4606   // FIXME: Likewise for uxt[bh] with a Xd dst operand
4607   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4608     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4609     if (Op.isScalarReg() &&
4610         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4611             Op.getReg())) {
4612       // The source register can be Wn here, but the matcher expects a
4613       // GPR32. Twiddle it here if necessary.
4614       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4615       if (Op.isScalarReg()) {
4616         unsigned Reg = getWRegFromXReg(Op.getReg());
4617         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4618                                                 Op.getStartLoc(),
4619                                                 Op.getEndLoc(), getContext());
4620       }
4621     }
4622   }
4623 
4624   MCInst Inst;
4625   // First try to match against the secondary set of tables containing the
4626   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4627   unsigned MatchResult =
4628       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4629 
4630   // If that fails, try against the alternate table containing long-form NEON:
4631   // "fadd v0.2s, v1.2s, v2.2s"
4632   if (MatchResult != Match_Success) {
4633     // But first, save the short-form match result: we can use it in case the
4634     // long-form match also fails.
4635     auto ShortFormNEONErrorInfo = ErrorInfo;
4636     auto ShortFormNEONMatchResult = MatchResult;
4637 
4638     MatchResult =
4639         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4640 
4641     // Now, both matches failed, and the long-form match failed on the mnemonic
4642     // suffix token operand.  The short-form match failure is probably more
4643     // relevant: use it instead.
4644     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4645         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4646         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4647       MatchResult = ShortFormNEONMatchResult;
4648       ErrorInfo = ShortFormNEONErrorInfo;
4649     }
4650   }
4651 
4652   switch (MatchResult) {
4653   case Match_Success: {
4654     // Perform range checking and other semantic validations
4655     SmallVector<SMLoc, 8> OperandLocs;
4656     NumOperands = Operands.size();
4657     for (unsigned i = 1; i < NumOperands; ++i)
4658       OperandLocs.push_back(Operands[i]->getStartLoc());
4659     if (validateInstruction(Inst, IDLoc, OperandLocs))
4660       return true;
4661 
4662     Inst.setLoc(IDLoc);
4663     Out.EmitInstruction(Inst, getSTI());
4664     return false;
4665   }
4666   case Match_MissingFeature: {
4667     assert(ErrorInfo && "Unknown missing feature!");
4668     // Special case the error message for the very common case where only
4669     // a single subtarget feature is missing (neon, e.g.).
4670     std::string Msg = "instruction requires:";
4671     uint64_t Mask = 1;
4672     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4673       if (ErrorInfo & Mask) {
4674         Msg += " ";
4675         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4676       }
4677       Mask <<= 1;
4678     }
4679     return Error(IDLoc, Msg);
4680   }
4681   case Match_MnemonicFail:
4682     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4683   case Match_InvalidOperand: {
4684     SMLoc ErrorLoc = IDLoc;
4685 
4686     if (ErrorInfo != ~0ULL) {
4687       if (ErrorInfo >= Operands.size())
4688         return Error(IDLoc, "too few operands for instruction",
4689                      SMRange(IDLoc, getTok().getLoc()));
4690 
4691       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4692       if (ErrorLoc == SMLoc())
4693         ErrorLoc = IDLoc;
4694     }
4695     // If the match failed on a suffix token operand, tweak the diagnostic
4696     // accordingly.
4697     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4698         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4699       MatchResult = Match_InvalidSuffix;
4700 
4701     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4702   }
4703   case Match_InvalidTiedOperand:
4704   case Match_InvalidMemoryIndexed1:
4705   case Match_InvalidMemoryIndexed2:
4706   case Match_InvalidMemoryIndexed4:
4707   case Match_InvalidMemoryIndexed8:
4708   case Match_InvalidMemoryIndexed16:
4709   case Match_InvalidCondCode:
4710   case Match_AddSubRegExtendSmall:
4711   case Match_AddSubRegExtendLarge:
4712   case Match_AddSubSecondSource:
4713   case Match_LogicalSecondSource:
4714   case Match_AddSubRegShift32:
4715   case Match_AddSubRegShift64:
4716   case Match_InvalidMovImm32Shift:
4717   case Match_InvalidMovImm64Shift:
4718   case Match_InvalidFPImm:
4719   case Match_InvalidMemoryWExtend8:
4720   case Match_InvalidMemoryWExtend16:
4721   case Match_InvalidMemoryWExtend32:
4722   case Match_InvalidMemoryWExtend64:
4723   case Match_InvalidMemoryWExtend128:
4724   case Match_InvalidMemoryXExtend8:
4725   case Match_InvalidMemoryXExtend16:
4726   case Match_InvalidMemoryXExtend32:
4727   case Match_InvalidMemoryXExtend64:
4728   case Match_InvalidMemoryXExtend128:
4729   case Match_InvalidMemoryIndexed1SImm4:
4730   case Match_InvalidMemoryIndexed2SImm4:
4731   case Match_InvalidMemoryIndexed3SImm4:
4732   case Match_InvalidMemoryIndexed4SImm4:
4733   case Match_InvalidMemoryIndexed1SImm6:
4734   case Match_InvalidMemoryIndexed16SImm4:
4735   case Match_InvalidMemoryIndexed4SImm7:
4736   case Match_InvalidMemoryIndexed8SImm7:
4737   case Match_InvalidMemoryIndexed16SImm7:
4738   case Match_InvalidMemoryIndexed8UImm5:
4739   case Match_InvalidMemoryIndexed4UImm5:
4740   case Match_InvalidMemoryIndexed2UImm5:
4741   case Match_InvalidMemoryIndexed1UImm6:
4742   case Match_InvalidMemoryIndexed2UImm6:
4743   case Match_InvalidMemoryIndexed4UImm6:
4744   case Match_InvalidMemoryIndexed8UImm6:
4745   case Match_InvalidMemoryIndexedSImm6:
4746   case Match_InvalidMemoryIndexedSImm5:
4747   case Match_InvalidMemoryIndexedSImm8:
4748   case Match_InvalidMemoryIndexedSImm9:
4749   case Match_InvalidMemoryIndexed8SImm10:
4750   case Match_InvalidImm0_1:
4751   case Match_InvalidImm0_7:
4752   case Match_InvalidImm0_15:
4753   case Match_InvalidImm0_31:
4754   case Match_InvalidImm0_63:
4755   case Match_InvalidImm0_127:
4756   case Match_InvalidImm0_255:
4757   case Match_InvalidImm0_65535:
4758   case Match_InvalidImm1_8:
4759   case Match_InvalidImm1_16:
4760   case Match_InvalidImm1_32:
4761   case Match_InvalidImm1_64:
4762   case Match_InvalidSVEAddSubImm8:
4763   case Match_InvalidSVEAddSubImm16:
4764   case Match_InvalidSVEAddSubImm32:
4765   case Match_InvalidSVEAddSubImm64:
4766   case Match_InvalidSVECpyImm8:
4767   case Match_InvalidSVECpyImm16:
4768   case Match_InvalidSVECpyImm32:
4769   case Match_InvalidSVECpyImm64:
4770   case Match_InvalidIndexRange1_1:
4771   case Match_InvalidIndexRange0_15:
4772   case Match_InvalidIndexRange0_7:
4773   case Match_InvalidIndexRange0_3:
4774   case Match_InvalidIndexRange0_1:
4775   case Match_InvalidSVEIndexRange0_63:
4776   case Match_InvalidSVEIndexRange0_31:
4777   case Match_InvalidSVEIndexRange0_15:
4778   case Match_InvalidSVEIndexRange0_7:
4779   case Match_InvalidSVEIndexRange0_3:
4780   case Match_InvalidLabel:
4781   case Match_InvalidComplexRotationEven:
4782   case Match_InvalidComplexRotationOdd:
4783   case Match_InvalidGPR64shifted8:
4784   case Match_InvalidGPR64shifted16:
4785   case Match_InvalidGPR64shifted32:
4786   case Match_InvalidGPR64shifted64:
4787   case Match_InvalidGPR64NoXZRshifted8:
4788   case Match_InvalidGPR64NoXZRshifted16:
4789   case Match_InvalidGPR64NoXZRshifted32:
4790   case Match_InvalidGPR64NoXZRshifted64:
4791   case Match_InvalidZPR32UXTW8:
4792   case Match_InvalidZPR32UXTW16:
4793   case Match_InvalidZPR32UXTW32:
4794   case Match_InvalidZPR32UXTW64:
4795   case Match_InvalidZPR32SXTW8:
4796   case Match_InvalidZPR32SXTW16:
4797   case Match_InvalidZPR32SXTW32:
4798   case Match_InvalidZPR32SXTW64:
4799   case Match_InvalidZPR64UXTW8:
4800   case Match_InvalidZPR64SXTW8:
4801   case Match_InvalidZPR64UXTW16:
4802   case Match_InvalidZPR64SXTW16:
4803   case Match_InvalidZPR64UXTW32:
4804   case Match_InvalidZPR64SXTW32:
4805   case Match_InvalidZPR64UXTW64:
4806   case Match_InvalidZPR64SXTW64:
4807   case Match_InvalidZPR32LSL8:
4808   case Match_InvalidZPR32LSL16:
4809   case Match_InvalidZPR32LSL32:
4810   case Match_InvalidZPR32LSL64:
4811   case Match_InvalidZPR64LSL8:
4812   case Match_InvalidZPR64LSL16:
4813   case Match_InvalidZPR64LSL32:
4814   case Match_InvalidZPR64LSL64:
4815   case Match_InvalidZPR0:
4816   case Match_InvalidZPR8:
4817   case Match_InvalidZPR16:
4818   case Match_InvalidZPR32:
4819   case Match_InvalidZPR64:
4820   case Match_InvalidZPR128:
4821   case Match_InvalidZPR_3b8:
4822   case Match_InvalidZPR_3b16:
4823   case Match_InvalidZPR_3b32:
4824   case Match_InvalidZPR_4b16:
4825   case Match_InvalidZPR_4b32:
4826   case Match_InvalidZPR_4b64:
4827   case Match_InvalidSVEPredicateAnyReg:
4828   case Match_InvalidSVEPattern:
4829   case Match_InvalidSVEPredicateBReg:
4830   case Match_InvalidSVEPredicateHReg:
4831   case Match_InvalidSVEPredicateSReg:
4832   case Match_InvalidSVEPredicateDReg:
4833   case Match_InvalidSVEPredicate3bAnyReg:
4834   case Match_InvalidSVEPredicate3bBReg:
4835   case Match_InvalidSVEPredicate3bHReg:
4836   case Match_InvalidSVEPredicate3bSReg:
4837   case Match_InvalidSVEPredicate3bDReg:
4838   case Match_InvalidSVEExactFPImmOperandHalfOne:
4839   case Match_InvalidSVEExactFPImmOperandHalfTwo:
4840   case Match_InvalidSVEExactFPImmOperandZeroOne:
4841   case Match_MSR:
4842   case Match_MRS: {
4843     if (ErrorInfo >= Operands.size())
4844       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4845     // Any time we get here, there's nothing fancy to do. Just get the
4846     // operand SMLoc and display the diagnostic.
4847     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4848     if (ErrorLoc == SMLoc())
4849       ErrorLoc = IDLoc;
4850     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4851   }
4852   }
4853 
4854   llvm_unreachable("Implement any new match types added!");
4855 }
4856 
4857 /// ParseDirective parses the arm specific directives
ParseDirective(AsmToken DirectiveID)4858 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4859   const MCObjectFileInfo::Environment Format =
4860     getContext().getObjectFileInfo()->getObjectFileType();
4861   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4862 
4863   StringRef IDVal = DirectiveID.getIdentifier();
4864   SMLoc Loc = DirectiveID.getLoc();
4865   if (IDVal == ".arch")
4866     parseDirectiveArch(Loc);
4867   else if (IDVal == ".cpu")
4868     parseDirectiveCPU(Loc);
4869   else if (IDVal == ".tlsdesccall")
4870     parseDirectiveTLSDescCall(Loc);
4871   else if (IDVal == ".ltorg" || IDVal == ".pool")
4872     parseDirectiveLtorg(Loc);
4873   else if (IDVal == ".unreq")
4874     parseDirectiveUnreq(Loc);
4875   else if (IDVal == ".inst")
4876     parseDirectiveInst(Loc);
4877   else if (IsMachO) {
4878     if (IDVal == MCLOHDirectiveName())
4879       parseDirectiveLOH(IDVal, Loc);
4880     else
4881       return true;
4882   } else
4883     return true;
4884   return false;
4885 }
4886 
// Maps the extension names accepted after '+' in the .arch and .cpu
// directives (e.g. "+crc", "+nocrypto") to the subtarget feature bits they
// control. An empty feature set marks a name we recognize but do not yet
// support; the directive parsers call report_fatal_error when one of those
// is requested.
static const struct {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
  { "crc",  {AArch64::FeatureCRC} },
  { "sm4",  {AArch64::FeatureSM4} },
  { "sha3", {AArch64::FeatureSHA3} },
  { "sha2", {AArch64::FeatureSHA2} },
  { "aes",  {AArch64::FeatureAES} },
  { "crypto", {AArch64::FeatureCrypto} },
  { "fp", {AArch64::FeatureFPARMv8} },
  { "simd", {AArch64::FeatureNEON} },
  { "ras", {AArch64::FeatureRAS} },
  { "lse", {AArch64::FeatureLSE} },

  // FIXME: Unsupported extensions
  { "pan", {} },
  { "lor", {} },
  { "rdma", {} },
  { "profile", {} },
};
4908 
ExpandCryptoAEK(AArch64::ArchKind ArchKind,SmallVector<StringRef,4> & RequestedExtensions)4909 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
4910                             SmallVector<StringRef, 4> &RequestedExtensions) {
4911   const bool NoCrypto =
4912       (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4913                  "nocrypto") != std::end(RequestedExtensions));
4914   const bool Crypto =
4915       (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4916                  "crypto") != std::end(RequestedExtensions));
4917 
4918   if (!NoCrypto && Crypto) {
4919     switch (ArchKind) {
4920     default:
4921       // Map 'generic' (and others) to sha2 and aes, because
4922       // that was the traditional meaning of crypto.
4923     case AArch64::ArchKind::ARMV8_1A:
4924     case AArch64::ArchKind::ARMV8_2A:
4925     case AArch64::ArchKind::ARMV8_3A:
4926       RequestedExtensions.push_back("sha2");
4927       RequestedExtensions.push_back("aes");
4928       break;
4929     case AArch64::ArchKind::ARMV8_4A:
4930       RequestedExtensions.push_back("sm4");
4931       RequestedExtensions.push_back("sha3");
4932       RequestedExtensions.push_back("sha2");
4933       RequestedExtensions.push_back("aes");
4934       break;
4935     }
4936   } else if (NoCrypto) {
4937     switch (ArchKind) {
4938     default:
4939       // Map 'generic' (and others) to sha2 and aes, because
4940       // that was the traditional meaning of crypto.
4941     case AArch64::ArchKind::ARMV8_1A:
4942     case AArch64::ArchKind::ARMV8_2A:
4943     case AArch64::ArchKind::ARMV8_3A:
4944       RequestedExtensions.push_back("nosha2");
4945       RequestedExtensions.push_back("noaes");
4946       break;
4947     case AArch64::ArchKind::ARMV8_4A:
4948       RequestedExtensions.push_back("nosm4");
4949       RequestedExtensions.push_back("nosha3");
4950       RequestedExtensions.push_back("nosha2");
4951       RequestedExtensions.push_back("noaes");
4952       break;
4953     }
4954   }
4955 }
4956 
4957 /// parseDirectiveArch
4958 ///   ::= .arch token
parseDirectiveArch(SMLoc L)4959 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4960   SMLoc ArchLoc = getLoc();
4961 
4962   StringRef Arch, ExtensionString;
4963   std::tie(Arch, ExtensionString) =
4964       getParser().parseStringToEndOfStatement().trim().split('+');
4965 
4966   AArch64::ArchKind ID = AArch64::parseArch(Arch);
4967   if (ID == AArch64::ArchKind::INVALID)
4968     return Error(ArchLoc, "unknown arch name");
4969 
4970   if (parseToken(AsmToken::EndOfStatement))
4971     return true;
4972 
4973   // Get the architecture and extension features.
4974   std::vector<StringRef> AArch64Features;
4975   AArch64::getArchFeatures(ID, AArch64Features);
4976   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4977                                 AArch64Features);
4978 
4979   MCSubtargetInfo &STI = copySTI();
4980   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4981   STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4982 
4983   SmallVector<StringRef, 4> RequestedExtensions;
4984   if (!ExtensionString.empty())
4985     ExtensionString.split(RequestedExtensions, '+');
4986 
4987   ExpandCryptoAEK(ID, RequestedExtensions);
4988 
4989   FeatureBitset Features = STI.getFeatureBits();
4990   for (auto Name : RequestedExtensions) {
4991     bool EnableFeature = true;
4992 
4993     if (Name.startswith_lower("no")) {
4994       EnableFeature = false;
4995       Name = Name.substr(2);
4996     }
4997 
4998     for (const auto &Extension : ExtensionMap) {
4999       if (Extension.Name != Name)
5000         continue;
5001 
5002       if (Extension.Features.none())
5003         report_fatal_error("unsupported architectural extension: " + Name);
5004 
5005       FeatureBitset ToggleFeatures = EnableFeature
5006                                          ? (~Features & Extension.Features)
5007                                          : ( Features & Extension.Features);
5008       uint64_t Features =
5009           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5010       setAvailableFeatures(Features);
5011       break;
5012     }
5013   }
5014   return false;
5015 }
5016 
incrementLoc(SMLoc L,int Offset)5017 static SMLoc incrementLoc(SMLoc L, int Offset) {
5018   return SMLoc::getFromPointer(L.getPointer() + Offset);
5019 }
5020 
5021 /// parseDirectiveCPU
5022 ///   ::= .cpu id
parseDirectiveCPU(SMLoc L)5023 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5024   SMLoc CurLoc = getLoc();
5025 
5026   StringRef CPU, ExtensionString;
5027   std::tie(CPU, ExtensionString) =
5028       getParser().parseStringToEndOfStatement().trim().split('+');
5029 
5030   if (parseToken(AsmToken::EndOfStatement))
5031     return true;
5032 
5033   SmallVector<StringRef, 4> RequestedExtensions;
5034   if (!ExtensionString.empty())
5035     ExtensionString.split(RequestedExtensions, '+');
5036 
5037   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5038   // once that is tablegen'ed
5039   if (!getSTI().isCPUStringValid(CPU)) {
5040     Error(CurLoc, "unknown CPU name");
5041     return false;
5042   }
5043 
5044   MCSubtargetInfo &STI = copySTI();
5045   STI.setDefaultFeatures(CPU, "");
5046   CurLoc = incrementLoc(CurLoc, CPU.size());
5047 
5048   ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5049 
5050   FeatureBitset Features = STI.getFeatureBits();
5051   for (auto Name : RequestedExtensions) {
5052     // Advance source location past '+'.
5053     CurLoc = incrementLoc(CurLoc, 1);
5054 
5055     bool EnableFeature = true;
5056 
5057     if (Name.startswith_lower("no")) {
5058       EnableFeature = false;
5059       Name = Name.substr(2);
5060     }
5061 
5062     bool FoundExtension = false;
5063     for (const auto &Extension : ExtensionMap) {
5064       if (Extension.Name != Name)
5065         continue;
5066 
5067       if (Extension.Features.none())
5068         report_fatal_error("unsupported architectural extension: " + Name);
5069 
5070       FeatureBitset ToggleFeatures = EnableFeature
5071                                          ? (~Features & Extension.Features)
5072                                          : ( Features & Extension.Features);
5073       uint64_t Features =
5074           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5075       setAvailableFeatures(Features);
5076       FoundExtension = true;
5077 
5078       break;
5079     }
5080 
5081     if (!FoundExtension)
5082       Error(CurLoc, "unsupported architectural extension");
5083 
5084     CurLoc = incrementLoc(CurLoc, Name.size());
5085   }
5086   return false;
5087 }
5088 
5089 /// parseDirectiveInst
5090 ///  ::= .inst opcode [, ...]
parseDirectiveInst(SMLoc Loc)5091 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5092   if (getLexer().is(AsmToken::EndOfStatement))
5093     return Error(Loc, "expected expression following '.inst' directive");
5094 
5095   auto parseOp = [&]() -> bool {
5096     SMLoc L = getLoc();
5097     const MCExpr *Expr;
5098     if (check(getParser().parseExpression(Expr), L, "expected expression"))
5099       return true;
5100     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5101     if (check(!Value, L, "expected constant expression"))
5102       return true;
5103     getTargetStreamer().emitInst(Value->getValue());
5104     return false;
5105   };
5106 
5107   if (parseMany(parseOp))
5108     return addErrorSuffix(" in '.inst' directive");
5109   return false;
5110 }
5111 
5112 // parseDirectiveTLSDescCall:
5113 //   ::= .tlsdesccall symbol
parseDirectiveTLSDescCall(SMLoc L)5114 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5115   StringRef Name;
5116   if (check(getParser().parseIdentifier(Name), L,
5117             "expected symbol after directive") ||
5118       parseToken(AsmToken::EndOfStatement))
5119     return true;
5120 
5121   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5122   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5123   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5124 
5125   MCInst Inst;
5126   Inst.setOpcode(AArch64::TLSDESCCALL);
5127   Inst.addOperand(MCOperand::createExpr(Expr));
5128 
5129   getParser().getStreamer().EmitInstruction(Inst, getSTI());
5130   return false;
5131 }
5132 
5133 /// ::= .loh <lohName | lohId> label1, ..., labelN
5134 /// The number of arguments depends on the loh identifier.
parseDirectiveLOH(StringRef IDVal,SMLoc Loc)5135 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5136   MCLOHType Kind;
5137   if (getParser().getTok().isNot(AsmToken::Identifier)) {
5138     if (getParser().getTok().isNot(AsmToken::Integer))
5139       return TokError("expected an identifier or a number in directive");
5140     // We successfully get a numeric value for the identifier.
5141     // Check if it is valid.
5142     int64_t Id = getParser().getTok().getIntVal();
5143     if (Id <= -1U && !isValidMCLOHType(Id))
5144       return TokError("invalid numeric identifier in directive");
5145     Kind = (MCLOHType)Id;
5146   } else {
5147     StringRef Name = getTok().getIdentifier();
5148     // We successfully parse an identifier.
5149     // Check if it is a recognized one.
5150     int Id = MCLOHNameToId(Name);
5151 
5152     if (Id == -1)
5153       return TokError("invalid identifier in directive");
5154     Kind = (MCLOHType)Id;
5155   }
5156   // Consume the identifier.
5157   Lex();
5158   // Get the number of arguments of this LOH.
5159   int NbArgs = MCLOHIdToNbArgs(Kind);
5160 
5161   assert(NbArgs != -1 && "Invalid number of arguments");
5162 
5163   SmallVector<MCSymbol *, 3> Args;
5164   for (int Idx = 0; Idx < NbArgs; ++Idx) {
5165     StringRef Name;
5166     if (getParser().parseIdentifier(Name))
5167       return TokError("expected identifier in directive");
5168     Args.push_back(getContext().getOrCreateSymbol(Name));
5169 
5170     if (Idx + 1 == NbArgs)
5171       break;
5172     if (parseToken(AsmToken::Comma,
5173                    "unexpected token in '" + Twine(IDVal) + "' directive"))
5174       return true;
5175   }
5176   if (parseToken(AsmToken::EndOfStatement,
5177                  "unexpected token in '" + Twine(IDVal) + "' directive"))
5178     return true;
5179 
5180   getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5181   return false;
5182 }
5183 
5184 /// parseDirectiveLtorg
5185 ///  ::= .ltorg | .pool
parseDirectiveLtorg(SMLoc L)5186 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5187   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5188     return true;
5189   getTargetStreamer().emitCurrentConstantPool();
5190   return false;
5191 }
5192 
5193 /// parseDirectiveReq
5194 ///  ::= name .req registername
parseDirectiveReq(StringRef Name,SMLoc L)5195 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5196   MCAsmParser &Parser = getParser();
5197   Parser.Lex(); // Eat the '.req' token.
5198   SMLoc SRegLoc = getLoc();
5199   RegKind RegisterKind = RegKind::Scalar;
5200   unsigned RegNum;
5201   OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5202 
5203   if (ParseRes != MatchOperand_Success) {
5204     StringRef Kind;
5205     RegisterKind = RegKind::NeonVector;
5206     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5207 
5208     if (ParseRes == MatchOperand_ParseFail)
5209       return true;
5210 
5211     if (ParseRes == MatchOperand_Success && !Kind.empty())
5212       return Error(SRegLoc, "vector register without type specifier expected");
5213   }
5214 
5215   if (ParseRes != MatchOperand_Success) {
5216     StringRef Kind;
5217     RegisterKind = RegKind::SVEDataVector;
5218     ParseRes =
5219         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5220 
5221     if (ParseRes == MatchOperand_ParseFail)
5222       return true;
5223 
5224     if (ParseRes == MatchOperand_Success && !Kind.empty())
5225       return Error(SRegLoc,
5226                    "sve vector register without type specifier expected");
5227   }
5228 
5229   if (ParseRes != MatchOperand_Success) {
5230     StringRef Kind;
5231     RegisterKind = RegKind::SVEPredicateVector;
5232     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5233 
5234     if (ParseRes == MatchOperand_ParseFail)
5235       return true;
5236 
5237     if (ParseRes == MatchOperand_Success && !Kind.empty())
5238       return Error(SRegLoc,
5239                    "sve predicate register without type specifier expected");
5240   }
5241 
5242   if (ParseRes != MatchOperand_Success)
5243     return Error(SRegLoc, "register name or alias expected");
5244 
5245   // Shouldn't be anything else.
5246   if (parseToken(AsmToken::EndOfStatement,
5247                  "unexpected input in .req directive"))
5248     return true;
5249 
5250   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5251   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5252     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5253 
5254   return false;
5255 }
5256 
/// parseDirectiveUnreq
///  ::= .unreq registername
parseDirectiveUnreq(SMLoc L)5259 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5260   MCAsmParser &Parser = getParser();
5261   if (getTok().isNot(AsmToken::Identifier))
5262     return TokError("unexpected input in .unreq directive.");
5263   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5264   Parser.Lex(); // Eat the identifier.
5265   if (parseToken(AsmToken::EndOfStatement))
5266     return addErrorSuffix("in '.unreq' directive");
5267   return false;
5268 }
5269 
5270 bool
classifySymbolRef(const MCExpr * Expr,AArch64MCExpr::VariantKind & ELFRefKind,MCSymbolRefExpr::VariantKind & DarwinRefKind,int64_t & Addend)5271 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5272                                     AArch64MCExpr::VariantKind &ELFRefKind,
5273                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
5274                                     int64_t &Addend) {
5275   ELFRefKind = AArch64MCExpr::VK_INVALID;
5276   DarwinRefKind = MCSymbolRefExpr::VK_None;
5277   Addend = 0;
5278 
5279   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5280     ELFRefKind = AE->getKind();
5281     Expr = AE->getSubExpr();
5282   }
5283 
5284   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5285   if (SE) {
5286     // It's a simple symbol reference with no addend.
5287     DarwinRefKind = SE->getKind();
5288     return true;
5289   }
5290 
5291   const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
5292   if (!BE)
5293     return false;
5294 
5295   SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
5296   if (!SE)
5297     return false;
5298   DarwinRefKind = SE->getKind();
5299 
5300   if (BE->getOpcode() != MCBinaryExpr::Add &&
5301       BE->getOpcode() != MCBinaryExpr::Sub)
5302     return false;
5303 
5304   // See if the addend is a constant, otherwise there's more going
5305   // on here than we can deal with.
5306   auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
5307   if (!AddendExpr)
5308     return false;
5309 
5310   Addend = AddendExpr->getValue();
5311   if (BE->getOpcode() == MCBinaryExpr::Sub)
5312     Addend = -Addend;
5313 
5314   // It's some symbol reference + a constant addend, but really
5315   // shouldn't use both Darwin and ELF syntax.
5316   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5317          DarwinRefKind == MCSymbolRefExpr::VK_None;
5318 }
5319 
5320 /// Force static initialization.
LLVMInitializeAArch64AsmParser()5321 extern "C" void LLVMInitializeAArch64AsmParser() {
5322   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5323   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5324   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5325 }
5326 
5327 #define GET_REGISTER_MATCHER
5328 #define GET_SUBTARGET_FEATURE_NAME
5329 #define GET_MATCHER_IMPLEMENTATION
5330 #define GET_MNEMONIC_SPELL_CHECKER
5331 #include "AArch64GenAsmMatcher.inc"
5332 
5333 // Define this matcher function after the auto-generated include so we
5334 // have the match class enum definitions.
validateTargetOperandClass(MCParsedAsmOperand & AsmOp,unsigned Kind)5335 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5336                                                       unsigned Kind) {
5337   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5338   // If the kind is a token for a literal immediate, check if our asm
5339   // operand matches. This is for InstAliases which have a fixed-value
5340   // immediate in the syntax.
5341   int64_t ExpectedVal;
5342   switch (Kind) {
5343   default:
5344     return Match_InvalidOperand;
5345   case MCK__35_0:
5346     ExpectedVal = 0;
5347     break;
5348   case MCK__35_1:
5349     ExpectedVal = 1;
5350     break;
5351   case MCK__35_12:
5352     ExpectedVal = 12;
5353     break;
5354   case MCK__35_16:
5355     ExpectedVal = 16;
5356     break;
5357   case MCK__35_2:
5358     ExpectedVal = 2;
5359     break;
5360   case MCK__35_24:
5361     ExpectedVal = 24;
5362     break;
5363   case MCK__35_3:
5364     ExpectedVal = 3;
5365     break;
5366   case MCK__35_32:
5367     ExpectedVal = 32;
5368     break;
5369   case MCK__35_4:
5370     ExpectedVal = 4;
5371     break;
5372   case MCK__35_48:
5373     ExpectedVal = 48;
5374     break;
5375   case MCK__35_6:
5376     ExpectedVal = 6;
5377     break;
5378   case MCK__35_64:
5379     ExpectedVal = 64;
5380     break;
5381   case MCK__35_8:
5382     ExpectedVal = 8;
5383     break;
5384   }
5385   if (!Op.isImm())
5386     return Match_InvalidOperand;
5387   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5388   if (!CE)
5389     return Match_InvalidOperand;
5390   if (CE->getValue() == ExpectedVal)
5391     return Match_Success;
5392   return Match_InvalidOperand;
5393 }
5394 
5395 OperandMatchResultTy
tryParseGPRSeqPair(OperandVector & Operands)5396 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5397 
5398   SMLoc S = getLoc();
5399 
5400   if (getParser().getTok().isNot(AsmToken::Identifier)) {
5401     Error(S, "expected register");
5402     return MatchOperand_ParseFail;
5403   }
5404 
5405   unsigned FirstReg;
5406   OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5407   if (Res != MatchOperand_Success)
5408     return MatchOperand_ParseFail;
5409 
5410   const MCRegisterClass &WRegClass =
5411       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5412   const MCRegisterClass &XRegClass =
5413       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5414 
5415   bool isXReg = XRegClass.contains(FirstReg),
5416        isWReg = WRegClass.contains(FirstReg);
5417   if (!isXReg && !isWReg) {
5418     Error(S, "expected first even register of a "
5419              "consecutive same-size even/odd register pair");
5420     return MatchOperand_ParseFail;
5421   }
5422 
5423   const MCRegisterInfo *RI = getContext().getRegisterInfo();
5424   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5425 
5426   if (FirstEncoding & 0x1) {
5427     Error(S, "expected first even register of a "
5428              "consecutive same-size even/odd register pair");
5429     return MatchOperand_ParseFail;
5430   }
5431 
5432   if (getParser().getTok().isNot(AsmToken::Comma)) {
5433     Error(getLoc(), "expected comma");
5434     return MatchOperand_ParseFail;
5435   }
5436   // Eat the comma
5437   getParser().Lex();
5438 
5439   SMLoc E = getLoc();
5440   unsigned SecondReg;
5441   Res = tryParseScalarRegister(SecondReg);
5442   if (Res != MatchOperand_Success)
5443     return MatchOperand_ParseFail;
5444 
5445   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5446       (isXReg && !XRegClass.contains(SecondReg)) ||
5447       (isWReg && !WRegClass.contains(SecondReg))) {
5448     Error(E,"expected second odd register of a "
5449              "consecutive same-size even/odd register pair");
5450     return MatchOperand_ParseFail;
5451   }
5452 
5453   unsigned Pair = 0;
5454   if (isXReg) {
5455     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5456            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5457   } else {
5458     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5459            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5460   }
5461 
5462   Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5463       getLoc(), getContext()));
5464 
5465   return MatchOperand_Success;
5466 }
5467 
5468 template <bool ParseShiftExtend, bool ParseSuffix>
5469 OperandMatchResultTy
tryParseSVEDataVector(OperandVector & Operands)5470 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5471   const SMLoc S = getLoc();
5472   // Check for a SVE vector register specifier first.
5473   unsigned RegNum;
5474   StringRef Kind;
5475 
5476   OperandMatchResultTy Res =
5477       tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5478 
5479   if (Res != MatchOperand_Success)
5480     return Res;
5481 
5482   if (ParseSuffix && Kind.empty())
5483     return MatchOperand_NoMatch;
5484 
5485   const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5486   if (!KindRes)
5487     return MatchOperand_NoMatch;
5488 
5489   unsigned ElementWidth = KindRes->second;
5490 
5491   // No shift/extend is the default.
5492   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5493     Operands.push_back(AArch64Operand::CreateVectorReg(
5494         RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5495 
5496     OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5497     if (Res == MatchOperand_ParseFail)
5498       return MatchOperand_ParseFail;
5499     return MatchOperand_Success;
5500   }
5501 
5502   // Eat the comma
5503   getParser().Lex();
5504 
5505   // Match the shift
5506   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5507   Res = tryParseOptionalShiftExtend(ExtOpnd);
5508   if (Res != MatchOperand_Success)
5509     return Res;
5510 
5511   auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5512   Operands.push_back(AArch64Operand::CreateVectorReg(
5513       RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5514       getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5515       Ext->hasShiftExtendAmount()));
5516 
5517   return MatchOperand_Success;
5518 }
5519 
5520 OperandMatchResultTy
tryParseSVEPattern(OperandVector & Operands)5521 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5522   MCAsmParser &Parser = getParser();
5523 
5524   SMLoc SS = getLoc();
5525   const AsmToken &TokE = Parser.getTok();
5526   bool IsHash = TokE.is(AsmToken::Hash);
5527 
5528   if (!IsHash && TokE.isNot(AsmToken::Identifier))
5529     return MatchOperand_NoMatch;
5530 
5531   int64_t Pattern;
5532   if (IsHash) {
5533     Parser.Lex(); // Eat hash
5534 
5535     // Parse the immediate operand.
5536     const MCExpr *ImmVal;
5537     SS = getLoc();
5538     if (Parser.parseExpression(ImmVal))
5539       return MatchOperand_ParseFail;
5540 
5541     auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5542     if (!MCE)
5543       return MatchOperand_ParseFail;
5544 
5545     Pattern = MCE->getValue();
5546   } else {
5547     // Parse the pattern
5548     auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5549     if (!Pat)
5550       return MatchOperand_NoMatch;
5551 
5552     Parser.Lex();
5553     Pattern = Pat->Encoding;
5554     assert(Pattern >= 0 && Pattern < 32);
5555   }
5556 
5557   Operands.push_back(
5558       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5559                                 SS, getLoc(), getContext()));
5560 
5561   return MatchOperand_Success;
5562 }
5563