//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

namespace {

struct OptionalOperand;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  typedef std::unique_ptr<AMDGPUOperand> Ptr;

  struct Modifiers {
    bool Abs;
    bool Neg;
    bool Sext;

    bool hasFPModifiers() const { return Abs || Neg; }
    bool hasIntModifiers() const { return Sext; }
    bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }

    int64_t getFPModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Abs ? SISrcMods::ABS : 0;
      Operand |= Neg ? SISrcMods::NEG : 0;
      return Operand;
    }

    int64_t getIntModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Sext ? SISrcMods::SEXT : 0;
      return Operand;
    }

    int64_t getModifiersOperand() const {
      assert(!(hasFPModifiers() && hasIntModifiers())
           && "fp and int modifiers should not be used simultaneously");
      if (hasFPModifiers()) {
        return getFPModifiersOperand();
      } else if (hasIntModifiers()) {
        return getIntModifiersOperand();
      } else {
        return 0;
      }
    }

    friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
  };

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
    ImmTyHwreg,
    ImmTySendMsg,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    Modifiers Mods;
  };

  struct RegOp {
    unsigned RegNo;
    Modifiers Mods;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  bool isToken() const override {
    if (Kind == Token)
      return true;

    if (Kind != Expression || !Expr)
      return false;

    // When parsing operands, we can't always tell if something was meant to be
    // a token, like 'gds', or an expression that references a global variable.
    // In this case, we assume the string is an expression, and if we need to
    // interpret it as a token, then we treat the symbol name as the token.
    return isa<MCSymbolRefExpr>(Expr);
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    if (!isImmTy(ImmTyNone)) {
      // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
      return false;
    }
    // TODO: We should avoid using host floats here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return isRegKind() && !Reg.Mods.hasModifiers();
  }

  bool isRegOrImmWithInputMods() const {
    return isRegKind() || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
  }

  bool isSCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32() || isExpr();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
  }

  bool isVCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of the instruction type (unsigned/signed, floating or "untyped"/B64);
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;

  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }

  StringRef getToken() const {
    assert(isToken());

    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
    if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers()) {
      // Apply modifiers to immediate value
      int64_t Val = Imm.Val;
      bool Negate = Imm.Mods.Neg; // Only negate can get here
      if (Imm.IsFPImm) {
        APFloat F(BitsToFloat(Val));
        if (Negate) {
          F.changeSign();
        }
        Val = F.bitcastToAPInt().getZExtValue();
      } else {
        Val = Negate ? -Val : Val;
      }
      Inst.addOperand(MCOperand::createImm(Val));
    } else {
      Inst.addOperand(MCOperand::createImm(getImm()));
    }
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else if (isExpr())
      Inst.addOperand(MCOperand::createExpr(Expr));
    else
      addImmOperands(Inst, N);
  }

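  // A sketch of the emitted operand order (assuming the usual VOP3-style
  // (src_modifiers, src) layout): the modifier bits are added as an immediate
  // operand first, then the source itself. E.g. "-|v0|" yields an immediate
  // carrying SISrcMods::NEG | SISrcMods::ABS followed by the register operand.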
  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      addImmOperands(Inst, N, false);
    }
  }

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  void printImmTy(raw_ostream& OS, ImmTy Type) const {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128: OS << "R128"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    }
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
                                      enum ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Mods = {false, false, false};
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      const MCRegisterInfo *TRI,
                                      const MCSubtargetInfo *STI,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Mods = {false, false, false};
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
  return OS;
}

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;
  bool ForcedDPP;
  bool ForcedSDWA;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII,
               const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0),
        ForcedDPP(false),
        ForcedSDWA(false) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));

    {
      // TODO: make these pre-defined variables read-only.
      // Currently there is no suitable machinery in core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
      AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
    }
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                                          bool (*ConvertResult)(int64_t&) = 0);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);

  OperandMatchResultTy parseImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};

}

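// Maps a register kind and width (in 32-bit dwords) to a register class ID;
// e.g. (IS_VGPR, 2) yields AMDGPU::VReg_64RegClassID, covering pairs such as
// v[2:3]. Widths without a matching class (e.g. 5) fall through to -1.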
static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::TTMP_32RegClassID;
      case 2: return AMDGPU::TTMP_64RegClassID;
      case 4: return AMDGPU::TTMP_128RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::SGPR_32RegClassID;
      case 2: return AMDGPU::SGPR_64RegClassID;
      case 4: return AMDGPU::SGPR_128RegClassID;
      case 8: return AMDGPU::SReg_256RegClassID;
      case 16: return AMDGPU::SReg_512RegClassID;
    }
  }
  return -1;
}

static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  auto R = parseRegister();
  if (!R) return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}

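// Example: while parsing the list [v0, v1, v2], each call checks that the
// incoming register extends the accumulated range by exactly one
// (Reg1 == Reg + RegWidth) and bumps the width, ending up equivalent to
// v[0:2]. Special registers only combine as known lo/hi pairs, e.g.
// exec_lo/exec_hi into exec.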
bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
{
  switch (RegKind) {
  case IS_SPECIAL:
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
    if (Reg1 != Reg + RegWidth) { return false; }
    RegWidth++;
    return true;
  default:
    assert(false); return false;
  }
}

bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. The maximum required
      // alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
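    // For example, s[2:5] is rejected below: a 4-dword SGPR range must start
    // at a register index that is a multiple of 4.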
    if (RegNum % Size != 0)
      return false;
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}

std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
                                  TRI, &getSTI(), false);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used.  I'm not sure of the best way to detect this.
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
  auto res = parseImm(Operands);
  if (res != MatchOperand_NoMatch) {
    return res;
  }

  if (auto R = parseRegister()) {
    assert(R->isReg());
    R->Reg.IsForcedVOP3 = isForcedVOP3();
    Operands.push_back(std::move(R));
    return MatchOperand_Success;
  }
  return MatchOperand_ParseFail;
}

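// Accepted floating-point source-modifier spellings include "-v0" (neg),
// "|v0|" and "abs(v0)" (abs), and combinations such as "-|v0|".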
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine whether a minus sign means a
  // negate modifier or a negative immediate value.
  // By default we assume it is a modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Negate) {
    Mods.Neg = true;
  }
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}

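// Integer operands accept only the sign-extension modifier, spelled
// "sext(v0)"; mixing int and FP modifiers is caught later by the
// assertions in addRegOrImmWith{FP,Int}InputModsOperands.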
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}

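// A forced encoding must agree with the matched instruction; e.g. an "_e32"
// suffix (ForcedEncodingSize == 32) on an instruction that matched a VOP3
// opcode is rejected with Match_InvalidOperand below.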
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, getSTI());
      return false;
    case Match_MissingFeature:
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          return Error(IDLoc, "too few operands for instruction");
        }
        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
    case Match_PreferE32:
      return Error(IDLoc, "internal error: instruction without _e64 suffix "
                          "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name and
    // an instruction flag like 'gds'.  In order to do this, we parse
    // all tokens as expressions and then treat the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(Expr, S));
      return MatchOperand_Success;
    }

    Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

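// Example: "v_add_f32_e64" is reduced to the base mnemonic "v_add_f32" with
// ForcedEncodingSize == 64; "_dpp" and "_sdwa" set the corresponding flags.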
parseMnemonicSuffix(StringRef Name)1478 StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1479   // Clear any forced encodings from the previous instruction.
1480   setForcedEncodingSize(0);
1481   setForcedDPP(false);
1482   setForcedSDWA(false);
1483 
1484   if (Name.endswith("_e64")) {
1485     setForcedEncodingSize(64);
1486     return Name.substr(0, Name.size() - 4);
1487   } else if (Name.endswith("_e32")) {
1488     setForcedEncodingSize(32);
1489     return Name.substr(0, Name.size() - 4);
1490   } else if (Name.endswith("_dpp")) {
1491     setForcedDPP(true);
1492     return Name.substr(0, Name.size() - 4);
1493   } else if (Name.endswith("_sdwa")) {
1494     setForcedSDWA(true);
1495     return Name.substr(0, Name.size() - 5);
1496   }
1497   return Name;
1498 }
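// For example, "v_add_f32_e64" is parsed as the mnemonic "v_add_f32" with a
// forced 64-bit encoding, and "v_mov_b32_sdwa" as "v_mov_b32" with SDWA
// forced; a name without a recognized suffix is returned unchanged.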
1499 
1500 bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1501                                        StringRef Name,
1502                                        SMLoc NameLoc, OperandVector &Operands) {
1503   // Add the instruction mnemonic
1504   Name = parseMnemonicSuffix(Name);
1505   Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
1506 
1507   while (!getLexer().is(AsmToken::EndOfStatement)) {
1508     AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1509 
1510     // Eat the comma or space if there is one.
1511     if (getLexer().is(AsmToken::Comma))
1512       Parser.Lex();
1513 
1514     switch (Res) {
1515       case MatchOperand_Success: break;
1516       case MatchOperand_ParseFail:
1517         Error(getLexer().getLoc(), "failed parsing operand.");
1518         while (!getLexer().is(AsmToken::EndOfStatement)) {
1519           Parser.Lex();
1520         }
1521         return true;
1522       case MatchOperand_NoMatch:
1523         Error(getLexer().getLoc(), "not a valid operand.");
1524         while (!getLexer().is(AsmToken::EndOfStatement)) {
1525           Parser.Lex();
1526         }
1527         return true;
1528     }
1529   }
1530 
1531   return false;
1532 }
1533 
1534 //===----------------------------------------------------------------------===//
1535 // Utility functions
1536 //===----------------------------------------------------------------------===//
1537 
1538 AMDGPUAsmParser::OperandMatchResultTy
1539 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
1540   switch(getLexer().getKind()) {
1541     default: return MatchOperand_NoMatch;
1542     case AsmToken::Identifier: {
1543       StringRef Name = Parser.getTok().getString();
1544       if (!Name.equals(Prefix)) {
1545         return MatchOperand_NoMatch;
1546       }
1547 
1548       Parser.Lex();
1549       if (getLexer().isNot(AsmToken::Colon))
1550         return MatchOperand_ParseFail;
1551 
1552       Parser.Lex();
1553       if (getLexer().isNot(AsmToken::Integer))
1554         return MatchOperand_ParseFail;
1555 
1556       if (getParser().parseAbsoluteExpression(Int))
1557         return MatchOperand_ParseFail;
1558       break;
1559     }
1560   }
1561   return MatchOperand_Success;
1562 }
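// This accepts the "<prefix>:<integer>" syntax used by optional operands; for
// example, with Prefix == "offset", parsing "offset:16" leaves 16 in Int.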
1563 
1564 AMDGPUAsmParser::OperandMatchResultTy
1565 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1566                                     enum AMDGPUOperand::ImmTy ImmTy,
1567                                     bool (*ConvertResult)(int64_t&)) {
1568 
1569   SMLoc S = Parser.getTok().getLoc();
1570   int64_t Value = 0;
1571 
1572   AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
1573   if (Res != MatchOperand_Success)
1574     return Res;
1575 
1576   if (ConvertResult && !ConvertResult(Value)) {
1577     return MatchOperand_ParseFail;
1578   }
1579 
1580   Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
1581   return MatchOperand_Success;
1582 }
1583 
1584 AMDGPUAsmParser::OperandMatchResultTy
1585 AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
1586                                enum AMDGPUOperand::ImmTy ImmTy) {
1587   int64_t Bit = 0;
1588   SMLoc S = Parser.getTok().getLoc();
1589 
1590   // If we are at the end of the statement, this is a default argument, so
1591   // we use the default value; otherwise, parse the named bit below.
1592   if (getLexer().isNot(AsmToken::EndOfStatement)) {
1593     switch(getLexer().getKind()) {
1594       case AsmToken::Identifier: {
1595         StringRef Tok = Parser.getTok().getString();
1596         if (Tok == Name) {
1597           Bit = 1;
1598           Parser.Lex();
1599         } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1600           Bit = 0;
1601           Parser.Lex();
1602         } else {
1603           return MatchOperand_NoMatch;
1604         }
1605         break;
1606       }
1607       default:
1608         return MatchOperand_NoMatch;
1609     }
1610   }
1611 
1612   Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1613   return MatchOperand_Success;
1614 }
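// Both polarities of a named bit are accepted here: for Name == "gds", the
// token "gds" produces an immediate of 1 and "nogds" an immediate of 0; if
// the statement ends first, the default of 0 is used.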
1615 
1616 typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1617 
1618 void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1619                            OptionalImmIndexMap& OptionalIdx,
1620                            enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
1621   auto i = OptionalIdx.find(ImmT);
1622   if (i != OptionalIdx.end()) {
1623     unsigned Idx = i->second;
1624     ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1625   } else {
1626     Inst.addOperand(MCOperand::createImm(Default));
1627   }
1628 }
1629 
1630 AMDGPUAsmParser::OperandMatchResultTy
1631 AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
1632   if (getLexer().isNot(AsmToken::Identifier)) {
1633     return MatchOperand_NoMatch;
1634   }
1635   StringRef Tok = Parser.getTok().getString();
1636   if (Tok != Prefix) {
1637     return MatchOperand_NoMatch;
1638   }
1639 
1640   Parser.Lex();
1641   if (getLexer().isNot(AsmToken::Colon)) {
1642     return MatchOperand_ParseFail;
1643   }
1644 
1645   Parser.Lex();
1646   if (getLexer().isNot(AsmToken::Identifier)) {
1647     return MatchOperand_ParseFail;
1648   }
1649 
1650   Value = Parser.getTok().getString();
1651   return MatchOperand_Success;
1652 }
1653 
1654 //===----------------------------------------------------------------------===//
1655 // ds
1656 //===----------------------------------------------------------------------===//
1657 
1658 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1659                                     const OperandVector &Operands) {
1660 
1661   OptionalImmIndexMap OptionalIdx;
1662 
1663   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1664     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1665 
1666     // Add the register arguments
1667     if (Op.isReg()) {
1668       Op.addRegOperands(Inst, 1);
1669       continue;
1670     }
1671 
1672     // Handle optional arguments
1673     OptionalIdx[Op.getImmTy()] = i;
1674   }
1675 
1676   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1677   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
1678   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1679 
1680   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1681 }
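// Used for DS instructions carrying two separate 8-bit offsets, e.g.
//   ds_write2_b32 v1, v2, v3 offset0:4 offset1:8
// Any of offset0/offset1/gds that were not written get their default of 0,
// and the implicit m0 use is appended last.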
1682 
1683 void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1684 
1685   std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1686   bool GDSOnly = false;
1687 
1688   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1689     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1690 
1691     // Add the register arguments
1692     if (Op.isReg()) {
1693       Op.addRegOperands(Inst, 1);
1694       continue;
1695     }
1696 
1697     if (Op.isToken() && Op.getToken() == "gds") {
1698       GDSOnly = true;
1699       continue;
1700     }
1701 
1702     // Handle optional arguments
1703     OptionalIdx[Op.getImmTy()] = i;
1704   }
1705 
1706   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1708 
1709   if (!GDSOnly) {
1710     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1711   }
1712   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1713 }
1714 
1715 
1716 //===----------------------------------------------------------------------===//
1717 // s_waitcnt
1718 //===----------------------------------------------------------------------===//
1719 
1720 bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1721   StringRef CntName = Parser.getTok().getString();
1722   int64_t CntVal;
1723 
1724   Parser.Lex();
1725   if (getLexer().isNot(AsmToken::LParen))
1726     return true;
1727 
1728   Parser.Lex();
1729   if (getLexer().isNot(AsmToken::Integer))
1730     return true;
1731 
1732   if (getParser().parseAbsoluteExpression(CntVal))
1733     return true;
1734 
1735   if (getLexer().isNot(AsmToken::RParen))
1736     return true;
1737 
1738   Parser.Lex();
1739   if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1740     Parser.Lex();
1741 
1742   int CntShift;
1743   int CntMask;
1744 
1745   if (CntName == "vmcnt") {
1746     CntMask = 0xf;
1747     CntShift = 0;
1748   } else if (CntName == "expcnt") {
1749     CntMask = 0x7;
1750     CntShift = 4;
1751   } else if (CntName == "lgkmcnt") {
1752     CntMask = 0xf;
1753     CntShift = 8;
1754   } else {
1755     return true;
1756   }
1757 
1758   IntVal &= ~(CntMask << CntShift);
1759   IntVal |= (CntVal << CntShift);
1760   return false;
1761 }
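// A worked example: starting from the "all counters disabled" value 0xf7f,
// parsing "s_waitcnt vmcnt(0) & lgkmcnt(0)" clears bits [3:0] and [11:8],
// producing 0x070 (expcnt stays at its disabled value of 7).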
1762 
1763 AMDGPUAsmParser::OperandMatchResultTy
1764 AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1765   // Disable all counters by default.
1766   // vmcnt   [3:0]
1767   // expcnt  [6:4]
1768   // lgkmcnt [11:8]
1769   int64_t CntVal = 0xf7f;
1770   SMLoc S = Parser.getTok().getLoc();
1771 
1772   switch(getLexer().getKind()) {
1773     default: return MatchOperand_ParseFail;
1774     case AsmToken::Integer:
1775       // The operand can be an integer value.
1776       if (getParser().parseAbsoluteExpression(CntVal))
1777         return MatchOperand_ParseFail;
1778       break;
1779 
1780     case AsmToken::Identifier:
1781       do {
1782         if (parseCnt(CntVal))
1783           return MatchOperand_ParseFail;
1784       } while(getLexer().isNot(AsmToken::EndOfStatement));
1785       break;
1786   }
1787   Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1788   return MatchOperand_Success;
1789 }
1790 
1791 bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
1792   using namespace llvm::AMDGPU::Hwreg;
1793 
1794   if (Parser.getTok().getString() != "hwreg")
1795     return true;
1796   Parser.Lex();
1797 
1798   if (getLexer().isNot(AsmToken::LParen))
1799     return true;
1800   Parser.Lex();
1801 
1802   if (getLexer().is(AsmToken::Identifier)) {
1803     HwReg.IsSymbolic = true;
1804     HwReg.Id = ID_UNKNOWN_;
1805     const StringRef tok = Parser.getTok().getString();
1806     for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
1807       if (tok == IdSymbolic[i]) {
1808         HwReg.Id = i;
1809         break;
1810       }
1811     }
1812     Parser.Lex();
1813   } else {
1814     HwReg.IsSymbolic = false;
1815     if (getLexer().isNot(AsmToken::Integer))
1816       return true;
1817     if (getParser().parseAbsoluteExpression(HwReg.Id))
1818       return true;
1819   }
1820 
1821   if (getLexer().is(AsmToken::RParen)) {
1822     Parser.Lex();
1823     return false;
1824   }
1825 
1826   // optional params
1827   if (getLexer().isNot(AsmToken::Comma))
1828     return true;
1829   Parser.Lex();
1830 
1831   if (getLexer().isNot(AsmToken::Integer))
1832     return true;
1833   if (getParser().parseAbsoluteExpression(Offset))
1834     return true;
1835 
1836   if (getLexer().isNot(AsmToken::Comma))
1837     return true;
1838   Parser.Lex();
1839 
1840   if (getLexer().isNot(AsmToken::Integer))
1841     return true;
1842   if (getParser().parseAbsoluteExpression(Width))
1843     return true;
1844 
1845   if (getLexer().isNot(AsmToken::RParen))
1846     return true;
1847   Parser.Lex();
1848 
1849   return false;
1850 }
1851 
1852 AMDGPUAsmParser::OperandMatchResultTy
1853 AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
1854   using namespace llvm::AMDGPU::Hwreg;
1855 
1856   int64_t Imm16Val = 0;
1857   SMLoc S = Parser.getTok().getLoc();
1858 
1859   switch(getLexer().getKind()) {
1860     default: return MatchOperand_NoMatch;
1861     case AsmToken::Integer:
1862       // The operand can be an integer value.
1863       if (getParser().parseAbsoluteExpression(Imm16Val))
1864         return MatchOperand_NoMatch;
1865       if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
1866         Error(S, "invalid immediate: only 16-bit values are legal");
1867         // Do not return an error code; create an imm operand anyway and proceed
1868         // to the next operand, if any. That avoids unnecessary error messages.
1869       }
1870       break;
1871 
1872     case AsmToken::Identifier: {
1873         OperandInfoTy HwReg(ID_UNKNOWN_);
1874         int64_t Offset = OFFSET_DEFAULT_;
1875         int64_t Width = WIDTH_M1_DEFAULT_ + 1;
1876         if (parseHwregConstruct(HwReg, Offset, Width))
1877           return MatchOperand_ParseFail;
1878         if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
1879           if (HwReg.IsSymbolic)
1880             Error(S, "invalid symbolic name of hardware register");
1881           else
1882             Error(S, "invalid code of hardware register: only 6-bit values are legal");
1883         }
1884         if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
1885           Error(S, "invalid bit offset: only 5-bit values are legal");
1886         if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
1887           Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
1888         Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
1889       }
1890       break;
1891   }
1892   Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
1893   return MatchOperand_Success;
1894 }
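// For example, "s_getreg_b32 s2, hwreg(HW_REG_TRAPSTS, 8, 4)" reads a 4-bit
// field starting at bit 8; the immediate packs id, offset and width-1 using
// the ID_SHIFT_/OFFSET_SHIFT_/WIDTH_M1_SHIFT_ constants, i.e. with the usual
// SI layout (id in bits [5:0], offset in [10:6], width-1 in [15:11]) this
// contributes id | (8 << 6) | (3 << 11).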
1895 
1896 bool AMDGPUOperand::isSWaitCnt() const {
1897   return isImm();
1898 }
1899 
1900 bool AMDGPUOperand::isHwreg() const {
1901   return isImmTy(ImmTyHwreg);
1902 }
1903 
1904 bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
1905   using namespace llvm::AMDGPU::SendMsg;
1906 
1907   if (Parser.getTok().getString() != "sendmsg")
1908     return true;
1909   Parser.Lex();
1910 
1911   if (getLexer().isNot(AsmToken::LParen))
1912     return true;
1913   Parser.Lex();
1914 
1915   if (getLexer().is(AsmToken::Identifier)) {
1916     Msg.IsSymbolic = true;
1917     Msg.Id = ID_UNKNOWN_;
1918     const std::string tok = Parser.getTok().getString();
1919     for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
1920       switch(i) {
1921         default: continue; // Omit gaps.
1922         case ID_INTERRUPT: case ID_GS: case ID_GS_DONE:  case ID_SYSMSG: break;
1923       }
1924       if (tok == IdSymbolic[i]) {
1925         Msg.Id = i;
1926         break;
1927       }
1928     }
1929     Parser.Lex();
1930   } else {
1931     Msg.IsSymbolic = false;
1932     if (getLexer().isNot(AsmToken::Integer))
1933       return true;
1934     if (getParser().parseAbsoluteExpression(Msg.Id))
1935       return true;
1936     if (getLexer().is(AsmToken::Integer))
1937       if (getParser().parseAbsoluteExpression(Msg.Id))
1938         Msg.Id = ID_UNKNOWN_;
1939   }
1940   if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
1941     return false;
1942 
1943   if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
1944     if (getLexer().isNot(AsmToken::RParen))
1945       return true;
1946     Parser.Lex();
1947     return false;
1948   }
1949 
1950   if (getLexer().isNot(AsmToken::Comma))
1951     return true;
1952   Parser.Lex();
1953 
1954   assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
1955   Operation.Id = ID_UNKNOWN_;
1956   if (getLexer().is(AsmToken::Identifier)) {
1957     Operation.IsSymbolic = true;
1958     const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1959     const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1960     const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1961     const StringRef Tok = Parser.getTok().getString();
1962     for (int i = F; i < L; ++i) {
1963       if (Tok == S[i]) {
1964         Operation.Id = i;
1965         break;
1966       }
1967     }
1968     Parser.Lex();
1969   } else {
1970     Operation.IsSymbolic = false;
1971     if (getLexer().isNot(AsmToken::Integer))
1972       return true;
1973     if (getParser().parseAbsoluteExpression(Operation.Id))
1974       return true;
1975   }
1976 
1977   if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1978     // Stream id is optional.
1979     if (getLexer().is(AsmToken::RParen)) {
1980       Parser.Lex();
1981       return false;
1982     }
1983 
1984     if (getLexer().isNot(AsmToken::Comma))
1985       return true;
1986     Parser.Lex();
1987 
1988     if (getLexer().isNot(AsmToken::Integer))
1989       return true;
1990     if (getParser().parseAbsoluteExpression(StreamId))
1991       return true;
1992   }
1993 
1994   if (getLexer().isNot(AsmToken::RParen))
1995     return true;
1996   Parser.Lex();
1997   return false;
1998 }
1999 
2000 AMDGPUAsmParser::OperandMatchResultTy
2001 AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
2002   using namespace llvm::AMDGPU::SendMsg;
2003 
2004   int64_t Imm16Val = 0;
2005   SMLoc S = Parser.getTok().getLoc();
2006 
2007   switch(getLexer().getKind()) {
2008   default:
2009     return MatchOperand_NoMatch;
2010   case AsmToken::Integer:
2011     // The operand can be an integer value.
2012     if (getParser().parseAbsoluteExpression(Imm16Val))
2013       return MatchOperand_NoMatch;
2014     if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
2015       Error(S, "invalid immediate: only 16-bit values are legal");
2016       // Do not return an error code; create an imm operand anyway and proceed
2017       // to the next operand, if any. That avoids unnecessary error messages.
2018     }
2019     break;
2020   case AsmToken::Identifier: {
2021       OperandInfoTy Msg(ID_UNKNOWN_);
2022       OperandInfoTy Operation(OP_UNKNOWN_);
2023       int64_t StreamId = STREAM_ID_DEFAULT_;
2024       if (parseSendMsgConstruct(Msg, Operation, StreamId))
2025         return MatchOperand_ParseFail;
2026       do {
2027         // Validate and encode message ID.
2028         if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
2029                 || Msg.Id == ID_SYSMSG)) {
2030           if (Msg.IsSymbolic)
2031             Error(S, "invalid/unsupported symbolic name of message");
2032           else
2033             Error(S, "invalid/unsupported code of message");
2034           break;
2035         }
2036         Imm16Val = (Msg.Id << ID_SHIFT_);
2037         // Validate and encode operation ID.
2038         if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
2039           if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
2040             if (Operation.IsSymbolic)
2041               Error(S, "invalid symbolic name of GS_OP");
2042             else
2043               Error(S, "invalid code of GS_OP: only 2-bit values are legal");
2044             break;
2045           }
2046           if (Operation.Id == OP_GS_NOP
2047               && Msg.Id != ID_GS_DONE) {
2048             Error(S, "invalid GS_OP: NOP is for GS_DONE only");
2049             break;
2050           }
2051           Imm16Val |= (Operation.Id << OP_SHIFT_);
2052         }
2053         if (Msg.Id == ID_SYSMSG) {
2054           if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
2055             if (Operation.IsSymbolic)
2056               Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
2057             else
2058               Error(S, "invalid/unsupported code of SYSMSG_OP");
2059             break;
2060           }
2061           Imm16Val |= (Operation.Id << OP_SHIFT_);
2062         }
2063         // Validate and encode stream ID.
2064         if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
2065           if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
2066             Error(S, "invalid stream id: only 2-bit values are legal");
2067             break;
2068           }
2069           Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
2070         }
2071       } while (0);
2072     }
2073     break;
2074   }
2075   Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
2076   return MatchOperand_Success;
2077 }
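// For example, "s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)" packs the message
// id, operation and stream id via ID_SHIFT_/OP_SHIFT_/STREAM_ID_SHIFT_;
// assuming the conventional SI shifts (op at bit 4, stream id at bit 8),
// MSG_GS (2) with GS_OP_EMIT (2) and stream 0 encodes as 0x22.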
2078 
2079 bool AMDGPUOperand::isSendMsg() const {
2080   return isImmTy(ImmTySendMsg);
2081 }
2082 
2083 //===----------------------------------------------------------------------===//
2084 // sopp branch targets
2085 //===----------------------------------------------------------------------===//
2086 
2087 AMDGPUAsmParser::OperandMatchResultTy
2088 AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2089   SMLoc S = Parser.getTok().getLoc();
2090 
2091   switch (getLexer().getKind()) {
2092     default: return MatchOperand_ParseFail;
2093     case AsmToken::Integer: {
2094       int64_t Imm;
2095       if (getParser().parseAbsoluteExpression(Imm))
2096         return MatchOperand_ParseFail;
2097       Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
2098       return MatchOperand_Success;
2099     }
2100 
2101     case AsmToken::Identifier:
2102       Operands.push_back(AMDGPUOperand::CreateExpr(
2103           MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2104                                   Parser.getTok().getString()), getContext()), S));
2105       Parser.Lex();
2106       return MatchOperand_Success;
2107   }
2108 }
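// Both forms are accepted: "s_branch 8" produces a literal immediate, while
// "s_branch label" produces a symbol-reference expression that is resolved to
// a branch offset later, at layout/relocation time.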
2109 
2110 //===----------------------------------------------------------------------===//
2111 // mubuf
2112 //===----------------------------------------------------------------------===//
2113 
2114 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
2115   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
2116 }
2117 
2118 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
2119   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
2120 }
2121 
2122 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
2123   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
2124 }
2125 
2126 void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
2127                                const OperandVector &Operands,
2128                                bool IsAtomic, bool IsAtomicReturn) {
2129   OptionalImmIndexMap OptionalIdx;
2130   assert(IsAtomicReturn ? IsAtomic : true);
2131 
2132   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2133     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2134 
2135     // Add the register arguments
2136     if (Op.isReg()) {
2137       Op.addRegOperands(Inst, 1);
2138       continue;
2139     }
2140 
2141     // Handle the case where soffset is an immediate
2142     if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
2143       Op.addImmOperands(Inst, 1);
2144       continue;
2145     }
2146 
2147     // Handle tokens like 'offen' which are sometimes hard-coded into the
2148     // asm string.  There are no MCInst operands for these.
2149     if (Op.isToken()) {
2150       continue;
2151     }
2152     assert(Op.isImm());
2153 
2154     // Handle optional arguments
2155     OptionalIdx[Op.getImmTy()] = i;
2156   }
2157 
2158   // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
2159   if (IsAtomicReturn) {
2160     MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
2161     Inst.insert(I, *I);
2162   }
2163 
2164   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2165   if (!IsAtomic) { // glc is hard-coded.
2166     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2167   }
2168   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2169   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2170 }
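// For an atomic with return, e.g. "buffer_atomic_add v0, ..., glc", the same
// register is both the data source and the destination, so the leading
// $vdata_in operand is duplicated above to also serve as $vdata. For atomics,
// glc is part of the literal asm string rather than an optional operand.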
2171 
2172 //===----------------------------------------------------------------------===//
2173 // mimg
2174 //===----------------------------------------------------------------------===//
2175 
2176 void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2177   unsigned I = 1;
2178   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2179   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2180     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2181   }
2182 
2183   OptionalImmIndexMap OptionalIdx;
2184 
2185   for (unsigned E = Operands.size(); I != E; ++I) {
2186     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2187 
2188     // Add the register arguments
2189     if (Op.isRegOrImm()) {
2190       Op.addRegOrImmOperands(Inst, 1);
2191       continue;
2192     } else if (Op.isImmModifier()) {
2193       OptionalIdx[Op.getImmTy()] = I;
2194     } else {
2195       assert(false);
2196     }
2197   }
2198 
2199   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2200   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2201   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2202   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2203   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2204   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2205   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2206   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2207 }
2208 
2209 void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2210   unsigned I = 1;
2211   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2212   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2213     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2214   }
2215 
2216   // Add src, same as dst
2217   ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2218 
2219   OptionalImmIndexMap OptionalIdx;
2220 
2221   for (unsigned E = Operands.size(); I != E; ++I) {
2222     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2223 
2224     // Add the register arguments
2225     if (Op.isRegOrImm()) {
2226       Op.addRegOrImmOperands(Inst, 1);
2227       continue;
2228     } else if (Op.isImmModifier()) {
2229       OptionalIdx[Op.getImmTy()] = I;
2230     } else {
2231       assert(false);
2232     }
2233   }
2234 
2235   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2236   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2237   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2238   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2239   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2240   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2241   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2242   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2243 }
2244 
2245 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2246   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2247 }
2248 
2249 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
2250   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
2251 }
2252 
2253 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
2254   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
2255 }
2256 
2257 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
2258   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
2259 }
2260 
2261 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
2262   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
2263 }
2264 
2265 //===----------------------------------------------------------------------===//
2266 // smrd
2267 //===----------------------------------------------------------------------===//
2268 
2269 bool AMDGPUOperand::isSMRDOffset() const {
2270 
2271   // FIXME: Support 20-bit offsets on VI.  We need to pass subtarget
2272   // information here.
2273   return isImm() && isUInt<8>(getImm());
2274 }
2275 
2276 bool AMDGPUOperand::isSMRDLiteralOffset() const {
2277   // 32-bit literals are only supported on CI and we only want to use them
2278   // when the offset does not fit in 8 bits.
2279   return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2280 }
2281 
2282 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2283   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2284 }
2285 
2286 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
2287   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2288 }
2289 
2290 //===----------------------------------------------------------------------===//
2291 // vop3
2292 //===----------------------------------------------------------------------===//
2293 
2294 static bool ConvertOmodMul(int64_t &Mul) {
2295   if (Mul != 1 && Mul != 2 && Mul != 4)
2296     return false;
2297 
2298   Mul >>= 1;
2299   return true;
2300 }
2301 
2302 static bool ConvertOmodDiv(int64_t &Div) {
2303   if (Div == 1) {
2304     Div = 0;
2305     return true;
2306   }
2307 
2308   if (Div == 2) {
2309     Div = 3;
2310     return true;
2311   }
2312 
2313   return false;
2314 }
2315 
2316 static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
2317   if (BoundCtrl == 0) {
2318     BoundCtrl = 1;
2319     return true;
2320   } else if (BoundCtrl == -1) {
2321     BoundCtrl = 0;
2322     return true;
2323   }
2324   return false;
2325 }
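// These conversions map the assembler syntax onto the encoded field values:
// "mul:2" and "mul:4" become omod encodings 1 and 2, "div:2" becomes 3, and
// "bound_ctrl:0" (the assembler spelling for "enabled") becomes 1.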
2326 
2327 // Note: the order in this table matches the order of operands in AsmString.
2328 static const OptionalOperand AMDGPUOptionalOperandTable[] = {
2329   {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
2330   {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
2331   {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
2332   {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
2333   {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
2334   {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
2335   {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
2336   {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
2337   {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
2338   {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
2339   {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
2340   {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
2341   {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
2342   {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
2343   {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
2344   {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
2345   {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
2346   {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
2347   {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
2348   {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
2349   {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
2350   {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
2351   {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
2352   {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
2353 };
2354 
2355 AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2356   OperandMatchResultTy res;
2357   for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2358     // try to parse any optional operand here
2359     if (Op.IsBit) {
2360       res = parseNamedBit(Op.Name, Operands, Op.Type);
2361     } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2362       res = parseOModOperand(Operands);
2363     } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2364                Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2365                Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2366       res = parseSDWASel(Operands, Op.Name, Op.Type);
2367     } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2368       res = parseSDWADstUnused(Operands);
2369     } else {
2370       res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2371     }
2372     if (res != MatchOperand_NoMatch) {
2373       return res;
2374     }
2375   }
2376   return MatchOperand_NoMatch;
2377 }
2378 
2379 AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2380 {
2381   StringRef Name = Parser.getTok().getString();
2382   if (Name == "mul") {
2383     return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
2384   } else if (Name == "div") {
2385     return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
2386   } else {
2387     return MatchOperand_NoMatch;
2388   }
2389 }
2390 
2391 void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2392   unsigned I = 1;
2393   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2394   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2395     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2396   }
2397   for (unsigned E = Operands.size(); I != E; ++I)
2398     ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2399 }
2400 
2401 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
2402   uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2403   if (TSFlags & SIInstrFlags::VOP3) {
2404     cvtVOP3(Inst, Operands);
2405   } else {
2406     cvtId(Inst, Operands);
2407   }
2408 }
2409 
2410 void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
2411   OptionalImmIndexMap OptionalIdx;
2412   unsigned I = 1;
2413   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2414   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2415     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2416   }
2417 
2418   for (unsigned E = Operands.size(); I != E; ++I) {
2419     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2420     if (Op.isRegOrImmWithInputMods()) {
2421       // only fp modifiers allowed in VOP3
2422       Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
2423     } else if (Op.isImm()) {
2424       OptionalIdx[Op.getImmTy()] = I;
2425     } else {
2426       assert(false);
2427     }
2428   }
2429 
2430   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2431   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
2432 }
2433 
2434 //===----------------------------------------------------------------------===//
2435 // dpp
2436 //===----------------------------------------------------------------------===//
2437 
2438 bool AMDGPUOperand::isDPPCtrl() const {
2439   bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2440   if (result) {
2441     int64_t Imm = getImm();
2442     return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2443            ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2444            ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2445            ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2446            (Imm == 0x130) ||
2447            (Imm == 0x134) ||
2448            (Imm == 0x138) ||
2449            (Imm == 0x13c) ||
2450            (Imm == 0x140) ||
2451            (Imm == 0x141) ||
2452            (Imm == 0x142) ||
2453            (Imm == 0x143);
2454   }
2455   return false;
2456 }
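// The ranges accepted above mirror the dpp_ctrl encodings produced by
// parseDPPCtrl below: 0x000-0x0ff quad_perm, 0x101-0x10f row_shl,
// 0x111-0x11f row_shr, 0x121-0x12f row_ror, 0x130/0x134/0x138/0x13c the
// wave_* shifts and rotates, 0x140 row_mirror, 0x141 row_half_mirror, and
// 0x142/0x143 row_bcast:15 / row_bcast:31.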
2457 
2458 AMDGPUAsmParser::OperandMatchResultTy
2459 AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
2460   SMLoc S = Parser.getTok().getLoc();
2461   StringRef Prefix;
2462   int64_t Int;
2463 
2464   if (getLexer().getKind() == AsmToken::Identifier) {
2465     Prefix = Parser.getTok().getString();
2466   } else {
2467     return MatchOperand_NoMatch;
2468   }
2469 
2470   if (Prefix == "row_mirror") {
2471     Int = 0x140;
2472   } else if (Prefix == "row_half_mirror") {
2473     Int = 0x141;
2474   } else {
2475     // Check to prevent parseDPPCtrlOps from eating invalid tokens
2476     if (Prefix != "quad_perm"
2477         && Prefix != "row_shl"
2478         && Prefix != "row_shr"
2479         && Prefix != "row_ror"
2480         && Prefix != "wave_shl"
2481         && Prefix != "wave_rol"
2482         && Prefix != "wave_shr"
2483         && Prefix != "wave_ror"
2484         && Prefix != "row_bcast") {
2485       return MatchOperand_NoMatch;
2486     }
2487 
2488     Parser.Lex();
2489     if (getLexer().isNot(AsmToken::Colon))
2490       return MatchOperand_ParseFail;
2491 
2492     if (Prefix == "quad_perm") {
2493       // quad_perm:[%d,%d,%d,%d]
2494       Parser.Lex();
2495       if (getLexer().isNot(AsmToken::LBrac))
2496         return MatchOperand_ParseFail;
2497 
2498       Parser.Lex();
2499       if (getLexer().isNot(AsmToken::Integer))
2500         return MatchOperand_ParseFail;
2501       Int = getLexer().getTok().getIntVal();
2502 
2503       Parser.Lex();
2504       if (getLexer().isNot(AsmToken::Comma))
2505         return MatchOperand_ParseFail;
2506       Parser.Lex();
2507       if (getLexer().isNot(AsmToken::Integer))
2508         return MatchOperand_ParseFail;
2509       Int += (getLexer().getTok().getIntVal() << 2);
2510 
2511       Parser.Lex();
2512       if (getLexer().isNot(AsmToken::Comma))
2513         return MatchOperand_ParseFail;
2514       Parser.Lex();
2515       if (getLexer().isNot(AsmToken::Integer))
2516         return MatchOperand_ParseFail;
2517       Int += (getLexer().getTok().getIntVal() << 4);
2518 
2519       Parser.Lex();
2520       if (getLexer().isNot(AsmToken::Comma))
2521         return MatchOperand_ParseFail;
2522       Parser.Lex();
2523       if (getLexer().isNot(AsmToken::Integer))
2524         return MatchOperand_ParseFail;
2525       Int += (getLexer().getTok().getIntVal() << 6);
2526 
2527       Parser.Lex();
2528       if (getLexer().isNot(AsmToken::RBrac))
2529         return MatchOperand_ParseFail;
2530 
2531     } else {
2532       // sel:%d
2533       Parser.Lex();
2534       if (getLexer().isNot(AsmToken::Integer))
2535         return MatchOperand_ParseFail;
2536       Int = getLexer().getTok().getIntVal();
2537 
2538       if (Prefix == "row_shl") {
2539         Int |= 0x100;
2540       } else if (Prefix == "row_shr") {
2541         Int |= 0x110;
2542       } else if (Prefix == "row_ror") {
2543         Int |= 0x120;
2544       } else if (Prefix == "wave_shl") {
2545         Int = 0x130;
2546       } else if (Prefix == "wave_rol") {
2547         Int = 0x134;
2548       } else if (Prefix == "wave_shr") {
2549         Int = 0x138;
2550       } else if (Prefix == "wave_ror") {
2551         Int = 0x13C;
2552       } else if (Prefix == "row_bcast") {
2553         if (Int == 15) {
2554           Int = 0x142;
2555         } else if (Int == 31) {
2556           Int = 0x143;
2557         } else {
2558           return MatchOperand_ParseFail;
2559         }
2560       } else {
2561         return MatchOperand_ParseFail;
2562       }
2563     }
2564   }
2565   Parser.Lex(); // eat last token
2566 
2567   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2568                                               AMDGPUOperand::ImmTyDppCtrl));
2569   return MatchOperand_Success;
2570 }
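// A worked example: "quad_perm:[0,1,2,3]" (the identity permutation) packs
// the four 2-bit lane selects as 0 | (1 << 2) | (2 << 4) | (3 << 6) = 0xe4.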
2571 
2572 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2573   return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
2574 }
2575 
2576 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2577   return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
2578 }
2579 
2580 AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2581   return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2582 }
2583 
2584 void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
2585   OptionalImmIndexMap OptionalIdx;
2586 
2587   unsigned I = 1;
2588   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2589   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2590     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2591   }
2592 
2593   for (unsigned E = Operands.size(); I != E; ++I) {
2594     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2595     // Add the register arguments
2596     if (Op.isRegOrImmWithInputMods()) {
2597       // Only float modifiers supported in DPP
2598       Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
2599     } else if (Op.isDPPCtrl()) {
2600       Op.addImmOperands(Inst, 1);
2601     } else if (Op.isImm()) {
2602       // Handle optional arguments
2603       OptionalIdx[Op.getImmTy()] = I;
2604     } else {
2605       llvm_unreachable("Invalid operand type");
2606     }
2607   }
2608 
2609   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2610   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2611   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2612 }
2613 
2614 //===----------------------------------------------------------------------===//
2615 // sdwa
2616 //===----------------------------------------------------------------------===//
2617 
2618 AMDGPUAsmParser::OperandMatchResultTy
2619 AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
2620                               AMDGPUOperand::ImmTy Type) {
2621   SMLoc S = Parser.getTok().getLoc();
2622   StringRef Value;
2623   AMDGPUAsmParser::OperandMatchResultTy res;
2624 
2625   res = parseStringWithPrefix(Prefix, Value);
2626   if (res != MatchOperand_Success) {
2627     return res;
2628   }
2629 
2630   int64_t Int;
2631   Int = StringSwitch<int64_t>(Value)
2632         .Case("BYTE_0", 0)
2633         .Case("BYTE_1", 1)
2634         .Case("BYTE_2", 2)
2635         .Case("BYTE_3", 3)
2636         .Case("WORD_0", 4)
2637         .Case("WORD_1", 5)
2638         .Case("DWORD", 6)
2639         .Default(0xffffffff);
2640   Parser.Lex(); // eat last token
2641 
2642   if (Int == 0xffffffff) {
2643     return MatchOperand_ParseFail;
2644   }
2645 
2646   Operands.push_back(AMDGPUOperand::CreateImm(Int, S, Type));
2647   return MatchOperand_Success;
2648 }
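// For example, "dst_sel:WORD_1" selects the high 16 bits and encodes as 5,
// while "DWORD" (6) is the neutral selection covering all 32 bits.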
2649 
2650 AMDGPUAsmParser::OperandMatchResultTy
2651 AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2652   SMLoc S = Parser.getTok().getLoc();
2653   StringRef Value;
2654   AMDGPUAsmParser::OperandMatchResultTy res;
2655 
2656   res = parseStringWithPrefix("dst_unused", Value);
2657   if (res != MatchOperand_Success) {
2658     return res;
2659   }
2660 
2661   int64_t Int;
2662   Int = StringSwitch<int64_t>(Value)
2663         .Case("UNUSED_PAD", 0)
2664         .Case("UNUSED_SEXT", 1)
2665         .Case("UNUSED_PRESERVE", 2)
2666         .Default(0xffffffff);
2667   Parser.Lex(); // eat last token
2668 
2669   if (Int == 0xffffffff) {
2670     return MatchOperand_ParseFail;
2671   }
2672 
2673   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2674                                               AMDGPUOperand::ImmTySdwaDstUnused));
2675   return MatchOperand_Success;
2676 }
2677 
2678 void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
2679   cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
2680 }
2681 
2682 void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
2683   cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
2684 }
2685 
2686 void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
2687   cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
2688 }
2689 
2690 void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
2691                               uint64_t BasicInstType) {
2692   OptionalImmIndexMap OptionalIdx;
2693 
2694   unsigned I = 1;
2695   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2696   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2697     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2698   }
2699 
2700   for (unsigned E = Operands.size(); I != E; ++I) {
2701     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2702     // Add the register arguments
2703     if (BasicInstType == SIInstrFlags::VOPC &&
2704         Op.isReg() &&
2705         Op.Reg.RegNo == AMDGPU::VCC) {
2706       // VOPC sdwa uses the "vcc" token as dst. Skip it.
2707       continue;
2708     } else if (Op.isRegOrImmWithInputMods()) {
2709        Op.addRegOrImmWithInputModsOperands(Inst, 2);
2710     } else if (Op.isImm()) {
2711       // Handle optional arguments
2712       OptionalIdx[Op.getImmTy()] = I;
2713     } else {
2714       llvm_unreachable("Invalid operand type");
2715     }
2716   }
2717 
2718   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
2719 
2720   if (Inst.getOpcode() == AMDGPU::V_NOP_sdwa) {
2721     // V_NOP_sdwa has no optional sdwa arguments
2722     return;
2723   }
2724   switch (BasicInstType) {
2725   case SIInstrFlags::VOP1: {
2726     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
2727     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
2728     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
2729     break;
2730   }
2731   case SIInstrFlags::VOP2: {
2732     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
2733     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
2734     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
2735     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
2736     break;
2737   }
2738   case SIInstrFlags::VOPC: {
2739     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
2740     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
2741     break;
2742   }
2743   default:
2744     llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
2745   }
2746 }
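// The defaults supplied above are the neutral encodings from the parsers
// earlier in this file: dst_sel/src0_sel/src1_sel 6 (DWORD) and dst_unused 2
// (UNUSED_PRESERVE).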
2747 
2748 /// Force static initialization.
2749 extern "C" void LLVMInitializeAMDGPUAsmParser() {
2750   RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2751   RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2752 }
2753 
2754 #define GET_REGISTER_MATCHER
2755 #define GET_MATCHER_IMPLEMENTATION
2756 #include "AMDGPUGenAsmMatcher.inc"
2757 
2758 
2759 // This function should be defined after the auto-generated include so that we
2760 // have the MatchClassKind enum defined.
2761 unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
2762                                                      unsigned Kind) {
2763   // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
2764   // But MatchInstructionImpl() expects a token and fails to validate the
2765   // operand. This method checks whether we were given an immediate operand
2766   // when the matcher expects the corresponding token.
2767   AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
2768   switch (Kind) {
2769   case MCK_addr64:
2770     return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
2771   case MCK_gds:
2772     return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
2773   case MCK_glc:
2774     return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
2775   case MCK_idxen:
2776     return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
2777   case MCK_offen:
2778     return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
2779   case MCK_SSrc32:
2780     // When operands have expression values, they will return true for isToken,
2781     // because it is not possible to distinguish between a token and an
2782     // expression at parse time. MatchInstructionImpl() will always try to
2783     // match an operand as a token, when isToken returns true, and when the
2784     // name of the expression is not a valid token, the match will fail,
2785     // so we need to handle it here.
2786     return Operand.isSSrc32() ? Match_Success : Match_InvalidOperand;
2787   case MCK_SoppBrTarget:
2788     return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
2789   default: return Match_InvalidOperand;
2790   }
2791 }
2792