1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMMCExpr.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCAsmInfo.h"
21 #include "llvm/MC/MCAssembler.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
24 #include "llvm/MC/MCELFStreamer.h"
25 #include "llvm/MC/MCExpr.h"
26 #include "llvm/MC/MCInst.h"
27 #include "llvm/MC/MCInstrDesc.h"
28 #include "llvm/MC/MCInstrInfo.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCSection.h"
37 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSubtargetInfo.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/Support/ARMBuildAttributes.h"
41 #include "llvm/Support/ARMEHABI.h"
42 #include "llvm/Support/COFF.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/ELF.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/SourceMgr.h"
47 #include "llvm/Support/TargetParser.h"
48 #include "llvm/Support/TargetRegistry.h"
49 #include "llvm/Support/raw_ostream.h"
50
51 using namespace llvm;
52
53 namespace {
54
55 class ARMOperand;
56
57 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
58
59 class UnwindContext {
60 MCAsmParser &Parser;
61
62 typedef SmallVector<SMLoc, 4> Locs;
63
64 Locs FnStartLocs;
65 Locs CantUnwindLocs;
66 Locs PersonalityLocs;
67 Locs PersonalityIndexLocs;
68 Locs HandlerDataLocs;
69 int FPReg;
70
71 public:
UnwindContext(MCAsmParser & P)72 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
73
hasFnStart() const74 bool hasFnStart() const { return !FnStartLocs.empty(); }
cantUnwind() const75 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
hasHandlerData() const76 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
hasPersonality() const77 bool hasPersonality() const {
78 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
79 }
80
recordFnStart(SMLoc L)81 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
recordCantUnwind(SMLoc L)82 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
recordPersonality(SMLoc L)83 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
recordHandlerData(SMLoc L)84 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
recordPersonalityIndex(SMLoc L)85 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
86
saveFPReg(int Reg)87 void saveFPReg(int Reg) { FPReg = Reg; }
getFPReg() const88 int getFPReg() const { return FPReg; }
89
emitFnStartLocNotes() const90 void emitFnStartLocNotes() const {
91 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
92 FI != FE; ++FI)
93 Parser.Note(*FI, ".fnstart was specified here");
94 }
emitCantUnwindLocNotes() const95 void emitCantUnwindLocNotes() const {
96 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
97 UE = CantUnwindLocs.end(); UI != UE; ++UI)
98 Parser.Note(*UI, ".cantunwind was specified here");
99 }
emitHandlerDataLocNotes() const100 void emitHandlerDataLocNotes() const {
101 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
102 HE = HandlerDataLocs.end(); HI != HE; ++HI)
103 Parser.Note(*HI, ".handlerdata was specified here");
104 }
emitPersonalityLocNotes() const105 void emitPersonalityLocNotes() const {
106 for (Locs::const_iterator PI = PersonalityLocs.begin(),
107 PE = PersonalityLocs.end(),
108 PII = PersonalityIndexLocs.begin(),
109 PIE = PersonalityIndexLocs.end();
110 PI != PE || PII != PIE;) {
111 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
112 Parser.Note(*PI++, ".personality was specified here");
113 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
114 Parser.Note(*PII++, ".personalityindex was specified here");
115 else
116 llvm_unreachable(".personality and .personalityindex cannot be "
117 "at the same location");
118 }
119 }
120
reset()121 void reset() {
122 FnStartLocs = Locs();
123 CantUnwindLocs = Locs();
124 PersonalityLocs = Locs();
125 HandlerDataLocs = Locs();
126 PersonalityIndexLocs = Locs();
127 FPReg = ARM::SP;
128 }
129 };
130
131 class ARMAsmParser : public MCTargetAsmParser {
132 const MCInstrInfo &MII;
133 const MCRegisterInfo *MRI;
134 UnwindContext UC;
135
getTargetStreamer()136 ARMTargetStreamer &getTargetStreamer() {
137 assert(getParser().getStreamer().getTargetStreamer() &&
138 "do not have a target streamer");
139 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
140 return static_cast<ARMTargetStreamer &>(TS);
141 }
142
143 // Map of register aliases registers via the .req directive.
144 StringMap<unsigned> RegisterReqs;
145
146 bool NextSymbolIsThumb;
147
148 struct {
149 ARMCC::CondCodes Cond; // Condition for IT block.
150 unsigned Mask:4; // Condition mask for instructions.
151 // Starting at first 1 (from lsb).
152 // '1' condition as indicated in IT.
153 // '0' inverse of condition (else).
154 // Count of instructions in IT block is
155 // 4 - trailingzeroes(mask)
156
157 bool FirstCond; // Explicit flag for when we're parsing the
158 // First instruction in the IT block. It's
159 // implied in the mask, so needs special
160 // handling.
161
162 unsigned CurPosition; // Current position in parsing of IT
163 // block. In range [0,3]. Initialized
164 // according to count of instructions in block.
165 // ~0U if no active IT block.
166 } ITState;
inITBlock()167 bool inITBlock() { return ITState.CurPosition != ~0U; }
lastInITBlock()168 bool lastInITBlock() {
169 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
170 }
forwardITPosition()171 void forwardITPosition() {
172 if (!inITBlock()) return;
173 // Move to the next instruction in the IT block, if there is one. If not,
174 // mark the block as done.
175 unsigned TZ = countTrailingZeros(ITState.Mask);
176 if (++ITState.CurPosition == 5 - TZ)
177 ITState.CurPosition = ~0U; // Done with the IT block after this.
178 }
179
Note(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)180 void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
181 return getParser().Note(L, Msg, Ranges);
182 }
Warning(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)183 bool Warning(SMLoc L, const Twine &Msg,
184 ArrayRef<SMRange> Ranges = None) {
185 return getParser().Warning(L, Msg, Ranges);
186 }
Error(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)187 bool Error(SMLoc L, const Twine &Msg,
188 ArrayRef<SMRange> Ranges = None) {
189 return getParser().Error(L, Msg, Ranges);
190 }
191
192 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
193 unsigned ListNo, bool IsARPop = false);
194 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
195 unsigned ListNo);
196
197 int tryParseRegister();
198 bool tryParseRegisterWithWriteBack(OperandVector &);
199 int tryParseShiftRegister(OperandVector &);
200 bool parseRegisterList(OperandVector &);
201 bool parseMemory(OperandVector &);
202 bool parseOperand(OperandVector &, StringRef Mnemonic);
203 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
204 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
205 unsigned &ShiftAmount);
206 bool parseLiteralValues(unsigned Size, SMLoc L);
207 bool parseDirectiveThumb(SMLoc L);
208 bool parseDirectiveARM(SMLoc L);
209 bool parseDirectiveThumbFunc(SMLoc L);
210 bool parseDirectiveCode(SMLoc L);
211 bool parseDirectiveSyntax(SMLoc L);
212 bool parseDirectiveReq(StringRef Name, SMLoc L);
213 bool parseDirectiveUnreq(SMLoc L);
214 bool parseDirectiveArch(SMLoc L);
215 bool parseDirectiveEabiAttr(SMLoc L);
216 bool parseDirectiveCPU(SMLoc L);
217 bool parseDirectiveFPU(SMLoc L);
218 bool parseDirectiveFnStart(SMLoc L);
219 bool parseDirectiveFnEnd(SMLoc L);
220 bool parseDirectiveCantUnwind(SMLoc L);
221 bool parseDirectivePersonality(SMLoc L);
222 bool parseDirectiveHandlerData(SMLoc L);
223 bool parseDirectiveSetFP(SMLoc L);
224 bool parseDirectivePad(SMLoc L);
225 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
226 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
227 bool parseDirectiveLtorg(SMLoc L);
228 bool parseDirectiveEven(SMLoc L);
229 bool parseDirectivePersonalityIndex(SMLoc L);
230 bool parseDirectiveUnwindRaw(SMLoc L);
231 bool parseDirectiveTLSDescSeq(SMLoc L);
232 bool parseDirectiveMovSP(SMLoc L);
233 bool parseDirectiveObjectArch(SMLoc L);
234 bool parseDirectiveArchExtension(SMLoc L);
235 bool parseDirectiveAlign(SMLoc L);
236 bool parseDirectiveThumbSet(SMLoc L);
237
238 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
239 bool &CarrySetting, unsigned &ProcessorIMod,
240 StringRef &ITMask);
241 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
242 bool &CanAcceptCarrySet,
243 bool &CanAcceptPredicationCode);
244
245 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
246 OperandVector &Operands);
isThumb() const247 bool isThumb() const {
248 // FIXME: Can tablegen auto-generate this?
249 return getSTI().getFeatureBits()[ARM::ModeThumb];
250 }
isThumbOne() const251 bool isThumbOne() const {
252 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
253 }
isThumbTwo() const254 bool isThumbTwo() const {
255 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
256 }
hasThumb() const257 bool hasThumb() const {
258 return getSTI().getFeatureBits()[ARM::HasV4TOps];
259 }
hasThumb2() const260 bool hasThumb2() const {
261 return getSTI().getFeatureBits()[ARM::FeatureThumb2];
262 }
hasV6Ops() const263 bool hasV6Ops() const {
264 return getSTI().getFeatureBits()[ARM::HasV6Ops];
265 }
hasV6T2Ops() const266 bool hasV6T2Ops() const {
267 return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
268 }
hasV6MOps() const269 bool hasV6MOps() const {
270 return getSTI().getFeatureBits()[ARM::HasV6MOps];
271 }
hasV7Ops() const272 bool hasV7Ops() const {
273 return getSTI().getFeatureBits()[ARM::HasV7Ops];
274 }
hasV8Ops() const275 bool hasV8Ops() const {
276 return getSTI().getFeatureBits()[ARM::HasV8Ops];
277 }
hasV8MBaseline() const278 bool hasV8MBaseline() const {
279 return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
280 }
hasV8MMainline() const281 bool hasV8MMainline() const {
282 return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
283 }
has8MSecExt() const284 bool has8MSecExt() const {
285 return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
286 }
hasARM() const287 bool hasARM() const {
288 return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
289 }
hasDSP() const290 bool hasDSP() const {
291 return getSTI().getFeatureBits()[ARM::FeatureDSP];
292 }
hasD16() const293 bool hasD16() const {
294 return getSTI().getFeatureBits()[ARM::FeatureD16];
295 }
hasV8_1aOps() const296 bool hasV8_1aOps() const {
297 return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
298 }
hasRAS() const299 bool hasRAS() const {
300 return getSTI().getFeatureBits()[ARM::FeatureRAS];
301 }
302
SwitchMode()303 void SwitchMode() {
304 MCSubtargetInfo &STI = copySTI();
305 uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
306 setAvailableFeatures(FB);
307 }
308 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
isMClass() const309 bool isMClass() const {
310 return getSTI().getFeatureBits()[ARM::FeatureMClass];
311 }
312
313 /// @name Auto-generated Match Functions
314 /// {
315
316 #define GET_ASSEMBLER_HEADER
317 #include "ARMGenAsmMatcher.inc"
318
319 /// }
320
321 OperandMatchResultTy parseITCondCode(OperandVector &);
322 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
323 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
324 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
325 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
326 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
327 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
328 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
329 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
330 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
331 int High);
parsePKHLSLImm(OperandVector & O)332 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
333 return parsePKHImm(O, "lsl", 0, 31);
334 }
parsePKHASRImm(OperandVector & O)335 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
336 return parsePKHImm(O, "asr", 1, 32);
337 }
338 OperandMatchResultTy parseSetEndImm(OperandVector &);
339 OperandMatchResultTy parseShifterImm(OperandVector &);
340 OperandMatchResultTy parseRotImm(OperandVector &);
341 OperandMatchResultTy parseModImm(OperandVector &);
342 OperandMatchResultTy parseBitfield(OperandVector &);
343 OperandMatchResultTy parsePostIdxReg(OperandVector &);
344 OperandMatchResultTy parseAM3Offset(OperandVector &);
345 OperandMatchResultTy parseFPImm(OperandVector &);
346 OperandMatchResultTy parseVectorList(OperandVector &);
347 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
348 SMLoc &EndLoc);
349
350 // Asm Match Converter Methods
351 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
352 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
353
354 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
355 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
356 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
357 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
358
359 public:
360 enum ARMMatchResultTy {
361 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
362 Match_RequiresNotITBlock,
363 Match_RequiresV6,
364 Match_RequiresThumb2,
365 Match_RequiresV8,
366 #define GET_OPERAND_DIAGNOSTIC_TYPES
367 #include "ARMGenAsmMatcher.inc"
368
369 };
370
ARMAsmParser(const MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)371 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
372 const MCInstrInfo &MII, const MCTargetOptions &Options)
373 : MCTargetAsmParser(Options, STI), MII(MII), UC(Parser) {
374 MCAsmParserExtension::Initialize(Parser);
375
376 // Cache the MCRegisterInfo.
377 MRI = getContext().getRegisterInfo();
378
379 // Initialize the set of available features.
380 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
381
382 // Not in an ITBlock to start with.
383 ITState.CurPosition = ~0U;
384
385 NextSymbolIsThumb = false;
386 }
387
388 // Implementation of the MCTargetAsmParser interface:
389 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
390 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
391 SMLoc NameLoc, OperandVector &Operands) override;
392 bool ParseDirective(AsmToken DirectiveID) override;
393
394 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
395 unsigned Kind) override;
396 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
397
398 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
399 OperandVector &Operands, MCStreamer &Out,
400 uint64_t &ErrorInfo,
401 bool MatchingInlineAsm) override;
402 void onLabelParsed(MCSymbol *Symbol) override;
403 };
404 } // end anonymous namespace
405
406 namespace {
407
408 /// ARMOperand - Instances of this class represent a parsed ARM machine
409 /// operand.
410 class ARMOperand : public MCParsedAsmOperand {
411 enum KindTy {
412 k_CondCode,
413 k_CCOut,
414 k_ITCondMask,
415 k_CoprocNum,
416 k_CoprocReg,
417 k_CoprocOption,
418 k_Immediate,
419 k_MemBarrierOpt,
420 k_InstSyncBarrierOpt,
421 k_Memory,
422 k_PostIndexRegister,
423 k_MSRMask,
424 k_BankedReg,
425 k_ProcIFlags,
426 k_VectorIndex,
427 k_Register,
428 k_RegisterList,
429 k_DPRRegisterList,
430 k_SPRRegisterList,
431 k_VectorList,
432 k_VectorListAllLanes,
433 k_VectorListIndexed,
434 k_ShiftedRegister,
435 k_ShiftedImmediate,
436 k_ShifterImmediate,
437 k_RotateImmediate,
438 k_ModifiedImmediate,
439 k_ConstantPoolImmediate,
440 k_BitfieldDescriptor,
441 k_Token,
442 } Kind;
443
444 SMLoc StartLoc, EndLoc, AlignmentLoc;
445 SmallVector<unsigned, 8> Registers;
446
447 struct CCOp {
448 ARMCC::CondCodes Val;
449 };
450
451 struct CopOp {
452 unsigned Val;
453 };
454
455 struct CoprocOptionOp {
456 unsigned Val;
457 };
458
459 struct ITMaskOp {
460 unsigned Mask:4;
461 };
462
463 struct MBOptOp {
464 ARM_MB::MemBOpt Val;
465 };
466
467 struct ISBOptOp {
468 ARM_ISB::InstSyncBOpt Val;
469 };
470
471 struct IFlagsOp {
472 ARM_PROC::IFlags Val;
473 };
474
475 struct MMaskOp {
476 unsigned Val;
477 };
478
479 struct BankedRegOp {
480 unsigned Val;
481 };
482
483 struct TokOp {
484 const char *Data;
485 unsigned Length;
486 };
487
488 struct RegOp {
489 unsigned RegNum;
490 };
491
492 // A vector register list is a sequential list of 1 to 4 registers.
493 struct VectorListOp {
494 unsigned RegNum;
495 unsigned Count;
496 unsigned LaneIndex;
497 bool isDoubleSpaced;
498 };
499
500 struct VectorIndexOp {
501 unsigned Val;
502 };
503
504 struct ImmOp {
505 const MCExpr *Val;
506 };
507
508 /// Combined record for all forms of ARM address expressions.
509 struct MemoryOp {
510 unsigned BaseRegNum;
511 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
512 // was specified.
513 const MCConstantExpr *OffsetImm; // Offset immediate value
514 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
515 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
516 unsigned ShiftImm; // shift for OffsetReg.
517 unsigned Alignment; // 0 = no alignment specified
518 // n = alignment in bytes (2, 4, 8, 16, or 32)
519 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
520 };
521
522 struct PostIdxRegOp {
523 unsigned RegNum;
524 bool isAdd;
525 ARM_AM::ShiftOpc ShiftTy;
526 unsigned ShiftImm;
527 };
528
529 struct ShifterImmOp {
530 bool isASR;
531 unsigned Imm;
532 };
533
534 struct RegShiftedRegOp {
535 ARM_AM::ShiftOpc ShiftTy;
536 unsigned SrcReg;
537 unsigned ShiftReg;
538 unsigned ShiftImm;
539 };
540
541 struct RegShiftedImmOp {
542 ARM_AM::ShiftOpc ShiftTy;
543 unsigned SrcReg;
544 unsigned ShiftImm;
545 };
546
547 struct RotImmOp {
548 unsigned Imm;
549 };
550
551 struct ModImmOp {
552 unsigned Bits;
553 unsigned Rot;
554 };
555
556 struct BitfieldOp {
557 unsigned LSB;
558 unsigned Width;
559 };
560
561 union {
562 struct CCOp CC;
563 struct CopOp Cop;
564 struct CoprocOptionOp CoprocOption;
565 struct MBOptOp MBOpt;
566 struct ISBOptOp ISBOpt;
567 struct ITMaskOp ITMask;
568 struct IFlagsOp IFlags;
569 struct MMaskOp MMask;
570 struct BankedRegOp BankedReg;
571 struct TokOp Tok;
572 struct RegOp Reg;
573 struct VectorListOp VectorList;
574 struct VectorIndexOp VectorIndex;
575 struct ImmOp Imm;
576 struct MemoryOp Memory;
577 struct PostIdxRegOp PostIdxReg;
578 struct ShifterImmOp ShifterImm;
579 struct RegShiftedRegOp RegShiftedReg;
580 struct RegShiftedImmOp RegShiftedImm;
581 struct RotImmOp RotImm;
582 struct ModImmOp ModImm;
583 struct BitfieldOp Bitfield;
584 };
585
586 public:
ARMOperand(KindTy K)587 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
588
589 /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const590 SMLoc getStartLoc() const override { return StartLoc; }
591 /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const592 SMLoc getEndLoc() const override { return EndLoc; }
593 /// getLocRange - Get the range between the first and last token of this
594 /// operand.
getLocRange() const595 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
596
597 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
getAlignmentLoc() const598 SMLoc getAlignmentLoc() const {
599 assert(Kind == k_Memory && "Invalid access!");
600 return AlignmentLoc;
601 }
602
getCondCode() const603 ARMCC::CondCodes getCondCode() const {
604 assert(Kind == k_CondCode && "Invalid access!");
605 return CC.Val;
606 }
607
getCoproc() const608 unsigned getCoproc() const {
609 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
610 return Cop.Val;
611 }
612
getToken() const613 StringRef getToken() const {
614 assert(Kind == k_Token && "Invalid access!");
615 return StringRef(Tok.Data, Tok.Length);
616 }
617
getReg() const618 unsigned getReg() const override {
619 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
620 return Reg.RegNum;
621 }
622
getRegList() const623 const SmallVectorImpl<unsigned> &getRegList() const {
624 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
625 Kind == k_SPRRegisterList) && "Invalid access!");
626 return Registers;
627 }
628
getImm() const629 const MCExpr *getImm() const {
630 assert(isImm() && "Invalid access!");
631 return Imm.Val;
632 }
633
getConstantPoolImm() const634 const MCExpr *getConstantPoolImm() const {
635 assert(isConstantPoolImm() && "Invalid access!");
636 return Imm.Val;
637 }
638
getVectorIndex() const639 unsigned getVectorIndex() const {
640 assert(Kind == k_VectorIndex && "Invalid access!");
641 return VectorIndex.Val;
642 }
643
getMemBarrierOpt() const644 ARM_MB::MemBOpt getMemBarrierOpt() const {
645 assert(Kind == k_MemBarrierOpt && "Invalid access!");
646 return MBOpt.Val;
647 }
648
getInstSyncBarrierOpt() const649 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
650 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
651 return ISBOpt.Val;
652 }
653
getProcIFlags() const654 ARM_PROC::IFlags getProcIFlags() const {
655 assert(Kind == k_ProcIFlags && "Invalid access!");
656 return IFlags.Val;
657 }
658
getMSRMask() const659 unsigned getMSRMask() const {
660 assert(Kind == k_MSRMask && "Invalid access!");
661 return MMask.Val;
662 }
663
getBankedReg() const664 unsigned getBankedReg() const {
665 assert(Kind == k_BankedReg && "Invalid access!");
666 return BankedReg.Val;
667 }
668
isCoprocNum() const669 bool isCoprocNum() const { return Kind == k_CoprocNum; }
isCoprocReg() const670 bool isCoprocReg() const { return Kind == k_CoprocReg; }
isCoprocOption() const671 bool isCoprocOption() const { return Kind == k_CoprocOption; }
isCondCode() const672 bool isCondCode() const { return Kind == k_CondCode; }
isCCOut() const673 bool isCCOut() const { return Kind == k_CCOut; }
isITMask() const674 bool isITMask() const { return Kind == k_ITCondMask; }
isITCondCode() const675 bool isITCondCode() const { return Kind == k_CondCode; }
isImm() const676 bool isImm() const override {
677 return Kind == k_Immediate;
678 }
679
isARMBranchTarget() const680 bool isARMBranchTarget() const {
681 if (!isImm()) return false;
682
683 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
684 return CE->getValue() % 4 == 0;
685 return true;
686 }
687
688
isThumbBranchTarget() const689 bool isThumbBranchTarget() const {
690 if (!isImm()) return false;
691
692 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
693 return CE->getValue() % 2 == 0;
694 return true;
695 }
696
697 // checks whether this operand is an unsigned offset which fits is a field
698 // of specified width and scaled by a specific number of bits
699 template<unsigned width, unsigned scale>
isUnsignedOffset() const700 bool isUnsignedOffset() const {
701 if (!isImm()) return false;
702 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
703 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
704 int64_t Val = CE->getValue();
705 int64_t Align = 1LL << scale;
706 int64_t Max = Align * ((1LL << width) - 1);
707 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
708 }
709 return false;
710 }
711 // checks whether this operand is an signed offset which fits is a field
712 // of specified width and scaled by a specific number of bits
713 template<unsigned width, unsigned scale>
isSignedOffset() const714 bool isSignedOffset() const {
715 if (!isImm()) return false;
716 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
717 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
718 int64_t Val = CE->getValue();
719 int64_t Align = 1LL << scale;
720 int64_t Max = Align * ((1LL << (width-1)) - 1);
721 int64_t Min = -Align * (1LL << (width-1));
722 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
723 }
724 return false;
725 }
726
727 // checks whether this operand is a memory operand computed as an offset
728 // applied to PC. the offset may have 8 bits of magnitude and is represented
729 // with two bits of shift. textually it may be either [pc, #imm], #imm or
730 // relocable expression...
isThumbMemPC() const731 bool isThumbMemPC() const {
732 int64_t Val = 0;
733 if (isImm()) {
734 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
735 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
736 if (!CE) return false;
737 Val = CE->getValue();
738 }
739 else if (isMem()) {
740 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
741 if(Memory.BaseRegNum != ARM::PC) return false;
742 Val = Memory.OffsetImm->getValue();
743 }
744 else return false;
745 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
746 }
isFPImm() const747 bool isFPImm() const {
748 if (!isImm()) return false;
749 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
750 if (!CE) return false;
751 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
752 return Val != -1;
753 }
isFBits16() const754 bool isFBits16() const {
755 if (!isImm()) return false;
756 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
757 if (!CE) return false;
758 int64_t Value = CE->getValue();
759 return Value >= 0 && Value <= 16;
760 }
isFBits32() const761 bool isFBits32() const {
762 if (!isImm()) return false;
763 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764 if (!CE) return false;
765 int64_t Value = CE->getValue();
766 return Value >= 1 && Value <= 32;
767 }
isImm8s4() const768 bool isImm8s4() const {
769 if (!isImm()) return false;
770 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
771 if (!CE) return false;
772 int64_t Value = CE->getValue();
773 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
774 }
isImm0_1020s4() const775 bool isImm0_1020s4() const {
776 if (!isImm()) return false;
777 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778 if (!CE) return false;
779 int64_t Value = CE->getValue();
780 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
781 }
isImm0_508s4() const782 bool isImm0_508s4() const {
783 if (!isImm()) return false;
784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785 if (!CE) return false;
786 int64_t Value = CE->getValue();
787 return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
788 }
isImm0_508s4Neg() const789 bool isImm0_508s4Neg() const {
790 if (!isImm()) return false;
791 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792 if (!CE) return false;
793 int64_t Value = -CE->getValue();
794 // explicitly exclude zero. we want that to use the normal 0_508 version.
795 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
796 }
isImm0_239() const797 bool isImm0_239() const {
798 if (!isImm()) return false;
799 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800 if (!CE) return false;
801 int64_t Value = CE->getValue();
802 return Value >= 0 && Value < 240;
803 }
isImm0_255() const804 bool isImm0_255() const {
805 if (!isImm()) return false;
806 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807 if (!CE) return false;
808 int64_t Value = CE->getValue();
809 return Value >= 0 && Value < 256;
810 }
isImm0_4095() const811 bool isImm0_4095() const {
812 if (!isImm()) return false;
813 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814 if (!CE) return false;
815 int64_t Value = CE->getValue();
816 return Value >= 0 && Value < 4096;
817 }
isImm0_4095Neg() const818 bool isImm0_4095Neg() const {
819 if (!isImm()) return false;
820 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821 if (!CE) return false;
822 int64_t Value = -CE->getValue();
823 return Value > 0 && Value < 4096;
824 }
isImm0_1() const825 bool isImm0_1() const {
826 if (!isImm()) return false;
827 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828 if (!CE) return false;
829 int64_t Value = CE->getValue();
830 return Value >= 0 && Value < 2;
831 }
isImm0_3() const832 bool isImm0_3() const {
833 if (!isImm()) return false;
834 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
835 if (!CE) return false;
836 int64_t Value = CE->getValue();
837 return Value >= 0 && Value < 4;
838 }
isImm0_7() const839 bool isImm0_7() const {
840 if (!isImm()) return false;
841 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
842 if (!CE) return false;
843 int64_t Value = CE->getValue();
844 return Value >= 0 && Value < 8;
845 }
isImm0_15() const846 bool isImm0_15() const {
847 if (!isImm()) return false;
848 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
849 if (!CE) return false;
850 int64_t Value = CE->getValue();
851 return Value >= 0 && Value < 16;
852 }
isImm0_31() const853 bool isImm0_31() const {
854 if (!isImm()) return false;
855 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
856 if (!CE) return false;
857 int64_t Value = CE->getValue();
858 return Value >= 0 && Value < 32;
859 }
isImm0_63() const860 bool isImm0_63() const {
861 if (!isImm()) return false;
862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
863 if (!CE) return false;
864 int64_t Value = CE->getValue();
865 return Value >= 0 && Value < 64;
866 }
isImm8() const867 bool isImm8() const {
868 if (!isImm()) return false;
869 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
870 if (!CE) return false;
871 int64_t Value = CE->getValue();
872 return Value == 8;
873 }
isImm16() const874 bool isImm16() const {
875 if (!isImm()) return false;
876 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
877 if (!CE) return false;
878 int64_t Value = CE->getValue();
879 return Value == 16;
880 }
isImm32() const881 bool isImm32() const {
882 if (!isImm()) return false;
883 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
884 if (!CE) return false;
885 int64_t Value = CE->getValue();
886 return Value == 32;
887 }
isShrImm8() const888 bool isShrImm8() const {
889 if (!isImm()) return false;
890 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
891 if (!CE) return false;
892 int64_t Value = CE->getValue();
893 return Value > 0 && Value <= 8;
894 }
isShrImm16() const895 bool isShrImm16() const {
896 if (!isImm()) return false;
897 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
898 if (!CE) return false;
899 int64_t Value = CE->getValue();
900 return Value > 0 && Value <= 16;
901 }
isShrImm32() const902 bool isShrImm32() const {
903 if (!isImm()) return false;
904 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
905 if (!CE) return false;
906 int64_t Value = CE->getValue();
907 return Value > 0 && Value <= 32;
908 }
isShrImm64() const909 bool isShrImm64() const {
910 if (!isImm()) return false;
911 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
912 if (!CE) return false;
913 int64_t Value = CE->getValue();
914 return Value > 0 && Value <= 64;
915 }
isImm1_7() const916 bool isImm1_7() const {
917 if (!isImm()) return false;
918 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
919 if (!CE) return false;
920 int64_t Value = CE->getValue();
921 return Value > 0 && Value < 8;
922 }
isImm1_15() const923 bool isImm1_15() const {
924 if (!isImm()) return false;
925 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926 if (!CE) return false;
927 int64_t Value = CE->getValue();
928 return Value > 0 && Value < 16;
929 }
isImm1_31() const930 bool isImm1_31() const {
931 if (!isImm()) return false;
932 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
933 if (!CE) return false;
934 int64_t Value = CE->getValue();
935 return Value > 0 && Value < 32;
936 }
isImm1_16() const937 bool isImm1_16() const {
938 if (!isImm()) return false;
939 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
940 if (!CE) return false;
941 int64_t Value = CE->getValue();
942 return Value > 0 && Value < 17;
943 }
isImm1_32() const944 bool isImm1_32() const {
945 if (!isImm()) return false;
946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
947 if (!CE) return false;
948 int64_t Value = CE->getValue();
949 return Value > 0 && Value < 33;
950 }
isImm0_32() const951 bool isImm0_32() const {
952 if (!isImm()) return false;
953 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
954 if (!CE) return false;
955 int64_t Value = CE->getValue();
956 return Value >= 0 && Value < 33;
957 }
isImm0_65535() const958 bool isImm0_65535() const {
959 if (!isImm()) return false;
960 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
961 if (!CE) return false;
962 int64_t Value = CE->getValue();
963 return Value >= 0 && Value < 65536;
964 }
isImm256_65535Expr() const965 bool isImm256_65535Expr() const {
966 if (!isImm()) return false;
967 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
968 // If it's not a constant expression, it'll generate a fixup and be
969 // handled later.
970 if (!CE) return true;
971 int64_t Value = CE->getValue();
972 return Value >= 256 && Value < 65536;
973 }
isImm0_65535Expr() const974 bool isImm0_65535Expr() const {
975 if (!isImm()) return false;
976 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
977 // If it's not a constant expression, it'll generate a fixup and be
978 // handled later.
979 if (!CE) return true;
980 int64_t Value = CE->getValue();
981 return Value >= 0 && Value < 65536;
982 }
isImm24bit() const983 bool isImm24bit() const {
984 if (!isImm()) return false;
985 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
986 if (!CE) return false;
987 int64_t Value = CE->getValue();
988 return Value >= 0 && Value <= 0xffffff;
989 }
isImmThumbSR() const990 bool isImmThumbSR() const {
991 if (!isImm()) return false;
992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
993 if (!CE) return false;
994 int64_t Value = CE->getValue();
995 return Value > 0 && Value < 33;
996 }
isPKHLSLImm() const997 bool isPKHLSLImm() const {
998 if (!isImm()) return false;
999 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1000 if (!CE) return false;
1001 int64_t Value = CE->getValue();
1002 return Value >= 0 && Value < 32;
1003 }
isPKHASRImm() const1004 bool isPKHASRImm() const {
1005 if (!isImm()) return false;
1006 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1007 if (!CE) return false;
1008 int64_t Value = CE->getValue();
1009 return Value > 0 && Value <= 32;
1010 }
isAdrLabel() const1011 bool isAdrLabel() const {
1012 // If we have an immediate that's not a constant, treat it as a label
1013 // reference needing a fixup.
1014 if (isImm() && !isa<MCConstantExpr>(getImm()))
1015 return true;
1016
1017 // If it is a constant, it must fit into a modified immediate encoding.
1018 if (!isImm()) return false;
1019 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1020 if (!CE) return false;
1021 int64_t Value = CE->getValue();
1022 return (ARM_AM::getSOImmVal(Value) != -1 ||
1023 ARM_AM::getSOImmVal(-Value) != -1);
1024 }
isT2SOImm() const1025 bool isT2SOImm() const {
1026 if (!isImm()) return false;
1027 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1028 if (!CE) return false;
1029 int64_t Value = CE->getValue();
1030 return ARM_AM::getT2SOImmVal(Value) != -1;
1031 }
isT2SOImmNot() const1032 bool isT2SOImmNot() const {
1033 if (!isImm()) return false;
1034 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1035 if (!CE) return false;
1036 int64_t Value = CE->getValue();
1037 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1038 ARM_AM::getT2SOImmVal(~Value) != -1;
1039 }
isT2SOImmNeg() const1040 bool isT2SOImmNeg() const {
1041 if (!isImm()) return false;
1042 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1043 if (!CE) return false;
1044 int64_t Value = CE->getValue();
1045 // Only use this when not representable as a plain so_imm.
1046 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1047 ARM_AM::getT2SOImmVal(-Value) != -1;
1048 }
isSetEndImm() const1049 bool isSetEndImm() const {
1050 if (!isImm()) return false;
1051 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1052 if (!CE) return false;
1053 int64_t Value = CE->getValue();
1054 return Value == 1 || Value == 0;
1055 }
isReg() const1056 bool isReg() const override { return Kind == k_Register; }
isRegList() const1057 bool isRegList() const { return Kind == k_RegisterList; }
isDPRRegList() const1058 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
isSPRRegList() const1059 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
isToken() const1060 bool isToken() const override { return Kind == k_Token; }
isMemBarrierOpt() const1061 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
isInstSyncBarrierOpt() const1062 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
isMem() const1063 bool isMem() const override { return Kind == k_Memory; }
isShifterImm() const1064 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
isRegShiftedReg() const1065 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
isRegShiftedImm() const1066 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
isRotImm() const1067 bool isRotImm() const { return Kind == k_RotateImmediate; }
isModImm() const1068 bool isModImm() const { return Kind == k_ModifiedImmediate; }
isModImmNot() const1069 bool isModImmNot() const {
1070 if (!isImm()) return false;
1071 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1072 if (!CE) return false;
1073 int64_t Value = CE->getValue();
1074 return ARM_AM::getSOImmVal(~Value) != -1;
1075 }
isModImmNeg() const1076 bool isModImmNeg() const {
1077 if (!isImm()) return false;
1078 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1079 if (!CE) return false;
1080 int64_t Value = CE->getValue();
1081 return ARM_AM::getSOImmVal(Value) == -1 &&
1082 ARM_AM::getSOImmVal(-Value) != -1;
1083 }
isConstantPoolImm() const1084 bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
isBitfield() const1085 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
isPostIdxRegShifted() const1086 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
isPostIdxReg() const1087 bool isPostIdxReg() const {
1088 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
1089 }
isMemNoOffset(bool alignOK=false,unsigned Alignment=0) const1090 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1091 if (!isMem())
1092 return false;
1093 // No offset of any kind.
1094 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1095 (alignOK || Memory.Alignment == Alignment);
1096 }
isMemPCRelImm12() const1097 bool isMemPCRelImm12() const {
1098 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1099 return false;
1100 // Base register must be PC.
1101 if (Memory.BaseRegNum != ARM::PC)
1102 return false;
1103 // Immediate offset in range [-4095, 4095].
1104 if (!Memory.OffsetImm) return true;
1105 int64_t Val = Memory.OffsetImm->getValue();
1106 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1107 }
isAlignedMemory() const1108 bool isAlignedMemory() const {
1109 return isMemNoOffset(true);
1110 }
isAlignedMemoryNone() const1111 bool isAlignedMemoryNone() const {
1112 return isMemNoOffset(false, 0);
1113 }
isDupAlignedMemoryNone() const1114 bool isDupAlignedMemoryNone() const {
1115 return isMemNoOffset(false, 0);
1116 }
isAlignedMemory16() const1117 bool isAlignedMemory16() const {
1118 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1119 return true;
1120 return isMemNoOffset(false, 0);
1121 }
isDupAlignedMemory16() const1122 bool isDupAlignedMemory16() const {
1123 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1124 return true;
1125 return isMemNoOffset(false, 0);
1126 }
isAlignedMemory32() const1127 bool isAlignedMemory32() const {
1128 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1129 return true;
1130 return isMemNoOffset(false, 0);
1131 }
isDupAlignedMemory32() const1132 bool isDupAlignedMemory32() const {
1133 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1134 return true;
1135 return isMemNoOffset(false, 0);
1136 }
isAlignedMemory64() const1137 bool isAlignedMemory64() const {
1138 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1139 return true;
1140 return isMemNoOffset(false, 0);
1141 }
isDupAlignedMemory64() const1142 bool isDupAlignedMemory64() const {
1143 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1144 return true;
1145 return isMemNoOffset(false, 0);
1146 }
isAlignedMemory64or128() const1147 bool isAlignedMemory64or128() const {
1148 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1149 return true;
1150 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1151 return true;
1152 return isMemNoOffset(false, 0);
1153 }
isDupAlignedMemory64or128() const1154 bool isDupAlignedMemory64or128() const {
1155 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1156 return true;
1157 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1158 return true;
1159 return isMemNoOffset(false, 0);
1160 }
isAlignedMemory64or128or256() const1161 bool isAlignedMemory64or128or256() const {
1162 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1163 return true;
1164 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1165 return true;
1166 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1167 return true;
1168 return isMemNoOffset(false, 0);
1169 }
isAddrMode2() const1170 bool isAddrMode2() const {
1171 if (!isMem() || Memory.Alignment != 0) return false;
1172 // Check for register offset.
1173 if (Memory.OffsetRegNum) return true;
1174 // Immediate offset in range [-4095, 4095].
1175 if (!Memory.OffsetImm) return true;
1176 int64_t Val = Memory.OffsetImm->getValue();
1177 return Val > -4096 && Val < 4096;
1178 }
isAM2OffsetImm() const1179 bool isAM2OffsetImm() const {
1180 if (!isImm()) return false;
1181 // Immediate offset in range [-4095, 4095].
1182 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1183 if (!CE) return false;
1184 int64_t Val = CE->getValue();
1185 return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1186 }
isAddrMode3() const1187 bool isAddrMode3() const {
1188 // If we have an immediate that's not a constant, treat it as a label
1189 // reference needing a fixup. If it is a constant, it's something else
1190 // and we reject it.
1191 if (isImm() && !isa<MCConstantExpr>(getImm()))
1192 return true;
1193 if (!isMem() || Memory.Alignment != 0) return false;
1194 // No shifts are legal for AM3.
1195 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1196 // Check for register offset.
1197 if (Memory.OffsetRegNum) return true;
1198 // Immediate offset in range [-255, 255].
1199 if (!Memory.OffsetImm) return true;
1200 int64_t Val = Memory.OffsetImm->getValue();
1201 // The #-0 offset is encoded as INT32_MIN, and we have to check
1202 // for this too.
1203 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1204 }
isAM3Offset() const1205 bool isAM3Offset() const {
1206 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1207 return false;
1208 if (Kind == k_PostIndexRegister)
1209 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1210 // Immediate offset in range [-255, 255].
1211 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1212 if (!CE) return false;
1213 int64_t Val = CE->getValue();
1214 // Special case, #-0 is INT32_MIN.
1215 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1216 }
isAddrMode5() const1217 bool isAddrMode5() const {
1218 // If we have an immediate that's not a constant, treat it as a label
1219 // reference needing a fixup. If it is a constant, it's something else
1220 // and we reject it.
1221 if (isImm() && !isa<MCConstantExpr>(getImm()))
1222 return true;
1223 if (!isMem() || Memory.Alignment != 0) return false;
1224 // Check for register offset.
1225 if (Memory.OffsetRegNum) return false;
1226 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1227 if (!Memory.OffsetImm) return true;
1228 int64_t Val = Memory.OffsetImm->getValue();
1229 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1230 Val == INT32_MIN;
1231 }
isAddrMode5FP16() const1232 bool isAddrMode5FP16() const {
1233 // If we have an immediate that's not a constant, treat it as a label
1234 // reference needing a fixup. If it is a constant, it's something else
1235 // and we reject it.
1236 if (isImm() && !isa<MCConstantExpr>(getImm()))
1237 return true;
1238 if (!isMem() || Memory.Alignment != 0) return false;
1239 // Check for register offset.
1240 if (Memory.OffsetRegNum) return false;
1241 // Immediate offset in range [-510, 510] and a multiple of 2.
1242 if (!Memory.OffsetImm) return true;
1243 int64_t Val = Memory.OffsetImm->getValue();
1244 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN;
1245 }
isMemTBB() const1246 bool isMemTBB() const {
1247 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1248 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1249 return false;
1250 return true;
1251 }
isMemTBH() const1252 bool isMemTBH() const {
1253 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1254 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1255 Memory.Alignment != 0 )
1256 return false;
1257 return true;
1258 }
isMemRegOffset() const1259 bool isMemRegOffset() const {
1260 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1261 return false;
1262 return true;
1263 }
isT2MemRegOffset() const1264 bool isT2MemRegOffset() const {
1265 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1266 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1267 return false;
1268 // Only lsl #{0, 1, 2, 3} allowed.
1269 if (Memory.ShiftType == ARM_AM::no_shift)
1270 return true;
1271 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1272 return false;
1273 return true;
1274 }
isMemThumbRR() const1275 bool isMemThumbRR() const {
1276 // Thumb reg+reg addressing is simple. Just two registers, a base and
1277 // an offset. No shifts, negations or any other complicating factors.
1278 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1279 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1280 return false;
1281 return isARMLowRegister(Memory.BaseRegNum) &&
1282 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1283 }
isMemThumbRIs4() const1284 bool isMemThumbRIs4() const {
1285 if (!isMem() || Memory.OffsetRegNum != 0 ||
1286 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1287 return false;
1288 // Immediate offset, multiple of 4 in range [0, 124].
1289 if (!Memory.OffsetImm) return true;
1290 int64_t Val = Memory.OffsetImm->getValue();
1291 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1292 }
isMemThumbRIs2() const1293 bool isMemThumbRIs2() const {
1294 if (!isMem() || Memory.OffsetRegNum != 0 ||
1295 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1296 return false;
1297 // Immediate offset, multiple of 4 in range [0, 62].
1298 if (!Memory.OffsetImm) return true;
1299 int64_t Val = Memory.OffsetImm->getValue();
1300 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1301 }
isMemThumbRIs1() const1302 bool isMemThumbRIs1() const {
1303 if (!isMem() || Memory.OffsetRegNum != 0 ||
1304 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1305 return false;
1306 // Immediate offset in range [0, 31].
1307 if (!Memory.OffsetImm) return true;
1308 int64_t Val = Memory.OffsetImm->getValue();
1309 return Val >= 0 && Val <= 31;
1310 }
isMemThumbSPI() const1311 bool isMemThumbSPI() const {
1312 if (!isMem() || Memory.OffsetRegNum != 0 ||
1313 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1314 return false;
1315 // Immediate offset, multiple of 4 in range [0, 1020].
1316 if (!Memory.OffsetImm) return true;
1317 int64_t Val = Memory.OffsetImm->getValue();
1318 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1319 }
isMemImm8s4Offset() const1320 bool isMemImm8s4Offset() const {
1321 // If we have an immediate that's not a constant, treat it as a label
1322 // reference needing a fixup. If it is a constant, it's something else
1323 // and we reject it.
1324 if (isImm() && !isa<MCConstantExpr>(getImm()))
1325 return true;
1326 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1327 return false;
1328 // Immediate offset a multiple of 4 in range [-1020, 1020].
1329 if (!Memory.OffsetImm) return true;
1330 int64_t Val = Memory.OffsetImm->getValue();
1331 // Special case, #-0 is INT32_MIN.
1332 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1333 }
isMemImm0_1020s4Offset() const1334 bool isMemImm0_1020s4Offset() const {
1335 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1336 return false;
1337 // Immediate offset a multiple of 4 in range [0, 1020].
1338 if (!Memory.OffsetImm) return true;
1339 int64_t Val = Memory.OffsetImm->getValue();
1340 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1341 }
isMemImm8Offset() const1342 bool isMemImm8Offset() const {
1343 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1344 return false;
1345 // Base reg of PC isn't allowed for these encodings.
1346 if (Memory.BaseRegNum == ARM::PC) return false;
1347 // Immediate offset in range [-255, 255].
1348 if (!Memory.OffsetImm) return true;
1349 int64_t Val = Memory.OffsetImm->getValue();
1350 return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1351 }
isMemPosImm8Offset() const1352 bool isMemPosImm8Offset() const {
1353 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1354 return false;
1355 // Immediate offset in range [0, 255].
1356 if (!Memory.OffsetImm) return true;
1357 int64_t Val = Memory.OffsetImm->getValue();
1358 return Val >= 0 && Val < 256;
1359 }
isMemNegImm8Offset() const1360 bool isMemNegImm8Offset() const {
1361 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1362 return false;
1363 // Base reg of PC isn't allowed for these encodings.
1364 if (Memory.BaseRegNum == ARM::PC) return false;
1365 // Immediate offset in range [-255, -1].
1366 if (!Memory.OffsetImm) return false;
1367 int64_t Val = Memory.OffsetImm->getValue();
1368 return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1369 }
isMemUImm12Offset() const1370 bool isMemUImm12Offset() const {
1371 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1372 return false;
1373 // Immediate offset in range [0, 4095].
1374 if (!Memory.OffsetImm) return true;
1375 int64_t Val = Memory.OffsetImm->getValue();
1376 return (Val >= 0 && Val < 4096);
1377 }
isMemImm12Offset() const1378 bool isMemImm12Offset() const {
1379 // If we have an immediate that's not a constant, treat it as a label
1380 // reference needing a fixup. If it is a constant, it's something else
1381 // and we reject it.
1382
1383 if (isImm() && !isa<MCConstantExpr>(getImm()))
1384 return true;
1385
1386 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1387 return false;
1388 // Immediate offset in range [-4095, 4095].
1389 if (!Memory.OffsetImm) return true;
1390 int64_t Val = Memory.OffsetImm->getValue();
1391 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1392 }
isConstPoolAsmImm() const1393 bool isConstPoolAsmImm() const {
1394 // Delay processing of Constant Pool Immediate, this will turn into
1395 // a constant. Match no other operand
1396 return (isConstantPoolImm());
1397 }
isPostIdxImm8() const1398 bool isPostIdxImm8() const {
1399 if (!isImm()) return false;
1400 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1401 if (!CE) return false;
1402 int64_t Val = CE->getValue();
1403 return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1404 }
isPostIdxImm8s4() const1405 bool isPostIdxImm8s4() const {
1406 if (!isImm()) return false;
1407 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1408 if (!CE) return false;
1409 int64_t Val = CE->getValue();
1410 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1411 (Val == INT32_MIN);
1412 }
1413
isMSRMask() const1414 bool isMSRMask() const { return Kind == k_MSRMask; }
isBankedReg() const1415 bool isBankedReg() const { return Kind == k_BankedReg; }
isProcIFlags() const1416 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1417
1418 // NEON operands.
isSingleSpacedVectorList() const1419 bool isSingleSpacedVectorList() const {
1420 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1421 }
isDoubleSpacedVectorList() const1422 bool isDoubleSpacedVectorList() const {
1423 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1424 }
isVecListOneD() const1425 bool isVecListOneD() const {
1426 if (!isSingleSpacedVectorList()) return false;
1427 return VectorList.Count == 1;
1428 }
1429
isVecListDPair() const1430 bool isVecListDPair() const {
1431 if (!isSingleSpacedVectorList()) return false;
1432 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1433 .contains(VectorList.RegNum));
1434 }
1435
isVecListThreeD() const1436 bool isVecListThreeD() const {
1437 if (!isSingleSpacedVectorList()) return false;
1438 return VectorList.Count == 3;
1439 }
1440
isVecListFourD() const1441 bool isVecListFourD() const {
1442 if (!isSingleSpacedVectorList()) return false;
1443 return VectorList.Count == 4;
1444 }
1445
isVecListDPairSpaced() const1446 bool isVecListDPairSpaced() const {
1447 if (Kind != k_VectorList) return false;
1448 if (isSingleSpacedVectorList()) return false;
1449 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1450 .contains(VectorList.RegNum));
1451 }
1452
isVecListThreeQ() const1453 bool isVecListThreeQ() const {
1454 if (!isDoubleSpacedVectorList()) return false;
1455 return VectorList.Count == 3;
1456 }
1457
isVecListFourQ() const1458 bool isVecListFourQ() const {
1459 if (!isDoubleSpacedVectorList()) return false;
1460 return VectorList.Count == 4;
1461 }
1462
isSingleSpacedVectorAllLanes() const1463 bool isSingleSpacedVectorAllLanes() const {
1464 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1465 }
isDoubleSpacedVectorAllLanes() const1466 bool isDoubleSpacedVectorAllLanes() const {
1467 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1468 }
isVecListOneDAllLanes() const1469 bool isVecListOneDAllLanes() const {
1470 if (!isSingleSpacedVectorAllLanes()) return false;
1471 return VectorList.Count == 1;
1472 }
1473
isVecListDPairAllLanes() const1474 bool isVecListDPairAllLanes() const {
1475 if (!isSingleSpacedVectorAllLanes()) return false;
1476 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1477 .contains(VectorList.RegNum));
1478 }
1479
isVecListDPairSpacedAllLanes() const1480 bool isVecListDPairSpacedAllLanes() const {
1481 if (!isDoubleSpacedVectorAllLanes()) return false;
1482 return VectorList.Count == 2;
1483 }
1484
isVecListThreeDAllLanes() const1485 bool isVecListThreeDAllLanes() const {
1486 if (!isSingleSpacedVectorAllLanes()) return false;
1487 return VectorList.Count == 3;
1488 }
1489
isVecListThreeQAllLanes() const1490 bool isVecListThreeQAllLanes() const {
1491 if (!isDoubleSpacedVectorAllLanes()) return false;
1492 return VectorList.Count == 3;
1493 }
1494
isVecListFourDAllLanes() const1495 bool isVecListFourDAllLanes() const {
1496 if (!isSingleSpacedVectorAllLanes()) return false;
1497 return VectorList.Count == 4;
1498 }
1499
isVecListFourQAllLanes() const1500 bool isVecListFourQAllLanes() const {
1501 if (!isDoubleSpacedVectorAllLanes()) return false;
1502 return VectorList.Count == 4;
1503 }
1504
isSingleSpacedVectorIndexed() const1505 bool isSingleSpacedVectorIndexed() const {
1506 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1507 }
isDoubleSpacedVectorIndexed() const1508 bool isDoubleSpacedVectorIndexed() const {
1509 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1510 }
isVecListOneDByteIndexed() const1511 bool isVecListOneDByteIndexed() const {
1512 if (!isSingleSpacedVectorIndexed()) return false;
1513 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1514 }
1515
isVecListOneDHWordIndexed() const1516 bool isVecListOneDHWordIndexed() const {
1517 if (!isSingleSpacedVectorIndexed()) return false;
1518 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1519 }
1520
isVecListOneDWordIndexed() const1521 bool isVecListOneDWordIndexed() const {
1522 if (!isSingleSpacedVectorIndexed()) return false;
1523 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1524 }
1525
isVecListTwoDByteIndexed() const1526 bool isVecListTwoDByteIndexed() const {
1527 if (!isSingleSpacedVectorIndexed()) return false;
1528 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1529 }
1530
isVecListTwoDHWordIndexed() const1531 bool isVecListTwoDHWordIndexed() const {
1532 if (!isSingleSpacedVectorIndexed()) return false;
1533 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1534 }
1535
isVecListTwoQWordIndexed() const1536 bool isVecListTwoQWordIndexed() const {
1537 if (!isDoubleSpacedVectorIndexed()) return false;
1538 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1539 }
1540
isVecListTwoQHWordIndexed() const1541 bool isVecListTwoQHWordIndexed() const {
1542 if (!isDoubleSpacedVectorIndexed()) return false;
1543 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1544 }
1545
isVecListTwoDWordIndexed() const1546 bool isVecListTwoDWordIndexed() const {
1547 if (!isSingleSpacedVectorIndexed()) return false;
1548 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1549 }
1550
isVecListThreeDByteIndexed() const1551 bool isVecListThreeDByteIndexed() const {
1552 if (!isSingleSpacedVectorIndexed()) return false;
1553 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1554 }
1555
isVecListThreeDHWordIndexed() const1556 bool isVecListThreeDHWordIndexed() const {
1557 if (!isSingleSpacedVectorIndexed()) return false;
1558 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1559 }
1560
isVecListThreeQWordIndexed() const1561 bool isVecListThreeQWordIndexed() const {
1562 if (!isDoubleSpacedVectorIndexed()) return false;
1563 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1564 }
1565
isVecListThreeQHWordIndexed() const1566 bool isVecListThreeQHWordIndexed() const {
1567 if (!isDoubleSpacedVectorIndexed()) return false;
1568 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1569 }
1570
isVecListThreeDWordIndexed() const1571 bool isVecListThreeDWordIndexed() const {
1572 if (!isSingleSpacedVectorIndexed()) return false;
1573 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1574 }
1575
isVecListFourDByteIndexed() const1576 bool isVecListFourDByteIndexed() const {
1577 if (!isSingleSpacedVectorIndexed()) return false;
1578 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1579 }
1580
isVecListFourDHWordIndexed() const1581 bool isVecListFourDHWordIndexed() const {
1582 if (!isSingleSpacedVectorIndexed()) return false;
1583 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1584 }
1585
isVecListFourQWordIndexed() const1586 bool isVecListFourQWordIndexed() const {
1587 if (!isDoubleSpacedVectorIndexed()) return false;
1588 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1589 }
1590
isVecListFourQHWordIndexed() const1591 bool isVecListFourQHWordIndexed() const {
1592 if (!isDoubleSpacedVectorIndexed()) return false;
1593 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1594 }
1595
isVecListFourDWordIndexed() const1596 bool isVecListFourDWordIndexed() const {
1597 if (!isSingleSpacedVectorIndexed()) return false;
1598 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1599 }
1600
isVectorIndex8() const1601 bool isVectorIndex8() const {
1602 if (Kind != k_VectorIndex) return false;
1603 return VectorIndex.Val < 8;
1604 }
isVectorIndex16() const1605 bool isVectorIndex16() const {
1606 if (Kind != k_VectorIndex) return false;
1607 return VectorIndex.Val < 4;
1608 }
isVectorIndex32() const1609 bool isVectorIndex32() const {
1610 if (Kind != k_VectorIndex) return false;
1611 return VectorIndex.Val < 2;
1612 }
1613
isNEONi8splat() const1614 bool isNEONi8splat() const {
1615 if (!isImm()) return false;
1616 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1617 // Must be a constant.
1618 if (!CE) return false;
1619 int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
1622 return Value >= 0 && Value < 256;
1623 }
1624
isNEONi16splat() const1625 bool isNEONi16splat() const {
1626 if (isNEONByteReplicate(2))
      return false; // Leave that to byte replication and forbid it here.
1628 if (!isImm())
1629 return false;
1630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1631 // Must be a constant.
1632 if (!CE) return false;
1633 unsigned Value = CE->getValue();
1634 return ARM_AM::isNEONi16splat(Value);
1635 }
1636
isNEONi16splatNot() const1637 bool isNEONi16splatNot() const {
1638 if (!isImm())
1639 return false;
1640 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1641 // Must be a constant.
1642 if (!CE) return false;
1643 unsigned Value = CE->getValue();
1644 return ARM_AM::isNEONi16splat(~Value & 0xffff);
1645 }
1646
isNEONi32splat() const1647 bool isNEONi32splat() const {
1648 if (isNEONByteReplicate(4))
      return false; // Leave that to byte replication and forbid it here.
1650 if (!isImm())
1651 return false;
1652 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1653 // Must be a constant.
1654 if (!CE) return false;
1655 unsigned Value = CE->getValue();
1656 return ARM_AM::isNEONi32splat(Value);
1657 }
1658
isNEONi32splatNot() const1659 bool isNEONi32splatNot() const {
1660 if (!isImm())
1661 return false;
1662 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1663 // Must be a constant.
1664 if (!CE) return false;
1665 unsigned Value = CE->getValue();
1666 return ARM_AM::isNEONi32splat(~Value);
1667 }
1668
isNEONByteReplicate(unsigned NumBytes) const1669 bool isNEONByteReplicate(unsigned NumBytes) const {
1670 if (!isImm())
1671 return false;
1672 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1673 // Must be a constant.
1674 if (!CE)
1675 return false;
1676 int64_t Value = CE->getValue();
1677 if (!Value)
1678 return false; // Don't bother with zero.
1679
1680 unsigned char B = Value & 0xff;
1681 for (unsigned i = 1; i < NumBytes; ++i) {
1682 Value >>= 8;
1683 if ((Value & 0xff) != B)
1684 return false;
1685 }
1686 return true;
1687 }
isNEONi16ByteReplicate() const1688 bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
isNEONi32ByteReplicate() const1689 bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
  bool isNEONi32vmov() const {
    if (isNEONByteReplicate(4))
      return false; // Let it be classified as a byte-replicate case.
1693 if (!isImm())
1694 return false;
1695 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1696 // Must be a constant.
1697 if (!CE)
1698 return false;
1699 int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte: X000, 0X00, 00X0, or 000X.
    // For VMOV/VMVN only, 00XF and 0XFF are also accepted.
    // FIXME: This is probably wrong and a copy-and-paste from the previous
    // example.
1703 return (Value >= 0 && Value < 256) ||
1704 (Value >= 0x0100 && Value <= 0xff00) ||
1705 (Value >= 0x010000 && Value <= 0xff0000) ||
1706 (Value >= 0x01000000 && Value <= 0xff000000) ||
1707 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1708 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1709 }
isNEONi32vmovNeg() const1710 bool isNEONi32vmovNeg() const {
1711 if (!isImm()) return false;
1712 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1713 // Must be a constant.
1714 if (!CE) return false;
1715 int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte: X000, 0X00, 00X0, or 000X.
    // For VMOV/VMVN only, 00XF and 0XFF are also accepted.
    // FIXME: This is probably wrong and a copy-and-paste from the previous
    // example.
1719 return (Value >= 0 && Value < 256) ||
1720 (Value >= 0x0100 && Value <= 0xff00) ||
1721 (Value >= 0x010000 && Value <= 0xff0000) ||
1722 (Value >= 0x01000000 && Value <= 0xff000000) ||
1723 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1724 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1725 }
1726
isNEONi64splat() const1727 bool isNEONi64splat() const {
1728 if (!isImm()) return false;
1729 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1730 // Must be a constant.
1731 if (!CE) return false;
1732 uint64_t Value = CE->getValue();
1733 // i64 value with each byte being either 0 or 0xff.
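    // For example, 0x00ff00ff00ff00ff qualifies, while 0x00ff00ff00ff00fe
    // does not (its low byte is neither 0 nor 0xff).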
1734 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1735 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1736 return true;
1737 }
1738
addExpr(MCInst & Inst,const MCExpr * Expr) const1739 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1740 // Add as immediates when possible. Null MCExpr = 0.
1741 if (!Expr)
1742 Inst.addOperand(MCOperand::createImm(0));
1743 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1744 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1745 else
1746 Inst.addOperand(MCOperand::createExpr(Expr));
1747 }
1748
addARMBranchTargetOperands(MCInst & Inst,unsigned N) const1749 void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
1750 assert(N == 1 && "Invalid number of operands!");
1751 addExpr(Inst, getImm());
1752 }
1753
addThumbBranchTargetOperands(MCInst & Inst,unsigned N) const1754 void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
1755 assert(N == 1 && "Invalid number of operands!");
1756 addExpr(Inst, getImm());
1757 }
1758
addCondCodeOperands(MCInst & Inst,unsigned N) const1759 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1760 assert(N == 2 && "Invalid number of operands!");
1761 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1762 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1763 Inst.addOperand(MCOperand::createReg(RegNum));
1764 }
1765
addCoprocNumOperands(MCInst & Inst,unsigned N) const1766 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!");
1768 Inst.addOperand(MCOperand::createImm(getCoproc()));
1769 }
1770
addCoprocRegOperands(MCInst & Inst,unsigned N) const1771 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1772 assert(N == 1 && "Invalid number of operands!");
1773 Inst.addOperand(MCOperand::createImm(getCoproc()));
1774 }
1775
addCoprocOptionOperands(MCInst & Inst,unsigned N) const1776 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1777 assert(N == 1 && "Invalid number of operands!");
1778 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
1779 }
1780
addITMaskOperands(MCInst & Inst,unsigned N) const1781 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1782 assert(N == 1 && "Invalid number of operands!");
1783 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
1784 }
1785
addITCondCodeOperands(MCInst & Inst,unsigned N) const1786 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1787 assert(N == 1 && "Invalid number of operands!");
1788 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1789 }
1790
addCCOutOperands(MCInst & Inst,unsigned N) const1791 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1792 assert(N == 1 && "Invalid number of operands!");
1793 Inst.addOperand(MCOperand::createReg(getReg()));
1794 }
1795
addRegOperands(MCInst & Inst,unsigned N) const1796 void addRegOperands(MCInst &Inst, unsigned N) const {
1797 assert(N == 1 && "Invalid number of operands!");
1798 Inst.addOperand(MCOperand::createReg(getReg()));
1799 }
1800
addRegShiftedRegOperands(MCInst & Inst,unsigned N) const1801 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1802 assert(N == 3 && "Invalid number of operands!");
1803 assert(isRegShiftedReg() &&
1804 "addRegShiftedRegOperands() on non-RegShiftedReg!");
1805 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
1806 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
1807 Inst.addOperand(MCOperand::createImm(
1808 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1809 }
1810
addRegShiftedImmOperands(MCInst & Inst,unsigned N) const1811 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1812 assert(N == 2 && "Invalid number of operands!");
1813 assert(isRegShiftedImm() &&
1814 "addRegShiftedImmOperands() on non-RegShiftedImm!");
1815 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
1816 // Shift of #32 is encoded as 0 where permitted
1817 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1818 Inst.addOperand(MCOperand::createImm(
1819 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1820 }
1821
addShifterImmOperands(MCInst & Inst,unsigned N) const1822 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1823 assert(N == 1 && "Invalid number of operands!");
1824 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
1825 ShifterImm.Imm));
1826 }
1827
addRegListOperands(MCInst & Inst,unsigned N) const1828 void addRegListOperands(MCInst &Inst, unsigned N) const {
1829 assert(N == 1 && "Invalid number of operands!");
1830 const SmallVectorImpl<unsigned> &RegList = getRegList();
1831 for (SmallVectorImpl<unsigned>::const_iterator
1832 I = RegList.begin(), E = RegList.end(); I != E; ++I)
1833 Inst.addOperand(MCOperand::createReg(*I));
1834 }
1835
addDPRRegListOperands(MCInst & Inst,unsigned N) const1836 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1837 addRegListOperands(Inst, N);
1838 }
1839
addSPRRegListOperands(MCInst & Inst,unsigned N) const1840 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1841 addRegListOperands(Inst, N);
1842 }
1843
addRotImmOperands(MCInst & Inst,unsigned N) const1844 void addRotImmOperands(MCInst &Inst, unsigned N) const {
1845 assert(N == 1 && "Invalid number of operands!");
1846 // Encoded as val>>3. The printer handles display as 8, 16, 24.
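    // For example, a rotation of 16 is emitted as 2.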
1847 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
1848 }
1849
addModImmOperands(MCInst & Inst,unsigned N) const1850 void addModImmOperands(MCInst &Inst, unsigned N) const {
1851 assert(N == 1 && "Invalid number of operands!");
1852
1853 // Support for fixups (MCFixup)
1854 if (isImm())
1855 return addImmOperands(Inst, N);
1856
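    // ModImm.Bits holds the 8-bit value and ModImm.Rot the rotate amount
    // (an even value), so Rot << 7 puts Rot/2 into bits [11:8] of the
    // encoding.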
1857 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
1858 }
1859
addModImmNotOperands(MCInst & Inst,unsigned N) const1860 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
1861 assert(N == 1 && "Invalid number of operands!");
1862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1863 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
1864 Inst.addOperand(MCOperand::createImm(Enc));
1865 }
1866
addModImmNegOperands(MCInst & Inst,unsigned N) const1867 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!");
1869 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1870 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
1871 Inst.addOperand(MCOperand::createImm(Enc));
1872 }
1873
addBitfieldOperands(MCInst & Inst,unsigned N) const1874 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 // Munge the lsb/width into a bitfield mask.
1877 unsigned lsb = Bitfield.LSB;
1878 unsigned width = Bitfield.Width;
1879 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1880 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1881 (32 - (lsb + width)));
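    // For example, lsb = 8 and width = 4 produce Mask = 0xfffff0ff, i.e.
    // bits [11:8] clear and all other bits set.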
1882 Inst.addOperand(MCOperand::createImm(Mask));
1883 }
1884
addImmOperands(MCInst & Inst,unsigned N) const1885 void addImmOperands(MCInst &Inst, unsigned N) const {
1886 assert(N == 1 && "Invalid number of operands!");
1887 addExpr(Inst, getImm());
1888 }
1889
addFBits16Operands(MCInst & Inst,unsigned N) const1890 void addFBits16Operands(MCInst &Inst, unsigned N) const {
1891 assert(N == 1 && "Invalid number of operands!");
1892 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1893 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
1894 }
1895
addFBits32Operands(MCInst & Inst,unsigned N) const1896 void addFBits32Operands(MCInst &Inst, unsigned N) const {
1897 assert(N == 1 && "Invalid number of operands!");
1898 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1899 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
1900 }
1901
addFPImmOperands(MCInst & Inst,unsigned N) const1902 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1905 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1906 Inst.addOperand(MCOperand::createImm(Val));
1907 }
1908
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instructions don't encode operands that way yet.
1913 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1914 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1915 }
1916
addImm0_1020s4Operands(MCInst & Inst,unsigned N) const1917 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1918 assert(N == 1 && "Invalid number of operands!");
1919 // The immediate is scaled by four in the encoding and is stored
1920 // in the MCInst as such. Lop off the low two bits here.
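    // For example, #1020 is stored as 255.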
1921 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1922 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
1923 }
1924
addImm0_508s4NegOperands(MCInst & Inst,unsigned N) const1925 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1926 assert(N == 1 && "Invalid number of operands!");
1927 // The immediate is scaled by four in the encoding and is stored
1928 // in the MCInst as such. Lop off the low two bits here.
1929 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1930 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
1931 }
1932
addImm0_508s4Operands(MCInst & Inst,unsigned N) const1933 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1934 assert(N == 1 && "Invalid number of operands!");
1935 // The immediate is scaled by four in the encoding and is stored
1936 // in the MCInst as such. Lop off the low two bits here.
1937 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1938 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
1939 }
1940
addImm1_16Operands(MCInst & Inst,unsigned N) const1941 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1942 assert(N == 1 && "Invalid number of operands!");
1943 // The constant encodes as the immediate-1, and we store in the instruction
1944 // the bits as encoded, so subtract off one here.
1945 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1946 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
1947 }
1948
addImm1_32Operands(MCInst & Inst,unsigned N) const1949 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1950 assert(N == 1 && "Invalid number of operands!");
1951 // The constant encodes as the immediate-1, and we store in the instruction
1952 // the bits as encoded, so subtract off one here.
1953 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1954 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
1955 }
1956
addImmThumbSROperands(MCInst & Inst,unsigned N) const1957 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 // The constant encodes as the immediate, except for 32, which encodes as
1960 // zero.
1961 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1962 unsigned Imm = CE->getValue();
1963 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
1964 }
1965
addPKHASRImmOperands(MCInst & Inst,unsigned N) const1966 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1967 assert(N == 1 && "Invalid number of operands!");
1968 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1969 // the instruction as well.
1970 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1971 int Val = CE->getValue();
1972 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
1973 }
1974
addT2SOImmNotOperands(MCInst & Inst,unsigned N) const1975 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1976 assert(N == 1 && "Invalid number of operands!");
1977 // The operand is actually a t2_so_imm, but we have its bitwise
1978 // negation in the assembly source, so twiddle it here.
1979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1980 Inst.addOperand(MCOperand::createImm(~CE->getValue()));
1981 }
1982
addT2SOImmNegOperands(MCInst & Inst,unsigned N) const1983 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1984 assert(N == 1 && "Invalid number of operands!");
1985 // The operand is actually a t2_so_imm, but we have its
1986 // negation in the assembly source, so twiddle it here.
1987 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1988 Inst.addOperand(MCOperand::createImm(-CE->getValue()));
1989 }
1990
addImm0_4095NegOperands(MCInst & Inst,unsigned N) const1991 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 // The operand is actually an imm0_4095, but we have its
1994 // negation in the assembly source, so twiddle it here.
1995 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1996 Inst.addOperand(MCOperand::createImm(-CE->getValue()));
1997 }
1998
  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2001 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2002 return;
2003 }
2004
2005 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2006 assert(SR && "Unknown value type!");
2007 Inst.addOperand(MCOperand::createExpr(SR));
2008 }
2009
addThumbMemPCOperands(MCInst & Inst,unsigned N) const2010 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2011 assert(N == 1 && "Invalid number of operands!");
2012 if (isImm()) {
2013 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2014 if (CE) {
2015 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2016 return;
2017 }
2018
2019 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2020
2021 assert(SR && "Unknown value type!");
2022 Inst.addOperand(MCOperand::createExpr(SR));
2023 return;
2024 }
2025
2026 assert(isMem() && "Unknown value type!");
2027 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2028 Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2029 }
2030
addMemBarrierOptOperands(MCInst & Inst,unsigned N) const2031 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2033 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2034 }
2035
addInstSyncBarrierOptOperands(MCInst & Inst,unsigned N) const2036 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2039 }
2040
addMemNoOffsetOperands(MCInst & Inst,unsigned N) const2041 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2042 assert(N == 1 && "Invalid number of operands!");
2043 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2044 }
2045
addMemPCRelImm12Operands(MCInst & Inst,unsigned N) const2046 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2047 assert(N == 1 && "Invalid number of operands!");
2048 int32_t Imm = Memory.OffsetImm->getValue();
2049 Inst.addOperand(MCOperand::createImm(Imm));
2050 }
2051
addAdrLabelOperands(MCInst & Inst,unsigned N) const2052 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 assert(isImm() && "Not an immediate!");
2055
2056 // If we have an immediate that's not a constant, treat it as a label
2057 // reference needing a fixup.
2058 if (!isa<MCConstantExpr>(getImm())) {
2059 Inst.addOperand(MCOperand::createExpr(getImm()));
2060 return;
2061 }
2062
2063 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2064 int Val = CE->getValue();
2065 Inst.addOperand(MCOperand::createImm(Val));
2066 }
2067
addAlignedMemoryOperands(MCInst & Inst,unsigned N) const2068 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 2 && "Invalid number of operands!");
2070 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2071 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2072 }
2073
addDupAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2074 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2075 addAlignedMemoryOperands(Inst, N);
2076 }
2077
addAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2078 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2079 addAlignedMemoryOperands(Inst, N);
2080 }
2081
addAlignedMemory16Operands(MCInst & Inst,unsigned N) const2082 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2083 addAlignedMemoryOperands(Inst, N);
2084 }
2085
addDupAlignedMemory16Operands(MCInst & Inst,unsigned N) const2086 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2087 addAlignedMemoryOperands(Inst, N);
2088 }
2089
addAlignedMemory32Operands(MCInst & Inst,unsigned N) const2090 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2091 addAlignedMemoryOperands(Inst, N);
2092 }
2093
addDupAlignedMemory32Operands(MCInst & Inst,unsigned N) const2094 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2095 addAlignedMemoryOperands(Inst, N);
2096 }
2097
addAlignedMemory64Operands(MCInst & Inst,unsigned N) const2098 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2099 addAlignedMemoryOperands(Inst, N);
2100 }
2101
addDupAlignedMemory64Operands(MCInst & Inst,unsigned N) const2102 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2103 addAlignedMemoryOperands(Inst, N);
2104 }
2105
addAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2106 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2107 addAlignedMemoryOperands(Inst, N);
2108 }
2109
addDupAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2110 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2111 addAlignedMemoryOperands(Inst, N);
2112 }
2113
addAlignedMemory64or128or256Operands(MCInst & Inst,unsigned N) const2114 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2115 addAlignedMemoryOperands(Inst, N);
2116 }
2117
addAddrMode2Operands(MCInst & Inst,unsigned N) const2118 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2119 assert(N == 3 && "Invalid number of operands!");
2120 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2121 if (!Memory.OffsetRegNum) {
2122 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2123 // Special case for #-0
2124 if (Val == INT32_MIN) Val = 0;
2125 if (Val < 0) Val = -Val;
2126 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2127 } else {
2128 // For register offset, we encode the shift type and negation flag
2129 // here.
2130 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2131 Memory.ShiftImm, Memory.ShiftType);
2132 }
2133 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2134 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2135 Inst.addOperand(MCOperand::createImm(Val));
2136 }
2137
addAM2OffsetImmOperands(MCInst & Inst,unsigned N) const2138 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2139 assert(N == 2 && "Invalid number of operands!");
2140 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2141 assert(CE && "non-constant AM2OffsetImm operand!");
2142 int32_t Val = CE->getValue();
2143 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2144 // Special case for #-0
2145 if (Val == INT32_MIN) Val = 0;
2146 if (Val < 0) Val = -Val;
2147 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2148 Inst.addOperand(MCOperand::createReg(0));
2149 Inst.addOperand(MCOperand::createImm(Val));
2150 }
2151
addAddrMode3Operands(MCInst & Inst,unsigned N) const2152 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2153 assert(N == 3 && "Invalid number of operands!");
2154 // If we have an immediate that's not a constant, treat it as a label
2155 // reference needing a fixup. If it is a constant, it's something else
2156 // and we reject it.
2157 if (isImm()) {
2158 Inst.addOperand(MCOperand::createExpr(getImm()));
2159 Inst.addOperand(MCOperand::createReg(0));
2160 Inst.addOperand(MCOperand::createImm(0));
2161 return;
2162 }
2163
2164 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2165 if (!Memory.OffsetRegNum) {
2166 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2167 // Special case for #-0
2168 if (Val == INT32_MIN) Val = 0;
2169 if (Val < 0) Val = -Val;
2170 Val = ARM_AM::getAM3Opc(AddSub, Val);
2171 } else {
2172 // For register offset, we encode the shift type and negation flag
2173 // here.
2174 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2175 }
2176 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2177 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2178 Inst.addOperand(MCOperand::createImm(Val));
2179 }
2180
addAM3OffsetOperands(MCInst & Inst,unsigned N) const2181 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2182 assert(N == 2 && "Invalid number of operands!");
2183 if (Kind == k_PostIndexRegister) {
2184 int32_t Val =
2185 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2186 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2187 Inst.addOperand(MCOperand::createImm(Val));
2188 return;
2189 }
2190
2191 // Constant offset.
2192 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2193 int32_t Val = CE->getValue();
2194 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2195 // Special case for #-0
2196 if (Val == INT32_MIN) Val = 0;
2197 if (Val < 0) Val = -Val;
2198 Val = ARM_AM::getAM3Opc(AddSub, Val);
2199 Inst.addOperand(MCOperand::createReg(0));
2200 Inst.addOperand(MCOperand::createImm(Val));
2201 }
2202
addAddrMode5Operands(MCInst & Inst,unsigned N) const2203 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2204 assert(N == 2 && "Invalid number of operands!");
2205 // If we have an immediate that's not a constant, treat it as a label
2206 // reference needing a fixup. If it is a constant, it's something else
2207 // and we reject it.
2208 if (isImm()) {
2209 Inst.addOperand(MCOperand::createExpr(getImm()));
2210 Inst.addOperand(MCOperand::createImm(0));
2211 return;
2212 }
2213
2214 // The lower two bits are always zero and as such are not encoded.
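    // For example, an offset of #-20 is passed to getAM5Opc as (sub, 5).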
2215 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2216 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2217 // Special case for #-0
2218 if (Val == INT32_MIN) Val = 0;
2219 if (Val < 0) Val = -Val;
2220 Val = ARM_AM::getAM5Opc(AddSub, Val);
2221 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2222 Inst.addOperand(MCOperand::createImm(Val));
2223 }
2224
addAddrMode5FP16Operands(MCInst & Inst,unsigned N) const2225 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2226 assert(N == 2 && "Invalid number of operands!");
2227 // If we have an immediate that's not a constant, treat it as a label
2228 // reference needing a fixup. If it is a constant, it's something else
2229 // and we reject it.
2230 if (isImm()) {
2231 Inst.addOperand(MCOperand::createExpr(getImm()));
2232 Inst.addOperand(MCOperand::createImm(0));
2233 return;
2234 }
2235
2236 // The lower bit is always zero and as such is not encoded.
2237 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2238 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2239 // Special case for #-0
2240 if (Val == INT32_MIN) Val = 0;
2241 if (Val < 0) Val = -Val;
2242 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2243 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2244 Inst.addOperand(MCOperand::createImm(Val));
2245 }
2246
addMemImm8s4OffsetOperands(MCInst & Inst,unsigned N) const2247 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2248 assert(N == 2 && "Invalid number of operands!");
2249 // If we have an immediate that's not a constant, treat it as a label
2250 // reference needing a fixup. If it is a constant, it's something else
2251 // and we reject it.
2252 if (isImm()) {
2253 Inst.addOperand(MCOperand::createExpr(getImm()));
2254 Inst.addOperand(MCOperand::createImm(0));
2255 return;
2256 }
2257
2258 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2259 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2260 Inst.addOperand(MCOperand::createImm(Val));
2261 }
2262
addMemImm0_1020s4OffsetOperands(MCInst & Inst,unsigned N) const2263 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2264 assert(N == 2 && "Invalid number of operands!");
2265 // The lower two bits are always zero and as such are not encoded.
2266 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2267 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2268 Inst.addOperand(MCOperand::createImm(Val));
2269 }
2270
addMemImm8OffsetOperands(MCInst & Inst,unsigned N) const2271 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2272 assert(N == 2 && "Invalid number of operands!");
2273 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2274 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2275 Inst.addOperand(MCOperand::createImm(Val));
2276 }
2277
addMemPosImm8OffsetOperands(MCInst & Inst,unsigned N) const2278 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2279 addMemImm8OffsetOperands(Inst, N);
2280 }
2281
addMemNegImm8OffsetOperands(MCInst & Inst,unsigned N) const2282 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2283 addMemImm8OffsetOperands(Inst, N);
2284 }
2285
addMemUImm12OffsetOperands(MCInst & Inst,unsigned N) const2286 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2287 assert(N == 2 && "Invalid number of operands!");
2288 // If this is an immediate, it's a label reference.
2289 if (isImm()) {
2290 addExpr(Inst, getImm());
2291 Inst.addOperand(MCOperand::createImm(0));
2292 return;
2293 }
2294
2295 // Otherwise, it's a normal memory reg+offset.
2296 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2297 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2298 Inst.addOperand(MCOperand::createImm(Val));
2299 }
2300
addMemImm12OffsetOperands(MCInst & Inst,unsigned N) const2301 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2302 assert(N == 2 && "Invalid number of operands!");
2303 // If this is an immediate, it's a label reference.
2304 if (isImm()) {
2305 addExpr(Inst, getImm());
2306 Inst.addOperand(MCOperand::createImm(0));
2307 return;
2308 }
2309
2310 // Otherwise, it's a normal memory reg+offset.
2311 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2312 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2313 Inst.addOperand(MCOperand::createImm(Val));
2314 }
2315
  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is a container for the immediate that we will create the constant
    // pool from.
    addExpr(Inst, getConstantPoolImm());
  }
2323
addMemTBBOperands(MCInst & Inst,unsigned N) const2324 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2325 assert(N == 2 && "Invalid number of operands!");
2326 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2327 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2328 }
2329
addMemTBHOperands(MCInst & Inst,unsigned N) const2330 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2331 assert(N == 2 && "Invalid number of operands!");
2332 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2333 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2334 }
2335
addMemRegOffsetOperands(MCInst & Inst,unsigned N) const2336 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2337 assert(N == 3 && "Invalid number of operands!");
2338 unsigned Val =
2339 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2340 Memory.ShiftImm, Memory.ShiftType);
2341 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2342 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2343 Inst.addOperand(MCOperand::createImm(Val));
2344 }
2345
addT2MemRegOffsetOperands(MCInst & Inst,unsigned N) const2346 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2347 assert(N == 3 && "Invalid number of operands!");
2348 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2349 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2350 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2351 }
2352
addMemThumbRROperands(MCInst & Inst,unsigned N) const2353 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2354 assert(N == 2 && "Invalid number of operands!");
2355 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2356 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2357 }
2358
addMemThumbRIs4Operands(MCInst & Inst,unsigned N) const2359 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2360 assert(N == 2 && "Invalid number of operands!");
2361 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2362 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2363 Inst.addOperand(MCOperand::createImm(Val));
2364 }
2365
addMemThumbRIs2Operands(MCInst & Inst,unsigned N) const2366 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2367 assert(N == 2 && "Invalid number of operands!");
2368 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2369 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2370 Inst.addOperand(MCOperand::createImm(Val));
2371 }
2372
addMemThumbRIs1Operands(MCInst & Inst,unsigned N) const2373 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2374 assert(N == 2 && "Invalid number of operands!");
2375 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2376 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2377 Inst.addOperand(MCOperand::createImm(Val));
2378 }
2379
addMemThumbSPIOperands(MCInst & Inst,unsigned N) const2380 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2381 assert(N == 2 && "Invalid number of operands!");
2382 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2383 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2384 Inst.addOperand(MCOperand::createImm(Val));
2385 }
2386
addPostIdxImm8Operands(MCInst & Inst,unsigned N) const2387 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2388 assert(N == 1 && "Invalid number of operands!");
2389 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2390 assert(CE && "non-constant post-idx-imm8 operand!");
2391 int Imm = CE->getValue();
2392 bool isAdd = Imm >= 0;
2393 if (Imm == INT32_MIN) Imm = 0;
2394 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
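    // For example, #4 encodes as 0x104 (add bit set) and #-4 as 0x004.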
2395 Inst.addOperand(MCOperand::createImm(Imm));
2396 }
2397
addPostIdxImm8s4Operands(MCInst & Inst,unsigned N) const2398 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2399 assert(N == 1 && "Invalid number of operands!");
2400 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2401 assert(CE && "non-constant post-idx-imm8s4 operand!");
2402 int Imm = CE->getValue();
2403 bool isAdd = Imm >= 0;
2404 if (Imm == INT32_MIN) Imm = 0;
2405 // Immediate is scaled by 4.
2406 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
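    // For example, #-16 encodes as 4 with the add bit (bit 8) clear.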
2407 Inst.addOperand(MCOperand::createImm(Imm));
2408 }
2409
addPostIdxRegOperands(MCInst & Inst,unsigned N) const2410 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2411 assert(N == 2 && "Invalid number of operands!");
2412 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2413 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2414 }
2415
addPostIdxRegShiftedOperands(MCInst & Inst,unsigned N) const2416 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2417 assert(N == 2 && "Invalid number of operands!");
2418 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2419 // The sign, shift type, and shift amount are encoded in a single operand
2420 // using the AM2 encoding helpers.
2421 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2422 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2423 PostIdxReg.ShiftTy);
2424 Inst.addOperand(MCOperand::createImm(Imm));
2425 }
2426
addMSRMaskOperands(MCInst & Inst,unsigned N) const2427 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2428 assert(N == 1 && "Invalid number of operands!");
2429 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2430 }
2431
addBankedRegOperands(MCInst & Inst,unsigned N) const2432 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2433 assert(N == 1 && "Invalid number of operands!");
2434 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2435 }
2436
addProcIFlagsOperands(MCInst & Inst,unsigned N) const2437 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2438 assert(N == 1 && "Invalid number of operands!");
2439 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2440 }
2441
addVecListOperands(MCInst & Inst,unsigned N) const2442 void addVecListOperands(MCInst &Inst, unsigned N) const {
2443 assert(N == 1 && "Invalid number of operands!");
2444 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2445 }
2446
addVecListIndexedOperands(MCInst & Inst,unsigned N) const2447 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2448 assert(N == 2 && "Invalid number of operands!");
2449 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2450 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2451 }
2452
addVectorIndex8Operands(MCInst & Inst,unsigned N) const2453 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2454 assert(N == 1 && "Invalid number of operands!");
2455 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2456 }
2457
addVectorIndex16Operands(MCInst & Inst,unsigned N) const2458 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2459 assert(N == 1 && "Invalid number of operands!");
2460 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2461 }
2462
addVectorIndex32Operands(MCInst & Inst,unsigned N) const2463 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2464 assert(N == 1 && "Invalid number of operands!");
2465 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2466 }
2467
addNEONi8splatOperands(MCInst & Inst,unsigned N) const2468 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2469 assert(N == 1 && "Invalid number of operands!");
2470 // The immediate encodes the type of constant as well as the value.
2471 // Mask in that this is an i8 splat.
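    // For example, a splat of 0xab is emitted as 0xeab.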
2472 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2473 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2474 }
2475
addNEONi16splatOperands(MCInst & Inst,unsigned N) const2476 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2477 assert(N == 1 && "Invalid number of operands!");
2478 // The immediate encodes the type of constant as well as the value.
2479 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2480 unsigned Value = CE->getValue();
2481 Value = ARM_AM::encodeNEONi16splat(Value);
2482 Inst.addOperand(MCOperand::createImm(Value));
2483 }
2484
addNEONi16splatNotOperands(MCInst & Inst,unsigned N) const2485 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2486 assert(N == 1 && "Invalid number of operands!");
2487 // The immediate encodes the type of constant as well as the value.
2488 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2489 unsigned Value = CE->getValue();
2490 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2491 Inst.addOperand(MCOperand::createImm(Value));
2492 }
2493
addNEONi32splatOperands(MCInst & Inst,unsigned N) const2494 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2495 assert(N == 1 && "Invalid number of operands!");
2496 // The immediate encodes the type of constant as well as the value.
2497 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2498 unsigned Value = CE->getValue();
2499 Value = ARM_AM::encodeNEONi32splat(Value);
2500 Inst.addOperand(MCOperand::createImm(Value));
2501 }
2502
addNEONi32splatNotOperands(MCInst & Inst,unsigned N) const2503 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2504 assert(N == 1 && "Invalid number of operands!");
2505 // The immediate encodes the type of constant as well as the value.
2506 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2507 unsigned Value = CE->getValue();
2508 Value = ARM_AM::encodeNEONi32splat(~Value);
2509 Inst.addOperand(MCOperand::createImm(Value));
2510 }
2511
addNEONinvByteReplicateOperands(MCInst & Inst,unsigned N) const2512 void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2513 assert(N == 1 && "Invalid number of operands!");
2514 // The immediate encodes the type of constant as well as the value.
2515 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2516 unsigned Value = CE->getValue();
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All vmvn instructions that want to replicate a non-zero byte "
           "must be replaced with VMOVv8i8 or VMOVv16i8.");
2521 unsigned B = ((~Value) & 0xff);
2522 B |= 0xe00; // cmode = 0b1110
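    // For example, an assembly immediate of 0xab yields
    // (~0xab & 0xff) | 0xe00 = 0xe54.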
2523 Inst.addOperand(MCOperand::createImm(B));
2524 }
addNEONi32vmovOperands(MCInst & Inst,unsigned N) const2525 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2526 assert(N == 1 && "Invalid number of operands!");
2527 // The immediate encodes the type of constant as well as the value.
2528 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2529 unsigned Value = CE->getValue();
2530 if (Value >= 256 && Value <= 0xffff)
2531 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2532 else if (Value > 0xffff && Value <= 0xffffff)
2533 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2534 else if (Value > 0xffffff)
2535 Value = (Value >> 24) | 0x600;
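    // For example, 0x5600 encodes as 0x256 and 0x56ff as 0xc56.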
2536 Inst.addOperand(MCOperand::createImm(Value));
2537 }
2538
addNEONvmovByteReplicateOperands(MCInst & Inst,unsigned N) const2539 void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2540 assert(N == 1 && "Invalid number of operands!");
2541 // The immediate encodes the type of constant as well as the value.
2542 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2543 unsigned Value = CE->getValue();
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that want to replicate a non-zero byte "
           "must be replaced with VMOVv8i8 or VMOVv16i8.");
2548 unsigned B = Value & 0xff;
2549 B |= 0xe00; // cmode = 0b1110
2550 Inst.addOperand(MCOperand::createImm(B));
2551 }
addNEONi32vmovNegOperands(MCInst & Inst,unsigned N) const2552 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2553 assert(N == 1 && "Invalid number of operands!");
2554 // The immediate encodes the type of constant as well as the value.
2555 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2556 unsigned Value = ~CE->getValue();
2557 if (Value >= 256 && Value <= 0xffff)
2558 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2559 else if (Value > 0xffff && Value <= 0xffffff)
2560 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2561 else if (Value > 0xffffff)
2562 Value = (Value >> 24) | 0x600;
2563 Inst.addOperand(MCOperand::createImm(Value));
2564 }
2565
addNEONi64splatOperands(MCInst & Inst,unsigned N) const2566 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2567 assert(N == 1 && "Invalid number of operands!");
2568 // The immediate encodes the type of constant as well as the value.
2569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2570 uint64_t Value = CE->getValue();
2571 unsigned Imm = 0;
2572 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2573 Imm |= (Value & 1) << i;
2574 }
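    // For example, 0x00ff00ff00ff00ff produces Imm = 0x55, emitted as 0x1e55.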
2575 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2576 }
2577
2578 void print(raw_ostream &OS) const override;
2579
CreateITMask(unsigned Mask,SMLoc S)2580 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2581 auto Op = make_unique<ARMOperand>(k_ITCondMask);
2582 Op->ITMask.Mask = Mask;
2583 Op->StartLoc = S;
2584 Op->EndLoc = S;
2585 return Op;
2586 }
2587
CreateCondCode(ARMCC::CondCodes CC,SMLoc S)2588 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2589 SMLoc S) {
2590 auto Op = make_unique<ARMOperand>(k_CondCode);
2591 Op->CC.Val = CC;
2592 Op->StartLoc = S;
2593 Op->EndLoc = S;
2594 return Op;
2595 }
2596
CreateCoprocNum(unsigned CopVal,SMLoc S)2597 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2598 auto Op = make_unique<ARMOperand>(k_CoprocNum);
2599 Op->Cop.Val = CopVal;
2600 Op->StartLoc = S;
2601 Op->EndLoc = S;
2602 return Op;
2603 }
2604
CreateCoprocReg(unsigned CopVal,SMLoc S)2605 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2606 auto Op = make_unique<ARMOperand>(k_CoprocReg);
2607 Op->Cop.Val = CopVal;
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
CreateCoprocOption(unsigned Val,SMLoc S,SMLoc E)2613 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2614 SMLoc E) {
2615 auto Op = make_unique<ARMOperand>(k_CoprocOption);
2616 Op->Cop.Val = Val;
2617 Op->StartLoc = S;
2618 Op->EndLoc = E;
2619 return Op;
2620 }
2621
CreateCCOut(unsigned RegNum,SMLoc S)2622 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2623 auto Op = make_unique<ARMOperand>(k_CCOut);
2624 Op->Reg.RegNum = RegNum;
2625 Op->StartLoc = S;
2626 Op->EndLoc = S;
2627 return Op;
2628 }
2629
CreateToken(StringRef Str,SMLoc S)2630 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2631 auto Op = make_unique<ARMOperand>(k_Token);
2632 Op->Tok.Data = Str.data();
2633 Op->Tok.Length = Str.size();
2634 Op->StartLoc = S;
2635 Op->EndLoc = S;
2636 return Op;
2637 }
2638
CreateReg(unsigned RegNum,SMLoc S,SMLoc E)2639 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2640 SMLoc E) {
2641 auto Op = make_unique<ARMOperand>(k_Register);
2642 Op->Reg.RegNum = RegNum;
2643 Op->StartLoc = S;
2644 Op->EndLoc = E;
2645 return Op;
2646 }
2647
2648 static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftReg,unsigned ShiftImm,SMLoc S,SMLoc E)2649 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2650 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2651 SMLoc E) {
2652 auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2653 Op->RegShiftedReg.ShiftTy = ShTy;
2654 Op->RegShiftedReg.SrcReg = SrcReg;
2655 Op->RegShiftedReg.ShiftReg = ShiftReg;
2656 Op->RegShiftedReg.ShiftImm = ShiftImm;
2657 Op->StartLoc = S;
2658 Op->EndLoc = E;
2659 return Op;
2660 }
2661
2662 static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftImm,SMLoc S,SMLoc E)2663 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2664 unsigned ShiftImm, SMLoc S, SMLoc E) {
2665 auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2666 Op->RegShiftedImm.ShiftTy = ShTy;
2667 Op->RegShiftedImm.SrcReg = SrcReg;
2668 Op->RegShiftedImm.ShiftImm = ShiftImm;
2669 Op->StartLoc = S;
2670 Op->EndLoc = E;
2671 return Op;
2672 }
2673
CreateShifterImm(bool isASR,unsigned Imm,SMLoc S,SMLoc E)2674 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2675 SMLoc S, SMLoc E) {
2676 auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2677 Op->ShifterImm.isASR = isASR;
2678 Op->ShifterImm.Imm = Imm;
2679 Op->StartLoc = S;
2680 Op->EndLoc = E;
2681 return Op;
2682 }
2683
CreateRotImm(unsigned Imm,SMLoc S,SMLoc E)2684 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2685 SMLoc E) {
2686 auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2687 Op->RotImm.Imm = Imm;
2688 Op->StartLoc = S;
2689 Op->EndLoc = E;
2690 return Op;
2691 }
2692
CreateModImm(unsigned Bits,unsigned Rot,SMLoc S,SMLoc E)2693 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2694 SMLoc S, SMLoc E) {
2695 auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2696 Op->ModImm.Bits = Bits;
2697 Op->ModImm.Rot = Rot;
2698 Op->StartLoc = S;
2699 Op->EndLoc = E;
2700 return Op;
2701 }
2702
2703 static std::unique_ptr<ARMOperand>
CreateConstantPoolImm(const MCExpr * Val,SMLoc S,SMLoc E)2704 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2705 auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
2706 Op->Imm.Val = Val;
2707 Op->StartLoc = S;
2708 Op->EndLoc = E;
2709 return Op;
2710 }
2711
2712 static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB,unsigned Width,SMLoc S,SMLoc E)2713 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2714 auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2715 Op->Bitfield.LSB = LSB;
2716 Op->Bitfield.Width = Width;
2717 Op->StartLoc = S;
2718 Op->EndLoc = E;
2719 return Op;
2720 }
2721
2722 static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(!Regs.empty() && "RegList contains no registers?");
2726 KindTy Kind = k_RegisterList;
2727
2728 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2729 Kind = k_DPRRegisterList;
2730 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2731 contains(Regs.front().second))
2732 Kind = k_SPRRegisterList;
2733
2734 // Sort based on the register encoding values.
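    // Each list entry is an (encoding, register) pair, so sorting the pairs
    // orders the registers by encoding.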
2735 array_pod_sort(Regs.begin(), Regs.end());
2736
2737 auto Op = make_unique<ARMOperand>(Kind);
2738 for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2739 I = Regs.begin(), E = Regs.end(); I != E; ++I)
2740 Op->Registers.push_back(I->second);
2741 Op->StartLoc = StartLoc;
2742 Op->EndLoc = EndLoc;
2743 return Op;
2744 }
2745
CreateVectorList(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2746 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2747 unsigned Count,
2748 bool isDoubleSpaced,
2749 SMLoc S, SMLoc E) {
2750 auto Op = make_unique<ARMOperand>(k_VectorList);
2751 Op->VectorList.RegNum = RegNum;
2752 Op->VectorList.Count = Count;
2753 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2754 Op->StartLoc = S;
2755 Op->EndLoc = E;
2756 return Op;
2757 }
2758
2759 static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2760 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2761 SMLoc S, SMLoc E) {
2762 auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2763 Op->VectorList.RegNum = RegNum;
2764 Op->VectorList.Count = Count;
2765 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2766 Op->StartLoc = S;
2767 Op->EndLoc = E;
2768 return Op;
2769 }
2770
2771 static std::unique_ptr<ARMOperand>
2772 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2773 bool isDoubleSpaced, SMLoc S, SMLoc E) {
2774 auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2775 Op->VectorList.RegNum = RegNum;
2776 Op->VectorList.Count = Count;
2777 Op->VectorList.LaneIndex = Index;
2778 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2779 Op->StartLoc = S;
2780 Op->EndLoc = E;
2781 return Op;
2782 }
2783
2784 static std::unique_ptr<ARMOperand>
2785 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2786 auto Op = make_unique<ARMOperand>(k_VectorIndex);
2787 Op->VectorIndex.Val = Idx;
2788 Op->StartLoc = S;
2789 Op->EndLoc = E;
2790 return Op;
2791 }
2792
2793 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2794 SMLoc E) {
2795 auto Op = make_unique<ARMOperand>(k_Immediate);
2796 Op->Imm.Val = Val;
2797 Op->StartLoc = S;
2798 Op->EndLoc = E;
2799 return Op;
2800 }
2801
2802 static std::unique_ptr<ARMOperand>
2803 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2804 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2805 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2806 SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2807 auto Op = make_unique<ARMOperand>(k_Memory);
2808 Op->Memory.BaseRegNum = BaseRegNum;
2809 Op->Memory.OffsetImm = OffsetImm;
2810 Op->Memory.OffsetRegNum = OffsetRegNum;
2811 Op->Memory.ShiftType = ShiftType;
2812 Op->Memory.ShiftImm = ShiftImm;
2813 Op->Memory.Alignment = Alignment;
2814 Op->Memory.isNegative = isNegative;
2815 Op->StartLoc = S;
2816 Op->EndLoc = E;
2817 Op->AlignmentLoc = AlignmentLoc;
2818 return Op;
2819 }
2820
2821 static std::unique_ptr<ARMOperand>
2822 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2823 unsigned ShiftImm, SMLoc S, SMLoc E) {
2824 auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2825 Op->PostIdxReg.RegNum = RegNum;
2826 Op->PostIdxReg.isAdd = isAdd;
2827 Op->PostIdxReg.ShiftTy = ShiftTy;
2828 Op->PostIdxReg.ShiftImm = ShiftImm;
2829 Op->StartLoc = S;
2830 Op->EndLoc = E;
2831 return Op;
2832 }
2833
2834 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
2835 SMLoc S) {
2836 auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
2837 Op->MBOpt.Val = Opt;
2838 Op->StartLoc = S;
2839 Op->EndLoc = S;
2840 return Op;
2841 }
2842
2843 static std::unique_ptr<ARMOperand>
2844 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
2845 auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
2846 Op->ISBOpt.Val = Opt;
2847 Op->StartLoc = S;
2848 Op->EndLoc = S;
2849 return Op;
2850 }
2851
2852 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
2853 SMLoc S) {
2854 auto Op = make_unique<ARMOperand>(k_ProcIFlags);
2855 Op->IFlags.Val = IFlags;
2856 Op->StartLoc = S;
2857 Op->EndLoc = S;
2858 return Op;
2859 }
2860
2861 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
2862 auto Op = make_unique<ARMOperand>(k_MSRMask);
2863 Op->MMask.Val = MMask;
2864 Op->StartLoc = S;
2865 Op->EndLoc = S;
2866 return Op;
2867 }
2868
2869 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
2870 auto Op = make_unique<ARMOperand>(k_BankedReg);
2871 Op->BankedReg.Val = Reg;
2872 Op->StartLoc = S;
2873 Op->EndLoc = S;
2874 return Op;
2875 }
2876 };
2877
2878 } // end anonymous namespace.
2879
2880 void ARMOperand::print(raw_ostream &OS) const {
2881 switch (Kind) {
2882 case k_CondCode:
2883 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2884 break;
2885 case k_CCOut:
2886 OS << "<ccout " << getReg() << ">";
2887 break;
2888 case k_ITCondMask: {
2889 static const char *const MaskStr[] = {
2890 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2891 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2892 };
2893 assert((ITMask.Mask & 0xf) == ITMask.Mask);
2894 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2895 break;
2896 }
2897 case k_CoprocNum:
2898 OS << "<coprocessor number: " << getCoproc() << ">";
2899 break;
2900 case k_CoprocReg:
2901 OS << "<coprocessor register: " << getCoproc() << ">";
2902 break;
2903 case k_CoprocOption:
2904 OS << "<coprocessor option: " << CoprocOption.Val << ">";
2905 break;
2906 case k_MSRMask:
2907 OS << "<mask: " << getMSRMask() << ">";
2908 break;
2909 case k_BankedReg:
2910 OS << "<banked reg: " << getBankedReg() << ">";
2911 break;
2912 case k_Immediate:
2913 OS << *getImm();
2914 break;
2915 case k_MemBarrierOpt:
2916 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2917 break;
2918 case k_InstSyncBarrierOpt:
2919 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2920 break;
2921 case k_Memory:
2922 OS << "<memory "
2923 << " base:" << Memory.BaseRegNum;
2924 OS << ">";
2925 break;
2926 case k_PostIndexRegister:
2927 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2928 << PostIdxReg.RegNum;
2929 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2930 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2931 << PostIdxReg.ShiftImm;
2932 OS << ">";
2933 break;
2934 case k_ProcIFlags: {
2935 OS << "<ARM_PROC::";
2936 unsigned IFlags = getProcIFlags();
2937 for (int i=2; i >= 0; --i)
2938 if (IFlags & (1 << i))
2939 OS << ARM_PROC::IFlagsToString(1 << i);
2940 OS << ">";
2941 break;
2942 }
2943 case k_Register:
2944 OS << "<register " << getReg() << ">";
2945 break;
2946 case k_ShifterImmediate:
2947 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2948 << " #" << ShifterImm.Imm << ">";
2949 break;
2950 case k_ShiftedRegister:
2951 OS << "<so_reg_reg "
2952 << RegShiftedReg.SrcReg << " "
2953 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2954 << " " << RegShiftedReg.ShiftReg << ">";
2955 break;
2956 case k_ShiftedImmediate:
2957 OS << "<so_reg_imm "
2958 << RegShiftedImm.SrcReg << " "
2959 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2960 << " #" << RegShiftedImm.ShiftImm << ">";
2961 break;
2962 case k_RotateImmediate:
2963 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2964 break;
2965 case k_ModifiedImmediate:
2966 OS << "<mod_imm #" << ModImm.Bits << ", #"
2967 << ModImm.Rot << ")>";
2968 break;
2969 case k_ConstantPoolImmediate:
2970 OS << "<constant_pool_imm #" << *getConstantPoolImm();
2971 break;
2972 case k_BitfieldDescriptor:
2973 OS << "<bitfield " << "lsb: " << Bitfield.LSB
2974 << ", width: " << Bitfield.Width << ">";
2975 break;
2976 case k_RegisterList:
2977 case k_DPRRegisterList:
2978 case k_SPRRegisterList: {
2979 OS << "<register_list ";
2980
2981 const SmallVectorImpl<unsigned> &RegList = getRegList();
2982 for (SmallVectorImpl<unsigned>::const_iterator
2983 I = RegList.begin(), E = RegList.end(); I != E; ) {
2984 OS << *I;
2985 if (++I < E) OS << ", ";
2986 }
2987
2988 OS << ">";
2989 break;
2990 }
2991 case k_VectorList:
2992 OS << "<vector_list " << VectorList.Count << " * "
2993 << VectorList.RegNum << ">";
2994 break;
2995 case k_VectorListAllLanes:
2996 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2997 << VectorList.RegNum << ">";
2998 break;
2999 case k_VectorListIndexed:
3000 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3001 << VectorList.Count << " * " << VectorList.RegNum << ">";
3002 break;
3003 case k_Token:
3004 OS << "'" << getToken() << "'";
3005 break;
3006 case k_VectorIndex:
3007 OS << "<vectorindex " << getVectorIndex() << ">";
3008 break;
3009 }
3010 }
3011
3012 /// @name Auto-generated Match Functions
3013 /// {
3014
3015 static unsigned MatchRegisterName(StringRef Name);
3016
3017 /// }
3018
3019 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3020 SMLoc &StartLoc, SMLoc &EndLoc) {
3021 const AsmToken &Tok = getParser().getTok();
3022 StartLoc = Tok.getLoc();
3023 EndLoc = Tok.getEndLoc();
3024 RegNo = tryParseRegister();
3025
3026 return (RegNo == (unsigned)-1);
3027 }
3028
3029 /// Try to parse a register name. The token must be an Identifier when called,
3030 /// and if it is a register name the token is eaten and the register number is
3031 /// returned. Otherwise return -1.
3032 ///
3033 int ARMAsmParser::tryParseRegister() {
3034 MCAsmParser &Parser = getParser();
3035 const AsmToken &Tok = Parser.getTok();
3036 if (Tok.isNot(AsmToken::Identifier)) return -1;
3037
3038 std::string lowerCase = Tok.getString().lower();
3039 unsigned RegNum = MatchRegisterName(lowerCase);
3040 if (!RegNum) {
3041 RegNum = StringSwitch<unsigned>(lowerCase)
3042 .Case("r13", ARM::SP)
3043 .Case("r14", ARM::LR)
3044 .Case("r15", ARM::PC)
3045 .Case("ip", ARM::R12)
3046 // Additional register name aliases for 'gas' compatibility.
3047 .Case("a1", ARM::R0)
3048 .Case("a2", ARM::R1)
3049 .Case("a3", ARM::R2)
3050 .Case("a4", ARM::R3)
3051 .Case("v1", ARM::R4)
3052 .Case("v2", ARM::R5)
3053 .Case("v3", ARM::R6)
3054 .Case("v4", ARM::R7)
3055 .Case("v5", ARM::R8)
3056 .Case("v6", ARM::R9)
3057 .Case("v7", ARM::R10)
3058 .Case("v8", ARM::R11)
3059 .Case("sb", ARM::R9)
3060 .Case("sl", ARM::R10)
3061 .Case("fp", ARM::R11)
3062 .Default(0);
3063 }
3064 if (!RegNum) {
3065 // Check for aliases registered via .req. Canonicalize to lower case.
3066 // That's more consistent since register names are case insensitive, and
3067 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3068 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3069 // If no match, return failure.
3070 if (Entry == RegisterReqs.end())
3071 return -1;
3072 Parser.Lex(); // Eat identifier token.
3073 return Entry->getValue();
3074 }
3075
3076 // Some FPUs only have 16 D registers, so D16-D31 are invalid
3077 if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3078 return -1;
3079
3080 Parser.Lex(); // Eat identifier token.
3081
3082 return RegNum;
3083 }
3084
3085 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3086 // If a recoverable error occurs, return 1. If an irrecoverable error
3087 // occurs, return -1. An irrecoverable error is one where tokens have been
3088 // consumed in the process of trying to parse the shifter (i.e., when it is
3089 // indeed a shifter operand, but malformed).
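// Illustrative only (not part of the parser itself): a few shifter operands
// this routine is expected to accept, assuming standard GAS-style ARM syntax
// where the shifted source register has already been parsed:
//   add r0, r1, r2, lsl #3    @ shift by constant
//   mov r0, r1, lsl r2        @ shift by register
//   mov r0, r1, rrx           @ rrx takes no shift amount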
3090 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3091 MCAsmParser &Parser = getParser();
3092 SMLoc S = Parser.getTok().getLoc();
3093 const AsmToken &Tok = Parser.getTok();
3094 if (Tok.isNot(AsmToken::Identifier))
3095 return -1;
3096
3097 std::string lowerCase = Tok.getString().lower();
3098 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3099 .Case("asl", ARM_AM::lsl)
3100 .Case("lsl", ARM_AM::lsl)
3101 .Case("lsr", ARM_AM::lsr)
3102 .Case("asr", ARM_AM::asr)
3103 .Case("ror", ARM_AM::ror)
3104 .Case("rrx", ARM_AM::rrx)
3105 .Default(ARM_AM::no_shift);
3106
3107 if (ShiftTy == ARM_AM::no_shift)
3108 return 1;
3109
3110 Parser.Lex(); // Eat the operator.
3111
3112 // The source register for the shift has already been added to the
3113 // operand list, so we need to pop it off and combine it into the shifted
3114 // register operand instead.
3115 std::unique_ptr<ARMOperand> PrevOp(
3116 (ARMOperand *)Operands.pop_back_val().release());
3117 if (!PrevOp->isReg())
3118 return Error(PrevOp->getStartLoc(), "shift must be of a register");
3119 int SrcReg = PrevOp->getReg();
3120
3121 SMLoc EndLoc;
3122 int64_t Imm = 0;
3123 int ShiftReg = 0;
3124 if (ShiftTy == ARM_AM::rrx) {
3125 // RRX doesn't have an explicit shift amount. The encoder expects
3126 // the shift register to be the same as the source register. Seems odd,
3127 // but OK.
3128 ShiftReg = SrcReg;
3129 } else {
3130 // Figure out if this is shifted by a constant or a register (for non-RRX).
3131 if (Parser.getTok().is(AsmToken::Hash) ||
3132 Parser.getTok().is(AsmToken::Dollar)) {
3133 Parser.Lex(); // Eat hash.
3134 SMLoc ImmLoc = Parser.getTok().getLoc();
3135 const MCExpr *ShiftExpr = nullptr;
3136 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3137 Error(ImmLoc, "invalid immediate shift value");
3138 return -1;
3139 }
3140 // The expression must be evaluatable as an immediate.
3141 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3142 if (!CE) {
3143 Error(ImmLoc, "invalid immediate shift value");
3144 return -1;
3145 }
3146 // Range check the immediate.
3147 // lsl, ror: 0 <= imm <= 31
3148 // lsr, asr: 0 <= imm <= 32
3149 Imm = CE->getValue();
3150 if (Imm < 0 ||
3151 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3152 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3153 Error(ImmLoc, "immediate shift value out of range");
3154 return -1;
3155 }
3156 // shift by zero is a nop. Always send it through as lsl.
3157 // ('as' compatibility)
3158 if (Imm == 0)
3159 ShiftTy = ARM_AM::lsl;
3160 } else if (Parser.getTok().is(AsmToken::Identifier)) {
3161 SMLoc L = Parser.getTok().getLoc();
3162 EndLoc = Parser.getTok().getEndLoc();
3163 ShiftReg = tryParseRegister();
3164 if (ShiftReg == -1) {
3165 Error(L, "expected immediate or register in shift operand");
3166 return -1;
3167 }
3168 } else {
3169 Error(Parser.getTok().getLoc(),
3170 "expected immediate or register in shift operand");
3171 return -1;
3172 }
3173 }
3174
3175 if (ShiftReg && ShiftTy != ARM_AM::rrx)
3176 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3177 ShiftReg, Imm,
3178 S, EndLoc));
3179 else
3180 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3181 S, EndLoc));
3182
3183 return 0;
3184 }
3185
3186
3187 /// Try to parse a register name. The token must be an Identifier when called.
3188 /// If it's a register, an AsmOperand is created; another AsmOperand is created
3189 /// if there is a writeback '!' token. Returns 'true' if it's not a register.
3190 ///
3191 /// TODO this is likely to change to allow different register types and or to
3192 /// parse for a specific register type.
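/// Illustrative examples (not part of the parser), assuming GAS-style syntax:
///   ldmia r0!, {r1, r2}   @ 'r0' followed by a '!' writeback token
///   vmov.32 d1[0], r2     @ 'd1' followed by a vector index operand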
3193 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3194 MCAsmParser &Parser = getParser();
3195 const AsmToken &RegTok = Parser.getTok();
3196 int RegNo = tryParseRegister();
3197 if (RegNo == -1)
3198 return true;
3199
3200 Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3201 RegTok.getEndLoc()));
3202
3203 const AsmToken &ExclaimTok = Parser.getTok();
3204 if (ExclaimTok.is(AsmToken::Exclaim)) {
3205 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3206 ExclaimTok.getLoc()));
3207 Parser.Lex(); // Eat exclaim token
3208 return false;
3209 }
3210
3211 // Also check for an index operand. This is only legal for vector registers,
3212 // but that'll get caught OK in operand matching, so we don't need to
3213 // explicitly filter everything else out here.
3214 if (Parser.getTok().is(AsmToken::LBrac)) {
3215 SMLoc SIdx = Parser.getTok().getLoc();
3216 Parser.Lex(); // Eat left bracket token.
3217
3218 const MCExpr *ImmVal;
3219 if (getParser().parseExpression(ImmVal))
3220 return true;
3221 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3222 if (!MCE)
3223 return TokError("immediate value expected for vector index");
3224
3225 if (Parser.getTok().isNot(AsmToken::RBrac))
3226 return Error(Parser.getTok().getLoc(), "']' expected");
3227
3228 SMLoc E = Parser.getTok().getEndLoc();
3229 Parser.Lex(); // Eat right bracket token.
3230
3231 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3232 SIdx, E,
3233 getContext()));
3234 }
3235
3236 return false;
3237 }
3238
3239 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3240 /// instruction with a symbolic operand name.
3241 /// We accept "crN" syntax for GAS compatibility.
3242 /// <operand-name> ::= <prefix><number>
3243 /// If CoprocOp is 'c', then:
3244 /// <prefix> ::= c | cr
3245 /// If CoprocOp is 'p', then :
3246 /// <prefix> ::= p
3247 /// <number> ::= integer in range [0, 15]
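/// For example (illustrative only): with CoprocOp == 'p', "p15" yields 15;
/// with CoprocOp == 'c', both "c7" and "cr7" yield 7. Anything else
/// returns -1.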
3248 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3249 // Use the same layout as the tablegen'erated register name matcher. Ugly,
3250 // but efficient.
3251 if (Name.size() < 2 || Name[0] != CoprocOp)
3252 return -1;
3253 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3254
3255 switch (Name.size()) {
3256 default: return -1;
3257 case 1:
3258 switch (Name[0]) {
3259 default: return -1;
3260 case '0': return 0;
3261 case '1': return 1;
3262 case '2': return 2;
3263 case '3': return 3;
3264 case '4': return 4;
3265 case '5': return 5;
3266 case '6': return 6;
3267 case '7': return 7;
3268 case '8': return 8;
3269 case '9': return 9;
3270 }
3271 case 2:
3272 if (Name[0] != '1')
3273 return -1;
3274 switch (Name[1]) {
3275 default: return -1;
3276 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3277 // However, old cores (v5/v6) did use them in that way.
3278 case '0': return 10;
3279 case '1': return 11;
3280 case '2': return 12;
3281 case '3': return 13;
3282 case '4': return 14;
3283 case '5': return 15;
3284 }
3285 }
3286 }
3287
3288 /// parseITCondCode - Try to parse a condition code for an IT instruction.
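/// For example (illustrative only): "it eq" and "itte gt" each carry a
/// condition code operand ("eq", "gt") that is parsed here; "al" is also
/// accepted.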
3289 ARMAsmParser::OperandMatchResultTy
3290 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3291 MCAsmParser &Parser = getParser();
3292 SMLoc S = Parser.getTok().getLoc();
3293 const AsmToken &Tok = Parser.getTok();
3294 if (!Tok.is(AsmToken::Identifier))
3295 return MatchOperand_NoMatch;
3296 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
3297 .Case("eq", ARMCC::EQ)
3298 .Case("ne", ARMCC::NE)
3299 .Case("hs", ARMCC::HS)
3300 .Case("cs", ARMCC::HS)
3301 .Case("lo", ARMCC::LO)
3302 .Case("cc", ARMCC::LO)
3303 .Case("mi", ARMCC::MI)
3304 .Case("pl", ARMCC::PL)
3305 .Case("vs", ARMCC::VS)
3306 .Case("vc", ARMCC::VC)
3307 .Case("hi", ARMCC::HI)
3308 .Case("ls", ARMCC::LS)
3309 .Case("ge", ARMCC::GE)
3310 .Case("lt", ARMCC::LT)
3311 .Case("gt", ARMCC::GT)
3312 .Case("le", ARMCC::LE)
3313 .Case("al", ARMCC::AL)
3314 .Default(~0U);
3315 if (CC == ~0U)
3316 return MatchOperand_NoMatch;
3317 Parser.Lex(); // Eat the token.
3318
3319 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3320
3321 return MatchOperand_Success;
3322 }
3323
3324 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3325 /// token must be an Identifier when called, and if it is a coprocessor
3326 /// number, the token is eaten and the operand is added to the operand list.
3327 ARMAsmParser::OperandMatchResultTy
3328 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3329 MCAsmParser &Parser = getParser();
3330 SMLoc S = Parser.getTok().getLoc();
3331 const AsmToken &Tok = Parser.getTok();
3332 if (Tok.isNot(AsmToken::Identifier))
3333 return MatchOperand_NoMatch;
3334
3335 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3336 if (Num == -1)
3337 return MatchOperand_NoMatch;
3338 // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3339 if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3340 return MatchOperand_NoMatch;
3341
3342 Parser.Lex(); // Eat identifier token.
3343 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3344 return MatchOperand_Success;
3345 }
3346
3347 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3348 /// token must be an Identifier when called, and if it is a coprocessor
3349 /// register, the token is eaten and the operand is added to the operand list.
3350 ARMAsmParser::OperandMatchResultTy
3351 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3352 MCAsmParser &Parser = getParser();
3353 SMLoc S = Parser.getTok().getLoc();
3354 const AsmToken &Tok = Parser.getTok();
3355 if (Tok.isNot(AsmToken::Identifier))
3356 return MatchOperand_NoMatch;
3357
3358 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3359 if (Reg == -1)
3360 return MatchOperand_NoMatch;
3361
3362 Parser.Lex(); // Eat identifier token.
3363 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3364 return MatchOperand_Success;
3365 }
3366
3367 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3368 /// coproc_option : '{' imm0_255 '}'
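/// A sketch of where this appears, assuming GAS-style LDC/STC operands
/// (illustrative only; the surrounding instruction is not checked here):
///   ldc p14, c5, [r0], {16}   @ the "{16}" is the coprocessor option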
3369 ARMAsmParser::OperandMatchResultTy
3370 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3371 MCAsmParser &Parser = getParser();
3372 SMLoc S = Parser.getTok().getLoc();
3373
3374 // If this isn't a '{', this isn't a coprocessor immediate operand.
3375 if (Parser.getTok().isNot(AsmToken::LCurly))
3376 return MatchOperand_NoMatch;
3377 Parser.Lex(); // Eat the '{'
3378
3379 const MCExpr *Expr;
3380 SMLoc Loc = Parser.getTok().getLoc();
3381 if (getParser().parseExpression(Expr)) {
3382 Error(Loc, "illegal expression");
3383 return MatchOperand_ParseFail;
3384 }
3385 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3386 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3387 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3388 return MatchOperand_ParseFail;
3389 }
3390 int Val = CE->getValue();
3391
3392 // Check for and consume the closing '}'
3393 if (Parser.getTok().isNot(AsmToken::RCurly))
3394 return MatchOperand_ParseFail;
3395 SMLoc E = Parser.getTok().getEndLoc();
3396 Parser.Lex(); // Eat the '}'
3397
3398 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3399 return MatchOperand_Success;
3400 }
3401
3402 // For register list parsing, we need to map from raw GPR register numbering
3403 // to the enumeration values. The enumeration values aren't sorted by
3404 // register number due to our using "sp", "lr" and "pc" as canonical names.
3405 static unsigned getNextRegister(unsigned Reg) {
3406 // If this is a GPR, we need to do it manually, otherwise we can rely
3407 // on the sort ordering of the enumeration since the other reg-classes
3408 // are sane.
3409 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3410 return Reg + 1;
3411 switch(Reg) {
3412 default: llvm_unreachable("Invalid GPR number!");
3413 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3414 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3415 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3416 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3417 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3418 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3419 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3420 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3421 }
3422 }
3423
3424 // Return the low-subreg of a given Q register.
3425 static unsigned getDRegFromQReg(unsigned QReg) {
3426 switch (QReg) {
3427 default: llvm_unreachable("expected a Q register!");
3428 case ARM::Q0: return ARM::D0;
3429 case ARM::Q1: return ARM::D2;
3430 case ARM::Q2: return ARM::D4;
3431 case ARM::Q3: return ARM::D6;
3432 case ARM::Q4: return ARM::D8;
3433 case ARM::Q5: return ARM::D10;
3434 case ARM::Q6: return ARM::D12;
3435 case ARM::Q7: return ARM::D14;
3436 case ARM::Q8: return ARM::D16;
3437 case ARM::Q9: return ARM::D18;
3438 case ARM::Q10: return ARM::D20;
3439 case ARM::Q11: return ARM::D22;
3440 case ARM::Q12: return ARM::D24;
3441 case ARM::Q13: return ARM::D26;
3442 case ARM::Q14: return ARM::D28;
3443 case ARM::Q15: return ARM::D30;
3444 }
3445 }
3446
3447 /// Parse a register list.
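/// Illustrative examples of the lists accepted here (not exhaustive):
///   push {r4-r7, lr}      @ GPR list with a range
///   vpush {d8-d15}        @ DPR list
///   vldm r0, {s0-s3}      @ SPR list
/// A Q register such as q1 is also accepted and expanded to its two D halves.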
3448 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3449 MCAsmParser &Parser = getParser();
3450 assert(Parser.getTok().is(AsmToken::LCurly) &&
3451 "Token is not a Left Curly Brace");
3452 SMLoc S = Parser.getTok().getLoc();
3453 Parser.Lex(); // Eat '{' token.
3454 SMLoc RegLoc = Parser.getTok().getLoc();
3455
3456 // Check the first register in the list to see what register class
3457 // this is a list of.
3458 int Reg = tryParseRegister();
3459 if (Reg == -1)
3460 return Error(RegLoc, "register expected");
3461
3462 // The reglist instructions have at most 16 registers, so reserve
3463 // space for that many.
3464 int EReg = 0;
3465 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3466
3467 // Allow Q regs and just interpret them as the two D sub-registers.
3468 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3469 Reg = getDRegFromQReg(Reg);
3470 EReg = MRI->getEncodingValue(Reg);
3471 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3472 ++Reg;
3473 }
3474 const MCRegisterClass *RC;
3475 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3476 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3477 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3478 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3479 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3480 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3481 else
3482 return Error(RegLoc, "invalid register in register list");
3483
3484 // Store the register.
3485 EReg = MRI->getEncodingValue(Reg);
3486 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3487
3488 // This starts immediately after the first register token in the list,
3489 // so we can see either a comma or a minus (range separator) as a legal
3490 // next token.
3491 while (Parser.getTok().is(AsmToken::Comma) ||
3492 Parser.getTok().is(AsmToken::Minus)) {
3493 if (Parser.getTok().is(AsmToken::Minus)) {
3494 Parser.Lex(); // Eat the minus.
3495 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3496 int EndReg = tryParseRegister();
3497 if (EndReg == -1)
3498 return Error(AfterMinusLoc, "register expected");
3499 // Allow Q regs and just interpret them as the two D sub-registers.
3500 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3501 EndReg = getDRegFromQReg(EndReg) + 1;
3502 // If the register is the same as the start reg, there's nothing
3503 // more to do.
3504 if (Reg == EndReg)
3505 continue;
3506 // The register must be in the same register class as the first.
3507 if (!RC->contains(EndReg))
3508 return Error(AfterMinusLoc, "invalid register in register list");
3509 // Ranges must go from low to high.
3510 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3511 return Error(AfterMinusLoc, "bad range in register list");
3512
3513 // Add all the registers in the range to the register list.
3514 while (Reg != EndReg) {
3515 Reg = getNextRegister(Reg);
3516 EReg = MRI->getEncodingValue(Reg);
3517 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3518 }
3519 continue;
3520 }
3521 Parser.Lex(); // Eat the comma.
3522 RegLoc = Parser.getTok().getLoc();
3523 int OldReg = Reg;
3524 const AsmToken RegTok = Parser.getTok();
3525 Reg = tryParseRegister();
3526 if (Reg == -1)
3527 return Error(RegLoc, "register expected");
3528 // Allow Q regs and just interpret them as the two D sub-registers.
3529 bool isQReg = false;
3530 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3531 Reg = getDRegFromQReg(Reg);
3532 isQReg = true;
3533 }
3534 // The register must be in the same register class as the first.
3535 if (!RC->contains(Reg))
3536 return Error(RegLoc, "invalid register in register list");
3537 // List must be monotonically increasing.
3538 if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3539 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3540 Warning(RegLoc, "register list not in ascending order");
3541 else
3542 return Error(RegLoc, "register list not in ascending order");
3543 }
3544 if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3545 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3546 ") in register list");
3547 continue;
3548 }
3549 // VFP register lists must also be contiguous.
3550 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3551 Reg != OldReg + 1)
3552 return Error(RegLoc, "non-contiguous register range");
3553 EReg = MRI->getEncodingValue(Reg);
3554 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3555 if (isQReg) {
3556 EReg = MRI->getEncodingValue(++Reg);
3557 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3558 }
3559 }
3560
3561 if (Parser.getTok().isNot(AsmToken::RCurly))
3562 return Error(Parser.getTok().getLoc(), "'}' expected");
3563 SMLoc E = Parser.getTok().getEndLoc();
3564 Parser.Lex(); // Eat '}' token.
3565
3566 // Push the register list operand.
3567 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3568
3569 // The ARM system instruction variants for LDM/STM have a '^' token here.
3570 if (Parser.getTok().is(AsmToken::Caret)) {
3571 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3572 Parser.Lex(); // Eat '^' token.
3573 }
3574
3575 return false;
3576 }
3577
3578 // Helper function to parse the lane index for vector lists.
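// Illustrative forms (not part of the parser): "d2[1]" is an indexed lane,
// "d2[]" is the all-lanes syntax, and a bare "d2" has no lane specifier.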
3579 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3580 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3581 MCAsmParser &Parser = getParser();
3582 Index = 0; // Always return a defined index value.
3583 if (Parser.getTok().is(AsmToken::LBrac)) {
3584 Parser.Lex(); // Eat the '['.
3585 if (Parser.getTok().is(AsmToken::RBrac)) {
3586 // "Dn[]" is the 'all lanes' syntax.
3587 LaneKind = AllLanes;
3588 EndLoc = Parser.getTok().getEndLoc();
3589 Parser.Lex(); // Eat the ']'.
3590 return MatchOperand_Success;
3591 }
3592
3593 // There's an optional '#' token here. Normally there wouldn't be, but
3594 // inline assembly puts one in, and it's friendly to accept that.
3595 if (Parser.getTok().is(AsmToken::Hash))
3596 Parser.Lex(); // Eat '#' or '$'.
3597
3598 const MCExpr *LaneIndex;
3599 SMLoc Loc = Parser.getTok().getLoc();
3600 if (getParser().parseExpression(LaneIndex)) {
3601 Error(Loc, "illegal expression");
3602 return MatchOperand_ParseFail;
3603 }
3604 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3605 if (!CE) {
3606 Error(Loc, "lane index must be empty or an integer");
3607 return MatchOperand_ParseFail;
3608 }
3609 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3610 Error(Parser.getTok().getLoc(), "']' expected");
3611 return MatchOperand_ParseFail;
3612 }
3613 EndLoc = Parser.getTok().getEndLoc();
3614 Parser.Lex(); // Eat the ']'.
3615 int64_t Val = CE->getValue();
3616
3617 // FIXME: Make this range check context sensitive for .8, .16, .32.
3618 if (Val < 0 || Val > 7) {
3619 Error(Parser.getTok().getLoc(), "lane index out of range");
3620 return MatchOperand_ParseFail;
3621 }
3622 Index = Val;
3623 LaneKind = IndexedLane;
3624 return MatchOperand_Success;
3625 }
3626 LaneKind = NoLanes;
3627 return MatchOperand_Success;
3628 }
3629
3630 // parse a vector register list
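// Illustrative examples of the lists accepted here (GAS-style NEON syntax):
//   vld1.32 {d0, d1, d2, d3}, [r0]   @ single-spaced list
//   vld2.8  {d0, d2}, [r0]           @ double-spaced list
//   vld1.32 {d0[1]}, [r0]            @ indexed-lane list
//   vld1.8  {d0[]}, [r0]             @ all-lanes list
// A bare D or Q register (e.g. "q0") is also accepted, as noted below.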
3631 ARMAsmParser::OperandMatchResultTy
3632 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3633 MCAsmParser &Parser = getParser();
3634 VectorLaneTy LaneKind;
3635 unsigned LaneIndex;
3636 SMLoc S = Parser.getTok().getLoc();
3637 // As an extension (to match gas), support a plain D register or Q register
3638 // (without enclosing curly braces) as a single or double entry list,
3639 // respectively.
3640 if (Parser.getTok().is(AsmToken::Identifier)) {
3641 SMLoc E = Parser.getTok().getEndLoc();
3642 int Reg = tryParseRegister();
3643 if (Reg == -1)
3644 return MatchOperand_NoMatch;
3645 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3646 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3647 if (Res != MatchOperand_Success)
3648 return Res;
3649 switch (LaneKind) {
3650 case NoLanes:
3651 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3652 break;
3653 case AllLanes:
3654 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3655 S, E));
3656 break;
3657 case IndexedLane:
3658 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3659 LaneIndex,
3660 false, S, E));
3661 break;
3662 }
3663 return MatchOperand_Success;
3664 }
3665 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3666 Reg = getDRegFromQReg(Reg);
3667 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3668 if (Res != MatchOperand_Success)
3669 return Res;
3670 switch (LaneKind) {
3671 case NoLanes:
3672 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3673 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3674 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3675 break;
3676 case AllLanes:
3677 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3678 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3679 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3680 S, E));
3681 break;
3682 case IndexedLane:
3683 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3684 LaneIndex,
3685 false, S, E));
3686 break;
3687 }
3688 return MatchOperand_Success;
3689 }
3690 Error(S, "vector register expected");
3691 return MatchOperand_ParseFail;
3692 }
3693
3694 if (Parser.getTok().isNot(AsmToken::LCurly))
3695 return MatchOperand_NoMatch;
3696
3697 Parser.Lex(); // Eat '{' token.
3698 SMLoc RegLoc = Parser.getTok().getLoc();
3699
3700 int Reg = tryParseRegister();
3701 if (Reg == -1) {
3702 Error(RegLoc, "register expected");
3703 return MatchOperand_ParseFail;
3704 }
3705 unsigned Count = 1;
3706 int Spacing = 0;
3707 unsigned FirstReg = Reg;
3708 // The list is of D registers, but we also allow Q regs and just interpret
3709 // them as the two D sub-registers.
3710 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3711 FirstReg = Reg = getDRegFromQReg(Reg);
3712 Spacing = 1; // double-spacing requires explicit D registers, otherwise
3713 // it's ambiguous with four-register single spaced.
3714 ++Reg;
3715 ++Count;
3716 }
3717
3718 SMLoc E;
3719 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3720 return MatchOperand_ParseFail;
3721
3722 while (Parser.getTok().is(AsmToken::Comma) ||
3723 Parser.getTok().is(AsmToken::Minus)) {
3724 if (Parser.getTok().is(AsmToken::Minus)) {
3725 if (!Spacing)
3726 Spacing = 1; // Register range implies a single spaced list.
3727 else if (Spacing == 2) {
3728 Error(Parser.getTok().getLoc(),
3729 "sequential registers in double spaced list");
3730 return MatchOperand_ParseFail;
3731 }
3732 Parser.Lex(); // Eat the minus.
3733 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3734 int EndReg = tryParseRegister();
3735 if (EndReg == -1) {
3736 Error(AfterMinusLoc, "register expected");
3737 return MatchOperand_ParseFail;
3738 }
3739 // Allow Q regs and just interpret them as the two D sub-registers.
3740 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3741 EndReg = getDRegFromQReg(EndReg) + 1;
3742 // If the register is the same as the start reg, there's nothing
3743 // more to do.
3744 if (Reg == EndReg)
3745 continue;
3746 // The register must be in the same register class as the first.
3747 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3748 Error(AfterMinusLoc, "invalid register in register list");
3749 return MatchOperand_ParseFail;
3750 }
3751 // Ranges must go from low to high.
3752 if (Reg > EndReg) {
3753 Error(AfterMinusLoc, "bad range in register list");
3754 return MatchOperand_ParseFail;
3755 }
3756 // Parse the lane specifier if present.
3757 VectorLaneTy NextLaneKind;
3758 unsigned NextLaneIndex;
3759 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3760 MatchOperand_Success)
3761 return MatchOperand_ParseFail;
3762 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3763 Error(AfterMinusLoc, "mismatched lane index in register list");
3764 return MatchOperand_ParseFail;
3765 }
3766
3767 // Add all the registers in the range to the register list.
3768 Count += EndReg - Reg;
3769 Reg = EndReg;
3770 continue;
3771 }
3772 Parser.Lex(); // Eat the comma.
3773 RegLoc = Parser.getTok().getLoc();
3774 int OldReg = Reg;
3775 Reg = tryParseRegister();
3776 if (Reg == -1) {
3777 Error(RegLoc, "register expected");
3778 return MatchOperand_ParseFail;
3779 }
3780 // vector register lists must be contiguous.
3781 // It's OK to use the enumeration values directly here, as the VFP
3782 // register classes have the enum sorted properly.
3783 //
3784 // The list is of D registers, but we also allow Q regs and just interpret
3785 // them as the two D sub-registers.
3786 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3787 if (!Spacing)
3788 Spacing = 1; // Register range implies a single spaced list.
3789 else if (Spacing == 2) {
3790 Error(RegLoc,
3791 "invalid register in double-spaced list (must be 'D' register')");
3792 return MatchOperand_ParseFail;
3793 }
3794 Reg = getDRegFromQReg(Reg);
3795 if (Reg != OldReg + 1) {
3796 Error(RegLoc, "non-contiguous register range");
3797 return MatchOperand_ParseFail;
3798 }
3799 ++Reg;
3800 Count += 2;
3801 // Parse the lane specifier if present.
3802 VectorLaneTy NextLaneKind;
3803 unsigned NextLaneIndex;
3804 SMLoc LaneLoc = Parser.getTok().getLoc();
3805 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3806 MatchOperand_Success)
3807 return MatchOperand_ParseFail;
3808 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3809 Error(LaneLoc, "mismatched lane index in register list");
3810 return MatchOperand_ParseFail;
3811 }
3812 continue;
3813 }
3814 // Normal D register.
3815 // Figure out the register spacing (single or double) of the list if
3816 // we don't know it already.
3817 if (!Spacing)
3818 Spacing = 1 + (Reg == OldReg + 2);
3819
3820 // Just check that it's contiguous and keep going.
3821 if (Reg != OldReg + Spacing) {
3822 Error(RegLoc, "non-contiguous register range");
3823 return MatchOperand_ParseFail;
3824 }
3825 ++Count;
3826 // Parse the lane specifier if present.
3827 VectorLaneTy NextLaneKind;
3828 unsigned NextLaneIndex;
3829 SMLoc EndLoc = Parser.getTok().getLoc();
3830 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3831 return MatchOperand_ParseFail;
3832 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3833 Error(EndLoc, "mismatched lane index in register list");
3834 return MatchOperand_ParseFail;
3835 }
3836 }
3837
3838 if (Parser.getTok().isNot(AsmToken::RCurly)) {
3839 Error(Parser.getTok().getLoc(), "'}' expected");
3840 return MatchOperand_ParseFail;
3841 }
3842 E = Parser.getTok().getEndLoc();
3843 Parser.Lex(); // Eat '}' token.
3844
3845 switch (LaneKind) {
3846 case NoLanes:
3847 // Two-register operands have been converted to the
3848 // composite register classes.
3849 if (Count == 2) {
3850 const MCRegisterClass *RC = (Spacing == 1) ?
3851 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3852 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3853 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3854 }
3855
3856 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3857 (Spacing == 2), S, E));
3858 break;
3859 case AllLanes:
3860 // Two-register operands have been converted to the
3861 // composite register classes.
3862 if (Count == 2) {
3863 const MCRegisterClass *RC = (Spacing == 1) ?
3864 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3865 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3866 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3867 }
3868 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3869 (Spacing == 2),
3870 S, E));
3871 break;
3872 case IndexedLane:
3873 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3874 LaneIndex,
3875 (Spacing == 2),
3876 S, E));
3877 break;
3878 }
3879 return MatchOperand_Success;
3880 }
3881
3882 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
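/// Illustrative examples: "dmb ish", "dsb sy", "dmb oshst". A raw 4-bit
/// immediate form such as "dmb #11" is also accepted (assumption: the value
/// maps directly onto the same 4-bit option encoding space).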
3883 ARMAsmParser::OperandMatchResultTy
3884 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
3885 MCAsmParser &Parser = getParser();
3886 SMLoc S = Parser.getTok().getLoc();
3887 const AsmToken &Tok = Parser.getTok();
3888 unsigned Opt;
3889
3890 if (Tok.is(AsmToken::Identifier)) {
3891 StringRef OptStr = Tok.getString();
3892
3893 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3894 .Case("sy", ARM_MB::SY)
3895 .Case("st", ARM_MB::ST)
3896 .Case("ld", ARM_MB::LD)
3897 .Case("sh", ARM_MB::ISH)
3898 .Case("ish", ARM_MB::ISH)
3899 .Case("shst", ARM_MB::ISHST)
3900 .Case("ishst", ARM_MB::ISHST)
3901 .Case("ishld", ARM_MB::ISHLD)
3902 .Case("nsh", ARM_MB::NSH)
3903 .Case("un", ARM_MB::NSH)
3904 .Case("nshst", ARM_MB::NSHST)
3905 .Case("nshld", ARM_MB::NSHLD)
3906 .Case("unst", ARM_MB::NSHST)
3907 .Case("osh", ARM_MB::OSH)
3908 .Case("oshst", ARM_MB::OSHST)
3909 .Case("oshld", ARM_MB::OSHLD)
3910 .Default(~0U);
3911
3912 // ishld, oshld, nshld and ld are only available from ARMv8.
3913 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3914 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3915 Opt = ~0U;
3916
3917 if (Opt == ~0U)
3918 return MatchOperand_NoMatch;
3919
3920 Parser.Lex(); // Eat identifier token.
3921 } else if (Tok.is(AsmToken::Hash) ||
3922 Tok.is(AsmToken::Dollar) ||
3923 Tok.is(AsmToken::Integer)) {
3924 if (Parser.getTok().isNot(AsmToken::Integer))
3925 Parser.Lex(); // Eat '#' or '$'.
3926 SMLoc Loc = Parser.getTok().getLoc();
3927
3928 const MCExpr *MemBarrierID;
3929 if (getParser().parseExpression(MemBarrierID)) {
3930 Error(Loc, "illegal expression");
3931 return MatchOperand_ParseFail;
3932 }
3933
3934 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3935 if (!CE) {
3936 Error(Loc, "constant expression expected");
3937 return MatchOperand_ParseFail;
3938 }
3939
3940 int Val = CE->getValue();
3941 if (Val & ~0xf) {
3942 Error(Loc, "immediate value out of range");
3943 return MatchOperand_ParseFail;
3944 }
3945
3946 Opt = ARM_MB::RESERVED_0 + Val;
3947 } else
3948 return MatchOperand_ParseFail;
3949
3950 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3951 return MatchOperand_Success;
3952 }
3953
3954 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
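/// Illustrative examples: "isb sy", or a raw 4-bit immediate such as
/// "isb #15" (which encodes the same option as "sy").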
3955 ARMAsmParser::OperandMatchResultTy
3956 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
3957 MCAsmParser &Parser = getParser();
3958 SMLoc S = Parser.getTok().getLoc();
3959 const AsmToken &Tok = Parser.getTok();
3960 unsigned Opt;
3961
3962 if (Tok.is(AsmToken::Identifier)) {
3963 StringRef OptStr = Tok.getString();
3964
3965 if (OptStr.equals_lower("sy"))
3966 Opt = ARM_ISB::SY;
3967 else
3968 return MatchOperand_NoMatch;
3969
3970 Parser.Lex(); // Eat identifier token.
3971 } else if (Tok.is(AsmToken::Hash) ||
3972 Tok.is(AsmToken::Dollar) ||
3973 Tok.is(AsmToken::Integer)) {
3974 if (Parser.getTok().isNot(AsmToken::Integer))
3975 Parser.Lex(); // Eat '#' or '$'.
3976 SMLoc Loc = Parser.getTok().getLoc();
3977
3978 const MCExpr *ISBarrierID;
3979 if (getParser().parseExpression(ISBarrierID)) {
3980 Error(Loc, "illegal expression");
3981 return MatchOperand_ParseFail;
3982 }
3983
3984 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3985 if (!CE) {
3986 Error(Loc, "constant expression expected");
3987 return MatchOperand_ParseFail;
3988 }
3989
3990 int Val = CE->getValue();
3991 if (Val & ~0xf) {
3992 Error(Loc, "immediate value out of range");
3993 return MatchOperand_ParseFail;
3994 }
3995
3996 Opt = ARM_ISB::RESERVED_0 + Val;
3997 } else
3998 return MatchOperand_ParseFail;
3999
4000 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4001 (ARM_ISB::InstSyncBOpt)Opt, S));
4002 return MatchOperand_Success;
4003 }
4004
4005
4006 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
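/// Illustrative examples: "cpsid if" masks IRQ and FIQ, "cpsie a" enables
/// asynchronous aborts; the string "none" selects no AIF bits at all.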
4007 ARMAsmParser::OperandMatchResultTy
4008 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4009 MCAsmParser &Parser = getParser();
4010 SMLoc S = Parser.getTok().getLoc();
4011 const AsmToken &Tok = Parser.getTok();
4012 if (!Tok.is(AsmToken::Identifier))
4013 return MatchOperand_NoMatch;
4014 StringRef IFlagsStr = Tok.getString();
4015
4016 // An iflags string of "none" is interpreted to mean that none of the AIF
4017 // bits are set. Not a terribly useful instruction, but a valid encoding.
4018 unsigned IFlags = 0;
4019 if (IFlagsStr != "none") {
4020 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4021 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
4022 .Case("a", ARM_PROC::A)
4023 .Case("i", ARM_PROC::I)
4024 .Case("f", ARM_PROC::F)
4025 .Default(~0U);
4026
4027 // If some specific iflag is already set, it means that some letter is
4028 // present more than once, which is not acceptable.
4029 if (Flag == ~0U || (IFlags & Flag))
4030 return MatchOperand_NoMatch;
4031
4032 IFlags |= Flag;
4033 }
4034 }
4035
4036 Parser.Lex(); // Eat identifier token.
4037 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4038 return MatchOperand_Success;
4039 }
4040
4041 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
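/// Illustrative examples: on M-class, "msr apsr_nzcvq, r0" or
/// "msr basepri, r1"; on A/R-class, "msr cpsr_fc, r0" or "msr spsr_fsxc, r1".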
4042 ARMAsmParser::OperandMatchResultTy
4043 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4044 MCAsmParser &Parser = getParser();
4045 SMLoc S = Parser.getTok().getLoc();
4046 const AsmToken &Tok = Parser.getTok();
4047 if (!Tok.is(AsmToken::Identifier))
4048 return MatchOperand_NoMatch;
4049 StringRef Mask = Tok.getString();
4050
4051 if (isMClass()) {
4052 // See ARMv6-M 10.1.1
4053 std::string Name = Mask.lower();
4054 unsigned FlagsVal = StringSwitch<unsigned>(Name)
4055 // Note: the ARM documentation deprecates using MSR APSR without a
4056 // _<bits> qualifier, treating it only as an alias for MSR APSR_nzcvq,
4057 // but we do make the bare form an alias here as well. This is so as
4058 // to get the "mask encoding" bits correct when assembling MSR APSR
4059 // writes.
4060 //
4061 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
4062 // should really only be allowed when writing a special register. Note
4063 // they get dropped in the MRS instruction reading a special register as
4064 // the SYSm field is only 8 bits.
4065 .Case("apsr", 0x800)
4066 .Case("apsr_nzcvq", 0x800)
4067 .Case("apsr_g", 0x400)
4068 .Case("apsr_nzcvqg", 0xc00)
4069 .Case("iapsr", 0x801)
4070 .Case("iapsr_nzcvq", 0x801)
4071 .Case("iapsr_g", 0x401)
4072 .Case("iapsr_nzcvqg", 0xc01)
4073 .Case("eapsr", 0x802)
4074 .Case("eapsr_nzcvq", 0x802)
4075 .Case("eapsr_g", 0x402)
4076 .Case("eapsr_nzcvqg", 0xc02)
4077 .Case("xpsr", 0x803)
4078 .Case("xpsr_nzcvq", 0x803)
4079 .Case("xpsr_g", 0x403)
4080 .Case("xpsr_nzcvqg", 0xc03)
4081 .Case("ipsr", 0x805)
4082 .Case("epsr", 0x806)
4083 .Case("iepsr", 0x807)
4084 .Case("msp", 0x808)
4085 .Case("psp", 0x809)
4086 .Case("primask", 0x810)
4087 .Case("basepri", 0x811)
4088 .Case("basepri_max", 0x812)
4089 .Case("faultmask", 0x813)
4090 .Case("control", 0x814)
4091 .Case("msplim", 0x80a)
4092 .Case("psplim", 0x80b)
4093 .Case("msp_ns", 0x888)
4094 .Case("psp_ns", 0x889)
4095 .Case("msplim_ns", 0x88a)
4096 .Case("psplim_ns", 0x88b)
4097 .Case("primask_ns", 0x890)
4098 .Case("basepri_ns", 0x891)
4099 .Case("basepri_max_ns", 0x892)
4100 .Case("faultmask_ns", 0x893)
4101 .Case("control_ns", 0x894)
4102 .Case("sp_ns", 0x898)
4103 .Default(~0U);
4104
4105 if (FlagsVal == ~0U)
4106 return MatchOperand_NoMatch;
4107
4108 if (!hasDSP() && (FlagsVal & 0x400))
4109 // The _g and _nzcvqg versions are only valid if the DSP extension is
4110 // available.
4111 return MatchOperand_NoMatch;
4112
4113 if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
4114 // basepri, basepri_max and faultmask are only valid for v7-M.
4115 return MatchOperand_NoMatch;
4116
4117 if (!has8MSecExt() && (FlagsVal == 0x80a || FlagsVal == 0x80b ||
4118 (FlagsVal > 0x814 && FlagsVal < 0xc00)))
4119 return MatchOperand_NoMatch;
4120
4121 if (!hasV8MMainline() && (FlagsVal == 0x88a || FlagsVal == 0x88b ||
4122 (FlagsVal > 0x890 && FlagsVal <= 0x893)))
4123 return MatchOperand_NoMatch;
4124
4125 Parser.Lex(); // Eat identifier token.
4126 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4127 return MatchOperand_Success;
4128 }
4129
4130 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4131 size_t Start = 0, Next = Mask.find('_');
4132 StringRef Flags = "";
4133 std::string SpecReg = Mask.slice(Start, Next).lower();
4134 if (Next != StringRef::npos)
4135 Flags = Mask.slice(Next+1, Mask.size());
4136
4137 // FlagsVal contains the complete mask:
4138 // 3-0: Mask
4139 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4140 unsigned FlagsVal = 0;
4141
4142 if (SpecReg == "apsr") {
4143 FlagsVal = StringSwitch<unsigned>(Flags)
4144 .Case("nzcvq", 0x8) // same as CPSR_f
4145 .Case("g", 0x4) // same as CPSR_s
4146 .Case("nzcvqg", 0xc) // same as CPSR_fs
4147 .Default(~0U);
4148
4149 if (FlagsVal == ~0U) {
4150 if (!Flags.empty())
4151 return MatchOperand_NoMatch;
4152 else
4153 FlagsVal = 8; // No flag
4154 }
4155 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4156 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4157 if (Flags == "all" || Flags == "")
4158 Flags = "fc";
4159 for (int i = 0, e = Flags.size(); i != e; ++i) {
4160 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4161 .Case("c", 1)
4162 .Case("x", 2)
4163 .Case("s", 4)
4164 .Case("f", 8)
4165 .Default(~0U);
4166
4167 // If some specific flag is already set, it means that some letter is
4168 // present more than once, which is not acceptable.
4169 if (FlagsVal == ~0U || (FlagsVal & Flag))
4170 return MatchOperand_NoMatch;
4171 FlagsVal |= Flag;
4172 }
4173 } else // No match for special register.
4174 return MatchOperand_NoMatch;
4175
4176 // Special register without flags is NOT equivalent to "fc" flags.
4177 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
4178 // two lines would enable gas compatibility at the expense of breaking
4179 // round-tripping.
4180 //
4181 // if (!FlagsVal)
4182 // FlagsVal = 0x9;
4183
4184 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4185 if (SpecReg == "spsr")
4186 FlagsVal |= 16;
4187
4188 Parser.Lex(); // Eat identifier token.
4189 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4190 return MatchOperand_Success;
4191 }
4192
4193 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4194 /// use in the MRS/MSR instructions added to support virtualization.
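/// Illustrative examples: "mrs r0, lr_irq", "msr sp_usr, r1",
/// "mrs r2, spsr_fiq".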
4195 ARMAsmParser::OperandMatchResultTy
4196 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4197 MCAsmParser &Parser = getParser();
4198 SMLoc S = Parser.getTok().getLoc();
4199 const AsmToken &Tok = Parser.getTok();
4200 if (!Tok.is(AsmToken::Identifier))
4201 return MatchOperand_NoMatch;
4202 StringRef RegName = Tok.getString();
4203
4204 // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM
4205 // and bit 5 is R.
4206 unsigned Encoding = StringSwitch<unsigned>(RegName.lower())
4207 .Case("r8_usr", 0x00)
4208 .Case("r9_usr", 0x01)
4209 .Case("r10_usr", 0x02)
4210 .Case("r11_usr", 0x03)
4211 .Case("r12_usr", 0x04)
4212 .Case("sp_usr", 0x05)
4213 .Case("lr_usr", 0x06)
4214 .Case("r8_fiq", 0x08)
4215 .Case("r9_fiq", 0x09)
4216 .Case("r10_fiq", 0x0a)
4217 .Case("r11_fiq", 0x0b)
4218 .Case("r12_fiq", 0x0c)
4219 .Case("sp_fiq", 0x0d)
4220 .Case("lr_fiq", 0x0e)
4221 .Case("lr_irq", 0x10)
4222 .Case("sp_irq", 0x11)
4223 .Case("lr_svc", 0x12)
4224 .Case("sp_svc", 0x13)
4225 .Case("lr_abt", 0x14)
4226 .Case("sp_abt", 0x15)
4227 .Case("lr_und", 0x16)
4228 .Case("sp_und", 0x17)
4229 .Case("lr_mon", 0x1c)
4230 .Case("sp_mon", 0x1d)
4231 .Case("elr_hyp", 0x1e)
4232 .Case("sp_hyp", 0x1f)
4233 .Case("spsr_fiq", 0x2e)
4234 .Case("spsr_irq", 0x30)
4235 .Case("spsr_svc", 0x32)
4236 .Case("spsr_abt", 0x34)
4237 .Case("spsr_und", 0x36)
4238 .Case("spsr_mon", 0x3c)
4239 .Case("spsr_hyp", 0x3e)
4240 .Default(~0U);
4241
4242 if (Encoding == ~0U)
4243 return MatchOperand_NoMatch;
4244
4245 Parser.Lex(); // Eat identifier token.
4246 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4247 return MatchOperand_Success;
4248 }
4249
4250 ARMAsmParser::OperandMatchResultTy
4251 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4252 int High) {
4253 MCAsmParser &Parser = getParser();
4254 const AsmToken &Tok = Parser.getTok();
4255 if (Tok.isNot(AsmToken::Identifier)) {
4256 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4257 return MatchOperand_ParseFail;
4258 }
4259 StringRef ShiftName = Tok.getString();
4260 std::string LowerOp = Op.lower();
4261 std::string UpperOp = Op.upper();
4262 if (ShiftName != LowerOp && ShiftName != UpperOp) {
4263 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4264 return MatchOperand_ParseFail;
4265 }
4266 Parser.Lex(); // Eat shift type token.
4267
4268 // There must be a '#' and a shift amount.
4269 if (Parser.getTok().isNot(AsmToken::Hash) &&
4270 Parser.getTok().isNot(AsmToken::Dollar)) {
4271 Error(Parser.getTok().getLoc(), "'#' expected");
4272 return MatchOperand_ParseFail;
4273 }
4274 Parser.Lex(); // Eat hash token.
4275
4276 const MCExpr *ShiftAmount;
4277 SMLoc Loc = Parser.getTok().getLoc();
4278 SMLoc EndLoc;
4279 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4280 Error(Loc, "illegal expression");
4281 return MatchOperand_ParseFail;
4282 }
4283 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4284 if (!CE) {
4285 Error(Loc, "constant expression expected");
4286 return MatchOperand_ParseFail;
4287 }
4288 int Val = CE->getValue();
4289 if (Val < Low || Val > High) {
4290 Error(Loc, "immediate value out of range");
4291 return MatchOperand_ParseFail;
4292 }
4293
4294 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4295
4296 return MatchOperand_Success;
4297 }
4298
4299 ARMAsmParser::OperandMatchResultTy
4300 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4301 MCAsmParser &Parser = getParser();
4302 const AsmToken &Tok = Parser.getTok();
4303 SMLoc S = Tok.getLoc();
4304 if (Tok.isNot(AsmToken::Identifier)) {
4305 Error(S, "'be' or 'le' operand expected");
4306 return MatchOperand_ParseFail;
4307 }
4308 int Val = StringSwitch<int>(Tok.getString().lower())
4309 .Case("be", 1)
4310 .Case("le", 0)
4311 .Default(-1);
4312 Parser.Lex(); // Eat the token.
4313
4314 if (Val == -1) {
4315 Error(S, "'be' or 'le' operand expected");
4316 return MatchOperand_ParseFail;
4317 }
4318 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4319 getContext()),
4320 S, Tok.getEndLoc()));
4321 return MatchOperand_Success;
4322 }
4323
4324 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4325 /// instructions. Legal values are:
4326 /// lsl #n 'n' in [0,31]
4327 /// asr #n 'n' in [1,32]
4328 /// n == 32 encoded as n == 0.
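/// Illustrative examples: "ssat r0, #8, r1, lsl #4", and in ARM mode
/// "ssat r0, #8, r1, asr #32" (where #32 is encoded as 0).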
4329 ARMAsmParser::OperandMatchResultTy
4330 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4331 MCAsmParser &Parser = getParser();
4332 const AsmToken &Tok = Parser.getTok();
4333 SMLoc S = Tok.getLoc();
4334 if (Tok.isNot(AsmToken::Identifier)) {
4335 Error(S, "shift operator 'asr' or 'lsl' expected");
4336 return MatchOperand_ParseFail;
4337 }
4338 StringRef ShiftName = Tok.getString();
4339 bool isASR;
4340 if (ShiftName == "lsl" || ShiftName == "LSL")
4341 isASR = false;
4342 else if (ShiftName == "asr" || ShiftName == "ASR")
4343 isASR = true;
4344 else {
4345 Error(S, "shift operator 'asr' or 'lsl' expected");
4346 return MatchOperand_ParseFail;
4347 }
4348 Parser.Lex(); // Eat the operator.
4349
4350 // A '#' and a shift amount.
4351 if (Parser.getTok().isNot(AsmToken::Hash) &&
4352 Parser.getTok().isNot(AsmToken::Dollar)) {
4353 Error(Parser.getTok().getLoc(), "'#' expected");
4354 return MatchOperand_ParseFail;
4355 }
4356 Parser.Lex(); // Eat hash token.
4357 SMLoc ExLoc = Parser.getTok().getLoc();
4358
4359 const MCExpr *ShiftAmount;
4360 SMLoc EndLoc;
4361 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4362 Error(ExLoc, "malformed shift expression");
4363 return MatchOperand_ParseFail;
4364 }
4365 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4366 if (!CE) {
4367 Error(ExLoc, "shift amount must be an immediate");
4368 return MatchOperand_ParseFail;
4369 }
4370
4371 int64_t Val = CE->getValue();
4372 if (isASR) {
4373 // Shift amount must be in [1,32]
4374 if (Val < 1 || Val > 32) {
4375 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4376 return MatchOperand_ParseFail;
4377 }
4378 // asr #32 is encoded as asr #0, but is not allowed in Thumb2 mode.
4379 if (isThumb() && Val == 32) {
4380 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4381 return MatchOperand_ParseFail;
4382 }
4383 if (Val == 32) Val = 0;
4384 } else {
4385 // Shift amount must be in [0,31]
4386 if (Val < 0 || Val > 31) {
4387 Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4388 return MatchOperand_ParseFail;
4389 }
4390 }
4391
4392 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4393
4394 return MatchOperand_Success;
4395 }
4396
4397 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4398 /// of instructions. Legal values are:
4399 /// ror #n 'n' in {0, 8, 16, 24}
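/// For example (illustrative): "sxtb r0, r1, ror #8". Omitting the rotate
/// operand entirely is how a rotation of zero is normally written.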
4400 ARMAsmParser::OperandMatchResultTy
4401 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4402 MCAsmParser &Parser = getParser();
4403 const AsmToken &Tok = Parser.getTok();
4404 SMLoc S = Tok.getLoc();
4405 if (Tok.isNot(AsmToken::Identifier))
4406 return MatchOperand_NoMatch;
4407 StringRef ShiftName = Tok.getString();
4408 if (ShiftName != "ror" && ShiftName != "ROR")
4409 return MatchOperand_NoMatch;
4410 Parser.Lex(); // Eat the operator.
4411
4412 // A '#' and a rotate amount.
4413 if (Parser.getTok().isNot(AsmToken::Hash) &&
4414 Parser.getTok().isNot(AsmToken::Dollar)) {
4415 Error(Parser.getTok().getLoc(), "'#' expected");
4416 return MatchOperand_ParseFail;
4417 }
4418 Parser.Lex(); // Eat hash token.
4419 SMLoc ExLoc = Parser.getTok().getLoc();
4420
4421 const MCExpr *ShiftAmount;
4422 SMLoc EndLoc;
4423 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4424 Error(ExLoc, "malformed rotate expression");
4425 return MatchOperand_ParseFail;
4426 }
4427 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4428 if (!CE) {
4429 Error(ExLoc, "rotate amount must be an immediate");
4430 return MatchOperand_ParseFail;
4431 }
4432
4433 int64_t Val = CE->getValue();
4434 // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
4435 // normally, zero is represented in asm by omitting the rotate operand
4436 // entirely.
4437 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4438 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4439 return MatchOperand_ParseFail;
4440 }
4441
4442 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4443
4444 return MatchOperand_Success;
4445 }
4446
4447 ARMAsmParser::OperandMatchResultTy
4448 ARMAsmParser::parseModImm(OperandVector &Operands) {
4449 MCAsmParser &Parser = getParser();
4450 MCAsmLexer &Lexer = getLexer();
4451 int64_t Imm1, Imm2;
4452
4453 SMLoc S = Parser.getTok().getLoc();
4454
4455 // 1) A mod_imm operand can appear in the place of a register name:
4456 // add r0, #mod_imm
4457 // add r0, r0, #mod_imm
4458 // to correctly handle the latter, we bail out as soon as we see an
4459 // identifier.
4460 //
4461 // 2) Similarly, we do not want to parse into complex operands:
4462 // mov r0, #mod_imm
4463 // mov r0, :lower16:(_foo)
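  //
  // As an illustration, "#0x3F0000" and the explicit pair "#63, #16" describe
  // the same modified immediate: 0x3F rotated right by 16 bits.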
4464 if (Parser.getTok().is(AsmToken::Identifier) ||
4465 Parser.getTok().is(AsmToken::Colon))
4466 return MatchOperand_NoMatch;
4467
4468 // Hash (dollar) is optional as per the ARMARM
4469 if (Parser.getTok().is(AsmToken::Hash) ||
4470 Parser.getTok().is(AsmToken::Dollar)) {
4471 // Avoid parsing into complex operands (#:)
4472 if (Lexer.peekTok().is(AsmToken::Colon))
4473 return MatchOperand_NoMatch;
4474
4475 // Eat the hash (dollar)
4476 Parser.Lex();
4477 }
4478
4479 SMLoc Sx1, Ex1;
4480 Sx1 = Parser.getTok().getLoc();
4481 const MCExpr *Imm1Exp;
4482 if (getParser().parseExpression(Imm1Exp, Ex1)) {
4483 Error(Sx1, "malformed expression");
4484 return MatchOperand_ParseFail;
4485 }
4486
4487 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4488
4489 if (CE) {
4490 // Immediate must fit within 32 bits
4491 Imm1 = CE->getValue();
4492 int Enc = ARM_AM::getSOImmVal(Imm1);
4493 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4494 // We have a match!
4495 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4496 (Enc & 0xF00) >> 7,
4497 Sx1, Ex1));
4498 return MatchOperand_Success;
4499 }
4500
4501 // We have parsed an immediate which is not for us, fall back to a plain
4502 // immediate. This can happen for instruction aliases. For example,
4503 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4504 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4505 // instruction with a mod_imm operand. The alias is defined such that the
4506 // parser method is shared, that's why we have to do this here.
4507 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4508 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4509 return MatchOperand_Success;
4510 }
4511 } else {
4512 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4513 // MCFixup). Fall back to a plain immediate.
4514 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4515 return MatchOperand_Success;
4516 }
4517
4518 // From this point onward, we expect the input to be a (#bits, #rot) pair
4519 if (Parser.getTok().isNot(AsmToken::Comma)) {
4520 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4521 return MatchOperand_ParseFail;
4522 }
4523
4524 if (Imm1 & ~0xFF) {
4525 Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4526 return MatchOperand_ParseFail;
4527 }
4528
4529 // Eat the comma
4530 Parser.Lex();
4531
4532 // Repeat for #rot
4533 SMLoc Sx2, Ex2;
4534 Sx2 = Parser.getTok().getLoc();
4535
4536 // Eat the optional hash (dollar)
4537 if (Parser.getTok().is(AsmToken::Hash) ||
4538 Parser.getTok().is(AsmToken::Dollar))
4539 Parser.Lex();
4540
4541 const MCExpr *Imm2Exp;
4542 if (getParser().parseExpression(Imm2Exp, Ex2)) {
4543 Error(Sx2, "malformed expression");
4544 return MatchOperand_ParseFail;
4545 }
4546
4547 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4548
4549 if (CE) {
4550 Imm2 = CE->getValue();
4551 if (!(Imm2 & ~0x1E)) {
4552 // We have a match!
4553 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4554 return MatchOperand_Success;
4555 }
4556 Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4557 return MatchOperand_ParseFail;
4558 } else {
4559 Error(Sx2, "constant expression expected");
4560 return MatchOperand_ParseFail;
4561 }
4562 }
4563
4564 ARMAsmParser::OperandMatchResultTy
4565 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4566 MCAsmParser &Parser = getParser();
4567 SMLoc S = Parser.getTok().getLoc();
4568 // The bitfield descriptor is really two operands, the LSB and the width.
4569 if (Parser.getTok().isNot(AsmToken::Hash) &&
4570 Parser.getTok().isNot(AsmToken::Dollar)) {
4571 Error(Parser.getTok().getLoc(), "'#' expected");
4572 return MatchOperand_ParseFail;
4573 }
4574 Parser.Lex(); // Eat hash token.
4575
4576 const MCExpr *LSBExpr;
4577 SMLoc E = Parser.getTok().getLoc();
4578 if (getParser().parseExpression(LSBExpr)) {
4579 Error(E, "malformed immediate expression");
4580 return MatchOperand_ParseFail;
4581 }
4582 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4583 if (!CE) {
4584 Error(E, "'lsb' operand must be an immediate");
4585 return MatchOperand_ParseFail;
4586 }
4587
4588 int64_t LSB = CE->getValue();
4589 // The LSB must be in the range [0,31]
4590 if (LSB < 0 || LSB > 31) {
4591 Error(E, "'lsb' operand must be in the range [0,31]");
4592 return MatchOperand_ParseFail;
4593 }
4594 E = Parser.getTok().getLoc();
4595
4596 // Expect another immediate operand.
4597 if (Parser.getTok().isNot(AsmToken::Comma)) {
4598 Error(Parser.getTok().getLoc(), "too few operands");
4599 return MatchOperand_ParseFail;
4600 }
4601 Parser.Lex(); // Eat comma token.
4602 if (Parser.getTok().isNot(AsmToken::Hash) &&
4603 Parser.getTok().isNot(AsmToken::Dollar)) {
4604 Error(Parser.getTok().getLoc(), "'#' expected");
4605 return MatchOperand_ParseFail;
4606 }
4607 Parser.Lex(); // Eat hash token.
4608
4609 const MCExpr *WidthExpr;
4610 SMLoc EndLoc;
4611 if (getParser().parseExpression(WidthExpr, EndLoc)) {
4612 Error(E, "malformed immediate expression");
4613 return MatchOperand_ParseFail;
4614 }
4615 CE = dyn_cast<MCConstantExpr>(WidthExpr);
4616 if (!CE) {
4617 Error(E, "'width' operand must be an immediate");
4618 return MatchOperand_ParseFail;
4619 }
4620
4621 int64_t Width = CE->getValue();
4622 // The width must be in the range [1,32-lsb]
4623 if (Width < 1 || Width > 32 - LSB) {
4624 Error(E, "'width' operand must be in the range [1,32-lsb]");
4625 return MatchOperand_ParseFail;
4626 }
4627
4628 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4629
4630 return MatchOperand_Success;
4631 }
4632
4633 ARMAsmParser::OperandMatchResultTy
4634 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4635 // Check for a post-index addressing register operand. Specifically:
4636 // postidx_reg := '+' register {, shift}
4637 // | '-' register {, shift}
4638 // | register {, shift}
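  //
  // For example (illustrative), the "r2, lsl #2" in:
  //   ldr r0, [r1], r2, lsl #2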
4639
4640 // This method must return MatchOperand_NoMatch without consuming any tokens
4641 // in the case where there is no match, as other alternatives take other
4642 // parse methods.
4643 MCAsmParser &Parser = getParser();
4644 AsmToken Tok = Parser.getTok();
4645 SMLoc S = Tok.getLoc();
4646 bool haveEaten = false;
4647 bool isAdd = true;
4648 if (Tok.is(AsmToken::Plus)) {
4649 Parser.Lex(); // Eat the '+' token.
4650 haveEaten = true;
4651 } else if (Tok.is(AsmToken::Minus)) {
4652 Parser.Lex(); // Eat the '-' token.
4653 isAdd = false;
4654 haveEaten = true;
4655 }
4656
4657 SMLoc E = Parser.getTok().getEndLoc();
4658 int Reg = tryParseRegister();
4659 if (Reg == -1) {
4660 if (!haveEaten)
4661 return MatchOperand_NoMatch;
4662 Error(Parser.getTok().getLoc(), "register expected");
4663 return MatchOperand_ParseFail;
4664 }
4665
4666 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4667 unsigned ShiftImm = 0;
4668 if (Parser.getTok().is(AsmToken::Comma)) {
4669 Parser.Lex(); // Eat the ','.
4670 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4671 return MatchOperand_ParseFail;
4672
4673 // FIXME: Only approximates end...may include intervening whitespace.
4674 E = Parser.getTok().getLoc();
4675 }
4676
4677 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4678 ShiftImm, S, E));
4679
4680 return MatchOperand_Success;
4681 }
4682
4683 ARMAsmParser::OperandMatchResultTy
4684 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4685 // Check for a post-index addressing register operand. Specifically:
4686 // am3offset := '+' register
4687 // | '-' register
4688 // | register
4689 // | # imm
4690 // | # + imm
4691 // | # - imm
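  //
  // For example (illustrative), the "#8" and "-r3" in:
  //   ldrd r0, r1, [r2], #8
  //   strh r0, [r1], -r3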
4692
4693 // This method must return MatchOperand_NoMatch without consuming any tokens
4694 // in the case where there is no match, as other alternatives take other
4695 // parse methods.
4696 MCAsmParser &Parser = getParser();
4697 AsmToken Tok = Parser.getTok();
4698 SMLoc S = Tok.getLoc();
4699
4700 // Do immediates first, as we always parse those if we have a '#'.
4701 if (Parser.getTok().is(AsmToken::Hash) ||
4702 Parser.getTok().is(AsmToken::Dollar)) {
4703 Parser.Lex(); // Eat '#' or '$'.
4704 // Explicitly look for a '-', as we need to encode negative zero
4705 // differently.
4706 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4707 const MCExpr *Offset;
4708 SMLoc E;
4709 if (getParser().parseExpression(Offset, E))
4710 return MatchOperand_ParseFail;
4711 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4712 if (!CE) {
4713 Error(S, "constant expression expected");
4714 return MatchOperand_ParseFail;
4715 }
4716 // Negative zero is encoded as the flag value INT32_MIN.
4717 int32_t Val = CE->getValue();
4718 if (isNegative && Val == 0)
4719 Val = INT32_MIN;
4720
4721 Operands.push_back(
4722 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4723
4724 return MatchOperand_Success;
4725 }
4726
4727
4728 bool haveEaten = false;
4729 bool isAdd = true;
4730 if (Tok.is(AsmToken::Plus)) {
4731 Parser.Lex(); // Eat the '+' token.
4732 haveEaten = true;
4733 } else if (Tok.is(AsmToken::Minus)) {
4734 Parser.Lex(); // Eat the '-' token.
4735 isAdd = false;
4736 haveEaten = true;
4737 }
4738
4739 Tok = Parser.getTok();
4740 int Reg = tryParseRegister();
4741 if (Reg == -1) {
4742 if (!haveEaten)
4743 return MatchOperand_NoMatch;
4744 Error(Tok.getLoc(), "register expected");
4745 return MatchOperand_ParseFail;
4746 }
4747
4748 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4749 0, S, Tok.getEndLoc()));
4750
4751 return MatchOperand_Success;
4752 }
4753
4754 /// Convert parsed operands to MCInst. Needed here because this instruction
4755 /// only has two register operands, but multiplication is commutative so
4756 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4757 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4758 const OperandVector &Operands) {
4759 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4760 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4761 // If we have a three-operand form, make sure to set Rn to be the operand
4762 // that isn't the same as Rd.
4763 unsigned RegOp = 4;
4764 if (Operands.size() == 6 &&
4765 ((ARMOperand &)*Operands[4]).getReg() ==
4766 ((ARMOperand &)*Operands[3]).getReg())
4767 RegOp = 5;
4768 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4769 Inst.addOperand(Inst.getOperand(0));
4770 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4771 }
4772
4773 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4774 const OperandVector &Operands) {
4775 int CondOp = -1, ImmOp = -1;
4776 switch(Inst.getOpcode()) {
4777 case ARM::tB:
4778 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4779
4780 case ARM::t2B:
4781 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4782
4783 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4784 }
4785 // first decide whether or not the branch should be conditional
4786 // by looking at its location relative to an IT block
4787 if(inITBlock()) {
4788 // inside an IT block we cannot have any conditional branches; any
4789 // such instruction needs to be converted to unconditional form
4790 switch(Inst.getOpcode()) {
4791 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4792 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4793 }
4794 } else {
4795 // outside IT blocks we can only have unconditional branches with AL
4796 // condition code or conditional branches with non-AL condition code
4797 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4798 switch(Inst.getOpcode()) {
4799 case ARM::tB:
4800 case ARM::tBcc:
4801 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4802 break;
4803 case ARM::t2B:
4804 case ARM::t2Bcc:
4805 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4806 break;
4807 }
4808 }
4809
4810 // now decide on encoding size based on branch target range
4811 switch(Inst.getOpcode()) {
4812 // classify tB as either t2B or t1B based on range of immediate operand
4813 case ARM::tB: {
4814 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4815 if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
4816 Inst.setOpcode(ARM::t2B);
4817 break;
4818 }
4819 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4820 case ARM::tBcc: {
4821 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4822 if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
4823 Inst.setOpcode(ARM::t2Bcc);
4824 break;
4825 }
4826 }
4827 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4828 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4829 }
4830
4831 /// Parse an ARM memory expression. Return false if successful, otherwise emit a
4832 /// diagnostic and return true. The first token must be a '[' when called.
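/// A few of the accepted forms, for illustration (not exhaustive):
///   [r0]          [r0, #4]!          [r0, -r1, lsl #2]
///   [r0:128]      @ alignment specifier for NEON element/structure accesses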
4833 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4834 MCAsmParser &Parser = getParser();
4835 SMLoc S, E;
4836 assert(Parser.getTok().is(AsmToken::LBrac) &&
4837 "Token is not a Left Bracket");
4838 S = Parser.getTok().getLoc();
4839 Parser.Lex(); // Eat left bracket token.
4840
4841 const AsmToken &BaseRegTok = Parser.getTok();
4842 int BaseRegNum = tryParseRegister();
4843 if (BaseRegNum == -1)
4844 return Error(BaseRegTok.getLoc(), "register expected");
4845
4846 // The next token must either be a comma, a colon or a closing bracket.
4847 const AsmToken &Tok = Parser.getTok();
4848 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4849 !Tok.is(AsmToken::RBrac))
4850 return Error(Tok.getLoc(), "malformed memory operand");
4851
4852 if (Tok.is(AsmToken::RBrac)) {
4853 E = Tok.getEndLoc();
4854 Parser.Lex(); // Eat right bracket token.
4855
4856 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4857 ARM_AM::no_shift, 0, 0, false,
4858 S, E));
4859
4860 // If there's a pre-indexing writeback marker, '!', just add it as a token
4861 // operand. It's rather odd, but syntactically valid.
4862 if (Parser.getTok().is(AsmToken::Exclaim)) {
4863 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4864 Parser.Lex(); // Eat the '!'.
4865 }
4866
4867 return false;
4868 }
4869
4870 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4871 "Lost colon or comma in memory operand?!");
4872 if (Tok.is(AsmToken::Comma)) {
4873 Parser.Lex(); // Eat the comma.
4874 }
4875
4876 // If we have a ':', it's an alignment specifier.
4877 if (Parser.getTok().is(AsmToken::Colon)) {
4878 Parser.Lex(); // Eat the ':'.
4879 E = Parser.getTok().getLoc();
4880 SMLoc AlignmentLoc = Tok.getLoc();
4881
4882 const MCExpr *Expr;
4883 if (getParser().parseExpression(Expr))
4884 return true;
4885
4886 // The expression has to be a constant. Memory references with relocations
4887 // don't come through here, as they use the <label> forms of the relevant
4888 // instructions.
4889 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4890 if (!CE)
4891 return Error (E, "constant expression expected");
4892
4893 unsigned Align = 0;
4894 switch (CE->getValue()) {
4895 default:
4896 return Error(E,
4897 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4898 case 16: Align = 2; break;
4899 case 32: Align = 4; break;
4900 case 64: Align = 8; break;
4901 case 128: Align = 16; break;
4902 case 256: Align = 32; break;
4903 }
4904
4905 // Now we should have the closing ']'
4906 if (Parser.getTok().isNot(AsmToken::RBrac))
4907 return Error(Parser.getTok().getLoc(), "']' expected");
4908 E = Parser.getTok().getEndLoc();
4909 Parser.Lex(); // Eat right bracket token.
4910
4911 // Don't worry about range checking the value here. That's handled by
4912 // the is*() predicates.
4913 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4914 ARM_AM::no_shift, 0, Align,
4915 false, S, E, AlignmentLoc));
4916
4917 // If there's a pre-indexing writeback marker, '!', just add it as a token
4918 // operand.
4919 if (Parser.getTok().is(AsmToken::Exclaim)) {
4920 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4921 Parser.Lex(); // Eat the '!'.
4922 }
4923
4924 return false;
4925 }
4926
4927 // If we have a '#', it's an immediate offset, else assume it's a register
4928 // offset. Be friendly and also accept a plain integer (without a leading
4929 // hash) for gas compatibility.
4930 if (Parser.getTok().is(AsmToken::Hash) ||
4931 Parser.getTok().is(AsmToken::Dollar) ||
4932 Parser.getTok().is(AsmToken::Integer)) {
4933 if (Parser.getTok().isNot(AsmToken::Integer))
4934 Parser.Lex(); // Eat '#' or '$'.
4935 E = Parser.getTok().getLoc();
4936
4937 bool isNegative = getParser().getTok().is(AsmToken::Minus);
4938 const MCExpr *Offset;
4939 if (getParser().parseExpression(Offset))
4940 return true;
4941
4942 // The expression has to be a constant. Memory references with relocations
4943 // don't come through here, as they use the <label> forms of the relevant
4944 // instructions.
4945 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4946 if (!CE)
4947 return Error (E, "constant expression expected");
4948
4949 // If the constant was #-0, represent it as INT32_MIN.
4950 int32_t Val = CE->getValue();
4951 if (isNegative && Val == 0)
4952 CE = MCConstantExpr::create(INT32_MIN, getContext());
4953
4954 // Now we should have the closing ']'
4955 if (Parser.getTok().isNot(AsmToken::RBrac))
4956 return Error(Parser.getTok().getLoc(), "']' expected");
4957 E = Parser.getTok().getEndLoc();
4958 Parser.Lex(); // Eat right bracket token.
4959
4960 // Don't worry about range checking the value here. That's handled by
4961 // the is*() predicates.
4962 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4963 ARM_AM::no_shift, 0, 0,
4964 false, S, E));
4965
4966 // If there's a pre-indexing writeback marker, '!', just add it as a token
4967 // operand.
4968 if (Parser.getTok().is(AsmToken::Exclaim)) {
4969 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4970 Parser.Lex(); // Eat the '!'.
4971 }
4972
4973 return false;
4974 }
4975
4976 // The register offset is optionally preceded by a '+' or '-'
4977 bool isNegative = false;
4978 if (Parser.getTok().is(AsmToken::Minus)) {
4979 isNegative = true;
4980 Parser.Lex(); // Eat the '-'.
4981 } else if (Parser.getTok().is(AsmToken::Plus)) {
4982 // Nothing to do.
4983 Parser.Lex(); // Eat the '+'.
4984 }
4985
4986 E = Parser.getTok().getLoc();
4987 int OffsetRegNum = tryParseRegister();
4988 if (OffsetRegNum == -1)
4989 return Error(E, "register expected");
4990
4991 // If there's a shift operator, handle it.
4992 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4993 unsigned ShiftImm = 0;
4994 if (Parser.getTok().is(AsmToken::Comma)) {
4995 Parser.Lex(); // Eat the ','.
4996 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4997 return true;
4998 }
4999
5000 // Now we should have the closing ']'
5001 if (Parser.getTok().isNot(AsmToken::RBrac))
5002 return Error(Parser.getTok().getLoc(), "']' expected");
5003 E = Parser.getTok().getEndLoc();
5004 Parser.Lex(); // Eat right bracket token.
5005
5006 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5007 ShiftType, ShiftImm, 0, isNegative,
5008 S, E));
5009
5010 // If there's a pre-indexing writeback marker, '!', just add it as a token
5011 // operand.
5012 if (Parser.getTok().is(AsmToken::Exclaim)) {
5013 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5014 Parser.Lex(); // Eat the '!'.
5015 }
5016
5017 return false;
5018 }
5019
5020 /// parseMemRegOffsetShift - Parse one of these two:
5021 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5022 ///   rrx
5023 /// Returns false if a shift was parsed successfully, true on error.
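/// e.g. (illustrative) the ", lsl #2" in "ldr r0, [r1, r2, lsl #2]".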
5024 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5025 unsigned &Amount) {
5026 MCAsmParser &Parser = getParser();
5027 SMLoc Loc = Parser.getTok().getLoc();
5028 const AsmToken &Tok = Parser.getTok();
5029 if (Tok.isNot(AsmToken::Identifier))
5030 return true;
5031 StringRef ShiftName = Tok.getString();
5032 if (ShiftName == "lsl" || ShiftName == "LSL" ||
5033 ShiftName == "asl" || ShiftName == "ASL")
5034 St = ARM_AM::lsl;
5035 else if (ShiftName == "lsr" || ShiftName == "LSR")
5036 St = ARM_AM::lsr;
5037 else if (ShiftName == "asr" || ShiftName == "ASR")
5038 St = ARM_AM::asr;
5039 else if (ShiftName == "ror" || ShiftName == "ROR")
5040 St = ARM_AM::ror;
5041 else if (ShiftName == "rrx" || ShiftName == "RRX")
5042 St = ARM_AM::rrx;
5043 else
5044 return Error(Loc, "illegal shift operator");
5045 Parser.Lex(); // Eat shift type token.
5046
5047 // rrx stands alone.
5048 Amount = 0;
5049 if (St != ARM_AM::rrx) {
5050 Loc = Parser.getTok().getLoc();
5051 // A '#' and a shift amount.
5052 const AsmToken &HashTok = Parser.getTok();
5053 if (HashTok.isNot(AsmToken::Hash) &&
5054 HashTok.isNot(AsmToken::Dollar))
5055 return Error(HashTok.getLoc(), "'#' expected");
5056 Parser.Lex(); // Eat hash token.
5057
5058 const MCExpr *Expr;
5059 if (getParser().parseExpression(Expr))
5060 return true;
5061 // Range check the immediate.
5062 // lsl, ror: 0 <= imm <= 31
5063 // lsr, asr: 0 <= imm <= 32
5064 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5065 if (!CE)
5066 return Error(Loc, "shift amount must be an immediate");
5067 int64_t Imm = CE->getValue();
5068 if (Imm < 0 ||
5069 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5070 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5071 return Error(Loc, "immediate shift value out of range");
5072 // If <ShiftTy> #0, canonicalize it to lsl (lsl #0 is equivalent to no shift).
5073 if (Imm == 0)
5074 St = ARM_AM::lsl;
5075 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5076 if (Imm == 32)
5077 Imm = 0;
5078 Amount = Imm;
5079 }
5080
5081 return false;
5082 }
5083
5084 /// parseFPImm - A floating point immediate expression operand.
5085 ARMAsmParser::OperandMatchResultTy
5086 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5087 MCAsmParser &Parser = getParser();
5088 // Anything that can accept a floating point constant as an operand
5089 // needs to go through here, as the regular parseExpression is
5090 // integer only.
5091 //
5092 // This routine still creates a generic Immediate operand, containing
5093 // a bitcast of the 64-bit floating point value. The various operands
5094 // that accept floats can check whether the value is valid for them
5095 // via the standard is*() predicates.
5096
5097 SMLoc S = Parser.getTok().getLoc();
5098
5099 if (Parser.getTok().isNot(AsmToken::Hash) &&
5100 Parser.getTok().isNot(AsmToken::Dollar))
5101 return MatchOperand_NoMatch;
5102
5103 // Disambiguate the VMOV forms that can accept an FP immediate.
5104 // vmov.f32 <sreg>, #imm
5105 // vmov.f64 <dreg>, #imm
5106 // vmov.f32 <dreg>, #imm @ vector f32x2
5107 // vmov.f32 <qreg>, #imm @ vector f32x4
5108 //
5109 // There are also the NEON VMOV instructions which expect an
5110 // integer constant. Make sure we don't try to parse an FPImm
5111 // for these:
5112 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5113 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5114 bool isVmovf = TyOp.isToken() &&
5115 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5116 TyOp.getToken() == ".f16");
5117 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5118 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5119 Mnemonic.getToken() == "fconsts");
5120 if (!(isVmovf || isFconst))
5121 return MatchOperand_NoMatch;
5122
5123 Parser.Lex(); // Eat '#' or '$'.
5124
5125 // Handle negation, as that still comes through as a separate token.
5126 bool isNegative = false;
5127 if (Parser.getTok().is(AsmToken::Minus)) {
5128 isNegative = true;
5129 Parser.Lex();
5130 }
5131 const AsmToken &Tok = Parser.getTok();
5132 SMLoc Loc = Tok.getLoc();
5133 if (Tok.is(AsmToken::Real) && isVmovf) {
5134 APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
5135 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5136 // If we had a '-' in front, toggle the sign bit.
5137 IntVal ^= (uint64_t)isNegative << 31;
5138 Parser.Lex(); // Eat the token.
5139 Operands.push_back(ARMOperand::CreateImm(
5140 MCConstantExpr::create(IntVal, getContext()),
5141 S, Parser.getTok().getLoc()));
5142 return MatchOperand_Success;
5143 }
5144 // Also handle plain integers. Instructions which allow floating point
5145 // immediates also allow a raw encoded 8-bit value.
5146 if (Tok.is(AsmToken::Integer) && isFconst) {
5147 int64_t Val = Tok.getIntVal();
5148 Parser.Lex(); // Eat the token.
5149 if (Val > 255 || Val < 0) {
5150 Error(Loc, "encoded floating point value out of range");
5151 return MatchOperand_ParseFail;
5152 }
5153 float RealVal = ARM_AM::getFPImmFloat(Val);
5154 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5155
5156 Operands.push_back(ARMOperand::CreateImm(
5157 MCConstantExpr::create(Val, getContext()), S,
5158 Parser.getTok().getLoc()));
5159 return MatchOperand_Success;
5160 }
5161
5162 Error(Loc, "invalid floating point immediate");
5163 return MatchOperand_ParseFail;
5164 }
5165
5166 /// Parse an ARM instruction operand. For now this parses the operand regardless
5167 /// of the mnemonic.
5168 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5169 MCAsmParser &Parser = getParser();
5170 SMLoc S, E;
5171
5172 // Check if the current operand has a custom associated parser, if so, try to
5173 // custom parse the operand, or fallback to the general approach.
5174 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5175 if (ResTy == MatchOperand_Success)
5176 return false;
5177 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5178 // there was a match, but an error occurred, in which case, just return that
5179 // the operand parsing failed.
5180 if (ResTy == MatchOperand_ParseFail)
5181 return true;
5182
5183 switch (getLexer().getKind()) {
5184 default:
5185 Error(Parser.getTok().getLoc(), "unexpected token in operand");
5186 return true;
5187 case AsmToken::Identifier: {
5188 // If we've seen a branch mnemonic, the next operand must be a label. This
5189 // is true even if the label is a register name. So "b r1" means branch to
5190 // label "r1".
5191 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5192 if (!ExpectLabel) {
5193 if (!tryParseRegisterWithWriteBack(Operands))
5194 return false;
5195 int Res = tryParseShiftRegister(Operands);
5196 if (Res == 0) // success
5197 return false;
5198 else if (Res == -1) // irrecoverable error
5199 return true;
5200 // If this is VMRS, check for the apsr_nzcv operand.
5201 if (Mnemonic == "vmrs" &&
5202 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5203 S = Parser.getTok().getLoc();
5204 Parser.Lex();
5205 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5206 return false;
5207 }
5208 }
5209
5210 // Fall through for the Identifier case that is not a register or a
5211 // special name.
5212 }
5213 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
5214 case AsmToken::Integer: // things like 1f and 2b as branch targets
5215 case AsmToken::String: // quoted label names.
5216 case AsmToken::Dot: { // . as a branch target
5217 // This was not a register so parse other operands that start with an
5218 // identifier (like labels) as expressions and create them as immediates.
5219 const MCExpr *IdVal;
5220 S = Parser.getTok().getLoc();
5221 if (getParser().parseExpression(IdVal))
5222 return true;
5223 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5224 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5225 return false;
5226 }
5227 case AsmToken::LBrac:
5228 return parseMemory(Operands);
5229 case AsmToken::LCurly:
5230 return parseRegisterList(Operands);
5231 case AsmToken::Dollar:
5232 case AsmToken::Hash: {
5233 // #42 -> immediate.
5234 S = Parser.getTok().getLoc();
5235 Parser.Lex();
5236
5237 if (Parser.getTok().isNot(AsmToken::Colon)) {
5238 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5239 const MCExpr *ImmVal;
5240 if (getParser().parseExpression(ImmVal))
5241 return true;
5242 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5243 if (CE) {
5244 int32_t Val = CE->getValue();
5245 if (isNegative && Val == 0)
5246 ImmVal = MCConstantExpr::create(INT32_MIN, getContext());
5247 }
5248 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5249 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5250
5251 // There can be a trailing '!' on operands that we want as a separate
5252 // '!' Token operand. Handle that here. For example, the compatibility
5253 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5254 if (Parser.getTok().is(AsmToken::Exclaim)) {
5255 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5256 Parser.getTok().getLoc()));
5257 Parser.Lex(); // Eat exclaim token
5258 }
5259 return false;
5260 }
5261 // w/ a ':' after the '#', it's just like a plain ':'.
5262 // FALLTHROUGH
5263 }
5264 case AsmToken::Colon: {
5265 S = Parser.getTok().getLoc();
5266 // ":lower16:" and ":upper16:" expression prefixes
5267 // FIXME: Check it's an expression prefix,
5268 // e.g. (FOO - :lower16:BAR) isn't legal.
5269 ARMMCExpr::VariantKind RefKind;
5270 if (parsePrefix(RefKind))
5271 return true;
5272
5273 const MCExpr *SubExprVal;
5274 if (getParser().parseExpression(SubExprVal))
5275 return true;
5276
5277 const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5278 getContext());
5279 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5280 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5281 return false;
5282 }
5283 case AsmToken::Equal: {
5284 S = Parser.getTok().getLoc();
5285 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5286 return Error(S, "unexpected token in operand");
5287 Parser.Lex(); // Eat '='
5288 const MCExpr *SubExprVal;
5289 if (getParser().parseExpression(SubExprVal))
5290 return true;
5291 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5292 Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5293 return false;
5294 }
5295 }
5296 }
5297
5298 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
5299 // :lower16: and :upper16:.
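// For example (illustrative, 'sym' is a placeholder symbol):
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym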
5300 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5301 MCAsmParser &Parser = getParser();
5302 RefKind = ARMMCExpr::VK_ARM_None;
5303
5304 // consume an optional '#' (GNU compatibility)
5305 if (getLexer().is(AsmToken::Hash))
5306 Parser.Lex();
5307
5308 // :lower16: and :upper16: modifiers
5309 assert(getLexer().is(AsmToken::Colon) && "expected a :");
5310 Parser.Lex(); // Eat ':'
5311
5312 if (getLexer().isNot(AsmToken::Identifier)) {
5313 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5314 return true;
5315 }
5316
5317 enum {
5318 COFF = (1 << MCObjectFileInfo::IsCOFF),
5319 ELF = (1 << MCObjectFileInfo::IsELF),
5320 MACHO = (1 << MCObjectFileInfo::IsMachO)
5321 };
5322 static const struct PrefixEntry {
5323 const char *Spelling;
5324 ARMMCExpr::VariantKind VariantKind;
5325 uint8_t SupportedFormats;
5326 } PrefixEntries[] = {
5327 { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5328 { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5329 };
5330
5331 StringRef IDVal = Parser.getTok().getIdentifier();
5332
5333 const auto &Prefix =
5334 std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5335 [&IDVal](const PrefixEntry &PE) {
5336 return PE.Spelling == IDVal;
5337 });
5338 if (Prefix == std::end(PrefixEntries)) {
5339 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5340 return true;
5341 }
5342
5343 uint8_t CurrentFormat;
5344 switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5345 case MCObjectFileInfo::IsMachO:
5346 CurrentFormat = MACHO;
5347 break;
5348 case MCObjectFileInfo::IsELF:
5349 CurrentFormat = ELF;
5350 break;
5351 case MCObjectFileInfo::IsCOFF:
5352 CurrentFormat = COFF;
5353 break;
5354 }
5355
5356 if (~Prefix->SupportedFormats & CurrentFormat) {
5357 Error(Parser.getTok().getLoc(),
5358 "cannot represent relocation in the current file format");
5359 return true;
5360 }
5361
5362 RefKind = Prefix->VariantKind;
5363 Parser.Lex();
5364
5365 if (getLexer().isNot(AsmToken::Colon)) {
5366 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5367 return true;
5368 }
5369 Parser.Lex(); // Eat the last ':'
5370
5371 return false;
5372 }
5373
5374 /// \brief Given a mnemonic, split out possible predication code and carry
5375 /// setting letters to form a canonical mnemonic and flags.
5376 //
5377 // FIXME: Would be nice to autogen this.
5378 // FIXME: This is a bit of a maze of special cases.
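// As an illustration, "addseq" splits into the canonical mnemonic "add" with
// an EQ predication code and the carry-setting flag, and "cpsie" splits into
// "cps" with the IE interrupt-mode operand.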
5379 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5380 unsigned &PredicationCode,
5381 bool &CarrySetting,
5382 unsigned &ProcessorIMod,
5383 StringRef &ITMask) {
5384 PredicationCode = ARMCC::AL;
5385 CarrySetting = false;
5386 ProcessorIMod = 0;
5387
5388 // Ignore some mnemonics we know aren't predicated forms.
5389 //
5390 // FIXME: Would be nice to autogen this.
5391 if ((Mnemonic == "movs" && isThumb()) ||
5392 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
5393 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
5394 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
5395 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
5396 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
5397 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
5398 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
5399 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5400 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5401 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
5402 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5403 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5404 Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5405 Mnemonic == "bxns" || Mnemonic == "blxns")
5406 return Mnemonic;
5407
5408 // First, split out any predication code. Ignore mnemonics we know aren't
5409 // predicated but do have a carry-set and so weren't caught above.
5410 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5411 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5412 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5413 Mnemonic != "sbcs" && Mnemonic != "rscs") {
5414 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
5415 .Case("eq", ARMCC::EQ)
5416 .Case("ne", ARMCC::NE)
5417 .Case("hs", ARMCC::HS)
5418 .Case("cs", ARMCC::HS)
5419 .Case("lo", ARMCC::LO)
5420 .Case("cc", ARMCC::LO)
5421 .Case("mi", ARMCC::MI)
5422 .Case("pl", ARMCC::PL)
5423 .Case("vs", ARMCC::VS)
5424 .Case("vc", ARMCC::VC)
5425 .Case("hi", ARMCC::HI)
5426 .Case("ls", ARMCC::LS)
5427 .Case("ge", ARMCC::GE)
5428 .Case("lt", ARMCC::LT)
5429 .Case("gt", ARMCC::GT)
5430 .Case("le", ARMCC::LE)
5431 .Case("al", ARMCC::AL)
5432 .Default(~0U);
5433 if (CC != ~0U) {
5434 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5435 PredicationCode = CC;
5436 }
5437 }
5438
5439 // Next, determine if we have a carry setting bit. We explicitly ignore all
5440 // the instructions we know end in 's'.
5441 if (Mnemonic.endswith("s") &&
5442 !(Mnemonic == "cps" || Mnemonic == "mls" ||
5443 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5444 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5445 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5446 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5447 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5448 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5449 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5450 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5451 Mnemonic == "bxns" || Mnemonic == "blxns" ||
5452 (Mnemonic == "movs" && isThumb()))) {
5453 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5454 CarrySetting = true;
5455 }
5456
5457 // The "cps" instruction can have an interrupt mode operand which is glued into
5458 // the mnemonic. Check if this is the case, split it off, and parse the imod op.
5459 if (Mnemonic.startswith("cps")) {
5460 // Split out any imod code.
5461 unsigned IMod =
5462 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5463 .Case("ie", ARM_PROC::IE)
5464 .Case("id", ARM_PROC::ID)
5465 .Default(~0U);
5466 if (IMod != ~0U) {
5467 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5468 ProcessorIMod = IMod;
5469 }
5470 }
5471
5472 // The "it" instruction has the condition mask on the end of the mnemonic.
5473 if (Mnemonic.startswith("it")) {
5474 ITMask = Mnemonic.slice(2, Mnemonic.size());
5475 Mnemonic = Mnemonic.slice(0, 2);
5476 }
5477
5478 return Mnemonic;
5479 }
5480
5481 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5482 /// inclusion of carry set or predication code operands.
5483 //
5484 // FIXME: It would be nice to autogen this.
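// For example (illustrative): "add" can accept both, "bkpt" accepts neither,
// and "dmb" accepts a predication code only in Thumb mode.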
5485 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5486 bool &CanAcceptCarrySet,
5487 bool &CanAcceptPredicationCode) {
5488 CanAcceptCarrySet =
5489 Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5490 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5491 Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5492 Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5493 Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5494 Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5495 Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5496 (!isThumb() &&
5497 (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5498 Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5499
5500 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5501 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5502 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5503 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5504 Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5505 Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5506 Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5507 Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5508 Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5509 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5510 (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
5511 Mnemonic == "vmovx" || Mnemonic == "vins") {
5512 // These mnemonics are never predicable
5513 CanAcceptPredicationCode = false;
5514 } else if (!isThumb()) {
5515 // Some instructions are only predicable in Thumb mode
5516 CanAcceptPredicationCode =
5517 Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5518 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5519 Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
5520 Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
5521 Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" &&
5522 Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") &&
5523 !Mnemonic.startswith("srs");
5524 } else if (isThumbOne()) {
5525 if (hasV6MOps())
5526 CanAcceptPredicationCode = Mnemonic != "movs";
5527 else
5528 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5529 } else
5530 CanAcceptPredicationCode = true;
5531 }
5532
5533 // \brief Some Thumb instructions have two-operand forms that are not
5534 // available as three-operand forms; convert to the two-operand form if possible.
5535 //
5536 // FIXME: We would really like to be able to tablegen'erate this.
5537 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
5538 bool CarrySetting,
5539 OperandVector &Operands) {
5540 if (Operands.size() != 6)
5541 return;
5542
5543 const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5544 auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5545 if (!Op3.isReg() || !Op4.isReg())
5546 return;
5547
5548 auto Op3Reg = Op3.getReg();
5549 auto Op4Reg = Op4.getReg();
5550
5551 // For most Thumb2 cases we just generate the 3 operand form and reduce
5552 // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
5553 // won't accept SP or PC so we do the transformation here taking care
5554 // with immediate range in the 'add sp, sp, #imm' case.
5555 auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5556 if (isThumbTwo()) {
5557 if (Mnemonic != "add")
5558 return;
5559 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
5560 (Op5.isReg() && Op5.getReg() == ARM::PC);
5561 if (!TryTransform) {
5562 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
5563 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
5564 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
5565 Op5.isImm() && !Op5.isImm0_508s4());
5566 }
5567 if (!TryTransform)
5568 return;
5569 } else if (!isThumbOne())
5570 return;
5571
5572 if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5573 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5574 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5575 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
5576 return;
5577
5578 // If first 2 operands of a 3 operand instruction are the same
5579 // then transform to 2 operand version of the same instruction
5580 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
5581 bool Transform = Op3Reg == Op4Reg;
5582
5583 // For commutative operations, we might be able to transform if we swap
5584 // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
5585 // as tADDrsp.
5586 const ARMOperand *LastOp = &Op5;
5587 bool Swap = false;
5588 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
5589 ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
5590 Mnemonic == "and" || Mnemonic == "eor" ||
5591 Mnemonic == "adc" || Mnemonic == "orr")) {
5592 Swap = true;
5593 LastOp = &Op4;
5594 Transform = true;
5595 }
5596
5597 // If both registers are the same then remove one of them from
5598 // the operand list, with certain exceptions.
5599 if (Transform) {
5600 // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
5601 // 2 operand forms don't exist.
5602 if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
5603 LastOp->isReg())
5604 Transform = false;
5605
5606 // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
5607 // 3-bits because the ARMARM says not to.
5608 if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
5609 Transform = false;
5610 }
5611
5612 if (Transform) {
5613 if (Swap)
5614 std::swap(Op4, Op5);
5615 Operands.erase(Operands.begin() + 3);
5616 }
5617 }
5618
5619 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5620 OperandVector &Operands) {
5621 // FIXME: This is all horribly hacky. We really need a better way to deal
5622 // with optional operands like this in the matcher table.
5623
5624 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5625 // another does not. Specifically, the MOVW instruction does not. So we
5626 // special case it here and remove the defaulted (non-setting) cc_out
5627 // operand if that's the instruction we're trying to match.
5628 //
5629 // We do this as post-processing of the explicit operands rather than just
5630 // conditionally adding the cc_out in the first place because we need
5631 // to check the type of the parsed immediate operand.
5632 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5633 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5634 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5635 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5636 return true;
5637
5638 // Register-register 'add' for thumb does not have a cc_out operand
5639 // when there are only two register operands.
5640 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5641 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5642 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5643 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5644 return true;
5645 // Register-register 'add' for thumb does not have a cc_out operand
5646 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5647 // have to check the immediate range here since Thumb2 has a variant
5648 // that can handle a different range and has a cc_out operand.
5649 if (((isThumb() && Mnemonic == "add") ||
5650 (isThumbTwo() && Mnemonic == "sub")) &&
5651 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5652 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5653 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5654 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5655 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5656 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5657 return true;
5658 // For Thumb2, add/sub immediate does not have a cc_out operand for the
5659 // imm0_4095 variant. That's the least-preferred variant when
5660 // selecting via the generic "add" mnemonic, so to know that we
5661 // should remove the cc_out operand, we have to explicitly check that
5662 // it's not one of the other variants. Ugh.
5663 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5664 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5665 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5666 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5667 // Nest conditions rather than one big 'if' statement for readability.
5668 //
5669 // If both registers are low, we're in an IT block, and the immediate is
5670 // in range, we should use encoding T1 instead, which has a cc_out.
5671 if (inITBlock() &&
5672 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5673 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5674 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5675 return false;
5676 // Check against T3. If the second register is the PC, this is an
5677 // alternate form of ADR, which uses encoding T4, so check for that too.
5678 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5679 static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5680 return false;
5681
5682 // Otherwise, we use encoding T4, which does not have a cc_out
5683 // operand.
5684 return true;
5685 }
5686
5687 // The thumb2 multiply instruction doesn't have a CCOut register, so
5688 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5689 // use the 16-bit encoding or not.
5690 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5691 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5692 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5693 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5694 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5695 // If the registers aren't low regs, the destination reg isn't the
5696 // same as one of the source regs, or the cc_out operand is zero
5697 // outside of an IT block, we have to use the 32-bit encoding, so
5698 // remove the cc_out operand.
5699 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5700 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5701 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5702 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5703 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5704 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5705 static_cast<ARMOperand &>(*Operands[4]).getReg())))
5706 return true;
5707
5708 // Also check the 'mul' syntax variant that doesn't specify an explicit
5709 // destination register.
5710 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5711 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5712 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5713 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5714 // If the registers aren't low regs or the cc_out operand is zero
5715 // outside of an IT block, we have to use the 32-bit encoding, so
5716 // remove the cc_out operand.
5717 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5718 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5719 !inITBlock()))
5720 return true;
5721
5722
5723
5724 // Register-register 'add/sub' for thumb does not have a cc_out operand
5725 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5726 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5727 // right, this will result in better diagnostics (which operand is off)
5728 // anyway.
5729 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5730 (Operands.size() == 5 || Operands.size() == 6) &&
5731 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5732 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5733 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5734 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5735 (Operands.size() == 6 &&
5736 static_cast<ARMOperand &>(*Operands[5]).isImm())))
5737 return true;
5738
5739 return false;
5740 }
5741
5742 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5743 OperandVector &Operands) {
5744 // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
5745 unsigned RegIdx = 3;
5746 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5747 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5748 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5749 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5750 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5751 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5752 RegIdx = 4;
5753
5754 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5755 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5756 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5757 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5758 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5759 return true;
5760 }
5761 return false;
5762 }
5763
5764 static bool isDataTypeToken(StringRef Tok) {
5765 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5766 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5767 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5768 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5769 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5770 Tok == ".f" || Tok == ".d";
5771 }
5772
5773 // FIXME: This bit should probably be handled via an explicit match class
5774 // in the .td files that matches the suffix instead of having it be
5775 // a literal string token the way it is now.
5776 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5777 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5778 }
5779 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5780 unsigned VariantID);
5781
5782 static bool RequiresVFPRegListValidation(StringRef Inst,
5783 bool &AcceptSinglePrecisionOnly,
5784 bool &AcceptDoublePrecisionOnly) {
5785 if (Inst.size() < 7)
5786 return false;
5787
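// For example, "fldmias" may only take an S-register list, while "fldmiad"
// and "fldmiax" may only take D-register lists; the 7th character of the
// mnemonic selects which check to apply.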
5788 if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5789 StringRef AddressingMode = Inst.substr(4, 2);
5790 if (AddressingMode == "ia" || AddressingMode == "db" ||
5791 AddressingMode == "ea" || AddressingMode == "fd") {
5792 AcceptSinglePrecisionOnly = Inst[6] == 's';
5793 AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5794 return true;
5795 }
5796 }
5797
5798 return false;
5799 }
5800
5801 /// Parse an arm instruction mnemonic followed by its operands.
5802 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5803 SMLoc NameLoc, OperandVector &Operands) {
5804 MCAsmParser &Parser = getParser();
5805 // FIXME: Can this be done via tablegen in some fashion?
5806 bool RequireVFPRegisterListCheck;
5807 bool AcceptSinglePrecisionOnly;
5808 bool AcceptDoublePrecisionOnly;
5809 RequireVFPRegisterListCheck =
5810 RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5811 AcceptDoublePrecisionOnly);
5812
5813 // Apply mnemonic aliases before doing anything else, as the destination
5814 // mnemonic may include suffixes and we want to handle them normally.
5815 // The generic tblgen'erated code does this later, at the start of
5816 // MatchInstructionImpl(), but that's too late for aliases that include
5817 // any sort of suffix.
5818 uint64_t AvailableFeatures = getAvailableFeatures();
5819 unsigned AssemblerDialect = getParser().getAssemblerDialect();
5820 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5821
5822 // First check for the ARM-specific .req directive.
5823 if (Parser.getTok().is(AsmToken::Identifier) &&
5824 Parser.getTok().getIdentifier() == ".req") {
5825 parseDirectiveReq(Name, NameLoc);
5826 // We always return 'error' for this, as we're done with this
5827 // statement and don't need to match the instruction.
5828 return true;
5829 }
5830
5831 // Create the leading tokens for the mnemonic, split by '.' characters.
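// For example, "vadd.i32" produces the mnemonic "vadd" here; the ".i32"
// suffix is re-added as a separate token further down.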
5832 size_t Start = 0, Next = Name.find('.');
5833 StringRef Mnemonic = Name.slice(Start, Next);
5834
5835 // Split out the predication code and carry setting flag from the mnemonic.
5836 unsigned PredicationCode;
5837 unsigned ProcessorIMod;
5838 bool CarrySetting;
5839 StringRef ITMask;
5840 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5841 ProcessorIMod, ITMask);
5842
5843 // In Thumb1, only the branch (B) instruction can be predicated.
5844 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5845 Parser.eatToEndOfStatement();
5846 return Error(NameLoc, "conditional execution not supported in Thumb1");
5847 }
5848
5849 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5850
5851 // Handle the IT instruction ITMask. Convert it to a bitmask. This
5852 // is the mask as it will be for the IT encoding if the conditional
5853 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5854 // where the conditional bit0 is zero, the instruction post-processing
5855 // will adjust the mask accordingly.
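// For example, assuming the condition's bit0 is 1, "itte" (mask string "te")
// produces 0b1010: the 't' contributes a 1, the 'e' a 0, followed by the
// terminating 1 bit.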
5856 if (Mnemonic == "it") {
5857 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5858 if (ITMask.size() > 3) {
5859 Parser.eatToEndOfStatement();
5860 return Error(Loc, "too many conditions on IT instruction");
5861 }
5862 unsigned Mask = 8;
5863 for (unsigned i = ITMask.size(); i != 0; --i) {
5864 char pos = ITMask[i - 1];
5865 if (pos != 't' && pos != 'e') {
5866 Parser.eatToEndOfStatement();
5867 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5868 }
5869 Mask >>= 1;
5870 if (ITMask[i - 1] == 't')
5871 Mask |= 8;
5872 }
5873 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5874 }
5875
5876 // FIXME: This is all a pretty gross hack. We should automatically handle
5877 // optional operands like this via tblgen.
5878
5879 // Next, add the CCOut and ConditionCode operands, if needed.
5880 //
5881 // For mnemonics which can ever incorporate a carry setting bit or predication
5882 // code, our matching model involves us always generating CCOut and
5883 // ConditionCode operands to match the mnemonic "as written" and then we let
5884 // the matcher deal with finding the right instruction or generating an
5885 // appropriate error.
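// For example, "addseq r0, r1, r2" gets a CCOut operand holding CPSR and a
// ConditionCode operand holding ARMCC::EQ inserted ahead of the register
// operands.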
5886 bool CanAcceptCarrySet, CanAcceptPredicationCode;
5887 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5888
5889 // If we had a carry-set on an instruction that can't do that, issue an
5890 // error.
5891 if (!CanAcceptCarrySet && CarrySetting) {
5892 Parser.eatToEndOfStatement();
5893 return Error(NameLoc, "instruction '" + Mnemonic +
5894 "' can not set flags, but 's' suffix specified");
5895 }
5896 // If we had a predication code on an instruction that can't do that, issue an
5897 // error.
5898 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5899 Parser.eatToEndOfStatement();
5900 return Error(NameLoc, "instruction '" + Mnemonic +
5901 "' is not predicable, but condition code specified");
5902 }
5903
5904 // Add the carry setting operand, if necessary.
5905 if (CanAcceptCarrySet) {
5906 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5907 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5908 Loc));
5909 }
5910
5911 // Add the predication code operand, if necessary.
5912 if (CanAcceptPredicationCode) {
5913 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5914 CarrySetting);
5915 Operands.push_back(ARMOperand::CreateCondCode(
5916 ARMCC::CondCodes(PredicationCode), Loc));
5917 }
5918
5919 // Add the processor imod operand, if necessary.
5920 if (ProcessorIMod) {
5921 Operands.push_back(ARMOperand::CreateImm(
5922 MCConstantExpr::create(ProcessorIMod, getContext()),
5923 NameLoc, NameLoc));
5924 } else if (Mnemonic == "cps" && isMClass()) {
5925 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
5926 }
5927
5928 // Add the remaining tokens in the mnemonic.
5929 while (Next != StringRef::npos) {
5930 Start = Next;
5931 Next = Name.find('.', Start + 1);
5932 StringRef ExtraToken = Name.slice(Start, Next);
5933
5934 // Some NEON instructions have an optional datatype suffix that is
5935 // completely ignored. Check for that.
5936 if (isDataTypeToken(ExtraToken) &&
5937 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5938 continue;
5939
5940 // For ARM mode, generate an error if the .n qualifier is used.
5941 if (ExtraToken == ".n" && !isThumb()) {
5942 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5943 Parser.eatToEndOfStatement();
5944 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5945 "arm mode");
5946 }
5947
5948 // The .n qualifier is always discarded as that is what the tables
5949 // and matcher expect. In ARM mode the .w qualifier has no effect,
5950 // so discard it to avoid errors that can be caused by the matcher.
5951 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5952 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5953 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5954 }
5955 }
5956
5957 // Read the remaining operands.
5958 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5959 // Read the first operand.
5960 if (parseOperand(Operands, Mnemonic)) {
5961 Parser.eatToEndOfStatement();
5962 return true;
5963 }
5964
5965 while (getLexer().is(AsmToken::Comma)) {
5966 Parser.Lex(); // Eat the comma.
5967
5968 // Parse and remember the operand.
5969 if (parseOperand(Operands, Mnemonic)) {
5970 Parser.eatToEndOfStatement();
5971 return true;
5972 }
5973 }
5974 }
5975
5976 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5977 SMLoc Loc = getLexer().getLoc();
5978 Parser.eatToEndOfStatement();
5979 return Error(Loc, "unexpected token in argument list");
5980 }
5981
5982 Parser.Lex(); // Consume the EndOfStatement
5983
5984 if (RequireVFPRegisterListCheck) {
5985 ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
5986 if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
5987 return Error(Op.getStartLoc(),
5988 "VFP/Neon single precision register expected");
5989 if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
5990 return Error(Op.getStartLoc(),
5991 "VFP/Neon double precision register expected");
5992 }
5993
5994 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
5995
5996 // Some instructions, mostly Thumb, have forms for the same mnemonic that
5997 // do and don't have a cc_out optional-def operand. With some spot-checks
5998 // of the operand list, we can figure out which variant we're trying to
5999 // parse and adjust accordingly before actually matching. We shouldn't ever
6000 // try to remove a cc_out operand that was explicitly set on the
6001 // mnemonic, of course (CarrySetting == true). Reason #317 why the
6002 // table-driven matcher doesn't fit well with the ARM instruction set.
6003 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
6004 Operands.erase(Operands.begin() + 1);
6005
6006 // Some instructions have the same mnemonic, but don't always
6007 // have a predicate. Distinguish them here and delete the
6008 // predicate if needed.
6009 if (shouldOmitPredicateOperand(Mnemonic, Operands))
6010 Operands.erase(Operands.begin() + 1);
6011
6012 // ARM mode 'blx' needs special handling, as the register operand version
6013 // is predicable, but the label operand version is not. So, we can't rely
6014 // on the Mnemonic based checking to correctly figure out when to put
6015 // a k_CondCode operand in the list. If we're trying to match the label
6016 // version, remove the k_CondCode operand here.
6017 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
6018 static_cast<ARMOperand &>(*Operands[2]).isImm())
6019 Operands.erase(Operands.begin() + 1);
6020
6021 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
6022 // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
6023 // a single GPRPair reg operand is used in the .td file to replace the two
6024 // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
6025 // expressed as a GPRPair, so we have to manually merge them.
6026 // FIXME: We would really like to be able to tablegen'erate this.
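// For example, "ldrexd r0, r1, [r2]" parses r0 and r1 as two GPR operands;
// they are folded here into the single GPRPair covering r0 and r1.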
6027 if (!isThumb() && Operands.size() > 4 &&
6028 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
6029 Mnemonic == "stlexd")) {
6030 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
6031 unsigned Idx = isLoad ? 2 : 3;
6032 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
6033 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
6034
6035 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
6036 // Adjust only if Op1 and Op2 are GPRs.
6037 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
6038 MRC.contains(Op2.getReg())) {
6039 unsigned Reg1 = Op1.getReg();
6040 unsigned Reg2 = Op2.getReg();
6041 unsigned Rt = MRI->getEncodingValue(Reg1);
6042 unsigned Rt2 = MRI->getEncodingValue(Reg2);
6043
6044 // Rt2 must be Rt + 1 and Rt must be even.
6045 if (Rt + 1 != Rt2 || (Rt & 1)) {
6046 Error(Op2.getStartLoc(), isLoad
6047 ? "destination operands must be sequential"
6048 : "source operands must be sequential");
6049 return true;
6050 }
6051 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
6052 &(MRI->getRegClass(ARM::GPRPairRegClassID)));
6053 Operands[Idx] =
6054 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
6055 Operands.erase(Operands.begin() + Idx + 1);
6056 }
6057 }
6058
6059 // GNU Assembler extension (compatibility)
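// gas accepts e.g. "ldrd r0, [r2]" as shorthand for "ldrd r0, r1, [r2]";
// insert the implied second (odd) register of the pair here so the operand
// list matches the full form.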
6060 if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
6061 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6062 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6063 if (Op3.isMem()) {
6064 assert(Op2.isReg() && "expected register argument");
6065
6066 unsigned SuperReg = MRI->getMatchingSuperReg(
6067 Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
6068
6069 assert(SuperReg && "expected register pair");
6070
6071 unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
6072
6073 Operands.insert(
6074 Operands.begin() + 3,
6075 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6076 }
6077 }
6078
6079 // FIXME: As said above, this is all a pretty gross hack. This instruction
6080 // does not fit with other "subs" and tblgen.
6081 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
6082 // so the Mnemonic is the original name "subs" and delete the predicate
6083 // operand so it will match the table entry.
6084 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
6085 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6086 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
6087 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6088 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
6089 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6090 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
6091 Operands.erase(Operands.begin() + 1);
6092 }
6093 return false;
6094 }
6095
6096 // Validate context-sensitive operand constraints.
6097
6098 // Return 'true' if the register list contains a register that is neither a
6099 // low GPR nor HiReg, 'false' otherwise. If Reg is in the register list, set
6100 // 'containsReg' to true.
6101 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6102 unsigned Reg, unsigned HiReg,
6103 bool &containsReg) {
6104 containsReg = false;
6105 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6106 unsigned OpReg = Inst.getOperand(i).getReg();
6107 if (OpReg == Reg)
6108 containsReg = true;
6109 // Anything other than a low register isn't legal here.
6110 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6111 return true;
6112 }
6113 return false;
6114 }
6115
6116 // Check if the specified register is in the register list of the inst,
6117 // starting at the indicated operand number.
6118 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6119 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6120 unsigned OpReg = Inst.getOperand(i).getReg();
6121 if (OpReg == Reg)
6122 return true;
6123 }
6124 return false;
6125 }
6126
6127 // Return true if instruction has the interesting property of being
6128 // allowed in IT blocks, but not being predicable.
6129 static bool instIsBreakpoint(const MCInst &Inst) {
6130 return Inst.getOpcode() == ARM::tBKPT ||
6131 Inst.getOpcode() == ARM::BKPT ||
6132 Inst.getOpcode() == ARM::tHLT ||
6133 Inst.getOpcode() == ARM::HLT;
6134
6135 }
6136
6137 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6138 const OperandVector &Operands,
6139 unsigned ListNo, bool IsARPop) {
6140 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6141 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6142
6143 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6144 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6145 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6146
6147 if (!IsARPop && ListContainsSP)
6148 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6149 "SP may not be in the register list");
6150 else if (ListContainsPC && ListContainsLR)
6151 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6152 "PC and LR may not be in the register list simultaneously");
6153 else if (inITBlock() && !lastInITBlock() && ListContainsPC)
6154 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6155 "instruction must be outside of IT block or the last "
6156 "instruction in an IT block");
6157 return false;
6158 }
6159
6160 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6161 const OperandVector &Operands,
6162 unsigned ListNo) {
6163 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6164 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6165
6166 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6167 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6168
6169 if (ListContainsSP && ListContainsPC)
6170 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6171 "SP and PC may not be in the register list");
6172 else if (ListContainsSP)
6173 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6174 "SP may not be in the register list");
6175 else if (ListContainsPC)
6176 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6177 "PC may not be in the register list");
6178 return false;
6179 }
6180
6181 // FIXME: We would really like to be able to tablegen'erate this.
6182 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6183 const OperandVector &Operands) {
6184 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6185 SMLoc Loc = Operands[0]->getStartLoc();
6186
6187 // Check the IT block state first.
6188 // NOTE: BKPT and HLT instructions have the interesting property of being
6189 // allowed in IT blocks, but not being predicable. They just always execute.
6190 if (inITBlock() && !instIsBreakpoint(Inst)) {
6191 unsigned Bit = 1;
6192 if (ITState.FirstCond)
6193 ITState.FirstCond = false;
6194 else
6195 Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
6196 // The instruction must be predicable.
6197 if (!MCID.isPredicable())
6198 return Error(Loc, "instructions in IT block must be predicable");
6199 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
6200 unsigned ITCond = Bit ? ITState.Cond :
6201 ARMCC::getOppositeCondition(ITState.Cond);
6202 if (Cond != ITCond) {
6203 // Find the condition code Operand to get its SMLoc information.
6204 SMLoc CondLoc;
6205 for (unsigned I = 1; I < Operands.size(); ++I)
6206 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6207 CondLoc = Operands[I]->getStartLoc();
6208 return Error(CondLoc, "incorrect condition in IT block; got '" +
6209 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
6210 "', but expected '" +
6211 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
6212 }
6213 // Check for non-'al' condition codes outside of the IT block.
6214 } else if (isThumbTwo() && MCID.isPredicable() &&
6215 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6216 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6217 Inst.getOpcode() != ARM::t2Bcc)
6218 return Error(Loc, "predicated instructions must be in IT block");
6219
6220 const unsigned Opcode = Inst.getOpcode();
6221 switch (Opcode) {
6222 case ARM::LDRD:
6223 case ARM::LDRD_PRE:
6224 case ARM::LDRD_POST: {
6225 const unsigned RtReg = Inst.getOperand(0).getReg();
6226
6227 // Rt can't be R14.
6228 if (RtReg == ARM::LR)
6229 return Error(Operands[3]->getStartLoc(),
6230 "Rt can't be R14");
6231
6232 const unsigned Rt = MRI->getEncodingValue(RtReg);
6233 // Rt must be even-numbered.
6234 if ((Rt & 1) == 1)
6235 return Error(Operands[3]->getStartLoc(),
6236 "Rt must be even-numbered");
6237
6238 // Rt2 must be Rt + 1.
6239 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6240 if (Rt2 != Rt + 1)
6241 return Error(Operands[3]->getStartLoc(),
6242 "destination operands must be sequential");
6243
6244 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
6245 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6246 // For addressing modes with writeback, the base register needs to be
6247 // different from the destination registers.
6248 if (Rn == Rt || Rn == Rt2)
6249 return Error(Operands[3]->getStartLoc(),
6250 "base register needs to be different from destination "
6251 "registers");
6252 }
6253
6254 return false;
6255 }
6256 case ARM::t2LDRDi8:
6257 case ARM::t2LDRD_PRE:
6258 case ARM::t2LDRD_POST: {
6259 // Rt2 must be different from Rt.
6260 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6261 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6262 if (Rt2 == Rt)
6263 return Error(Operands[3]->getStartLoc(),
6264 "destination operands can't be identical");
6265 return false;
6266 }
6267 case ARM::t2BXJ: {
6268 const unsigned RmReg = Inst.getOperand(0).getReg();
6269 // Rm = SP is no longer unpredictable in v8-A
6270 if (RmReg == ARM::SP && !hasV8Ops())
6271 return Error(Operands[2]->getStartLoc(),
6272 "r13 (SP) is an unpredictable operand to BXJ");
6273 return false;
6274 }
6275 case ARM::STRD: {
6276 // Rt2 must be Rt + 1.
6277 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6278 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6279 if (Rt2 != Rt + 1)
6280 return Error(Operands[3]->getStartLoc(),
6281 "source operands must be sequential");
6282 return false;
6283 }
6284 case ARM::STRD_PRE:
6285 case ARM::STRD_POST: {
6286 // Rt2 must be Rt + 1.
6287 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6288 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6289 if (Rt2 != Rt + 1)
6290 return Error(Operands[3]->getStartLoc(),
6291 "source operands must be sequential");
6292 return false;
6293 }
6294 case ARM::STR_PRE_IMM:
6295 case ARM::STR_PRE_REG:
6296 case ARM::STR_POST_IMM:
6297 case ARM::STR_POST_REG:
6298 case ARM::STRH_PRE:
6299 case ARM::STRH_POST:
6300 case ARM::STRB_PRE_IMM:
6301 case ARM::STRB_PRE_REG:
6302 case ARM::STRB_POST_IMM:
6303 case ARM::STRB_POST_REG: {
6304 // Rt must be different from Rn.
6305 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6306 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6307
6308 if (Rt == Rn)
6309 return Error(Operands[3]->getStartLoc(),
6310 "source register and base register can't be identical");
6311 return false;
6312 }
6313 case ARM::LDR_PRE_IMM:
6314 case ARM::LDR_PRE_REG:
6315 case ARM::LDR_POST_IMM:
6316 case ARM::LDR_POST_REG:
6317 case ARM::LDRH_PRE:
6318 case ARM::LDRH_POST:
6319 case ARM::LDRSH_PRE:
6320 case ARM::LDRSH_POST:
6321 case ARM::LDRB_PRE_IMM:
6322 case ARM::LDRB_PRE_REG:
6323 case ARM::LDRB_POST_IMM:
6324 case ARM::LDRB_POST_REG:
6325 case ARM::LDRSB_PRE:
6326 case ARM::LDRSB_POST: {
6327 // Rt must be different from Rn.
6328 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6329 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6330
6331 if (Rt == Rn)
6332 return Error(Operands[3]->getStartLoc(),
6333 "destination register and base register can't be identical");
6334 return false;
6335 }
6336 case ARM::SBFX:
6337 case ARM::UBFX: {
6338 // Width must be in range [1, 32-lsb].
6339 unsigned LSB = Inst.getOperand(2).getImm();
6340 unsigned Widthm1 = Inst.getOperand(3).getImm();
6341 if (Widthm1 >= 32 - LSB)
6342 return Error(Operands[5]->getStartLoc(),
6343 "bitfield width must be in range [1,32-lsb]");
6344 return false;
6345 }
6346 // Notionally handles ARM::tLDMIA_UPD too.
6347 case ARM::tLDMIA: {
6348 // If we're parsing Thumb2, the .w variant is available and handles
6349 // most cases that are normally illegal for a Thumb1 LDM instruction.
6350 // We'll make the transformation in processInstruction() if necessary.
6351 //
6352 // Thumb LDM instructions are writeback iff the base register is not
6353 // in the register list.
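// For example, "ldmia r0!, {r1, r2}" writes back, while "ldmia r0, {r0, r1}"
// must not (and must not carry the '!' token).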
6354 unsigned Rn = Inst.getOperand(0).getReg();
6355 bool HasWritebackToken =
6356 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6357 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
6358 bool ListContainsBase;
6359 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
6360 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
6361 "registers must be in range r0-r7");
6362 // If we should have writeback, then there should be a '!' token.
6363 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
6364 return Error(Operands[2]->getStartLoc(),
6365 "writeback operator '!' expected");
6366 // If we should not have writeback, there must not be a '!'. This is
6367 // true even for the 32-bit wide encodings.
6368 if (ListContainsBase && HasWritebackToken)
6369 return Error(Operands[3]->getStartLoc(),
6370 "writeback operator '!' not allowed when base register "
6371 "in register list");
6372
6373 if (validatetLDMRegList(Inst, Operands, 3))
6374 return true;
6375 break;
6376 }
6377 case ARM::LDMIA_UPD:
6378 case ARM::LDMDB_UPD:
6379 case ARM::LDMIB_UPD:
6380 case ARM::LDMDA_UPD:
6381 // ARM variants loading and updating the same register are only officially
6382 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
6383 if (!hasV7Ops())
6384 break;
6385 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6386 return Error(Operands.back()->getStartLoc(),
6387 "writeback register not allowed in register list");
6388 break;
6389 case ARM::t2LDMIA:
6390 case ARM::t2LDMDB:
6391 if (validatetLDMRegList(Inst, Operands, 3))
6392 return true;
6393 break;
6394 case ARM::t2STMIA:
6395 case ARM::t2STMDB:
6396 if (validatetSTMRegList(Inst, Operands, 3))
6397 return true;
6398 break;
6399 case ARM::t2LDMIA_UPD:
6400 case ARM::t2LDMDB_UPD:
6401 case ARM::t2STMIA_UPD:
6402 case ARM::t2STMDB_UPD: {
6403 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6404 return Error(Operands.back()->getStartLoc(),
6405 "writeback register not allowed in register list");
6406
6407 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
6408 if (validatetLDMRegList(Inst, Operands, 3))
6409 return true;
6410 } else {
6411 if (validatetSTMRegList(Inst, Operands, 3))
6412 return true;
6413 }
6414 break;
6415 }
6416 case ARM::sysLDMIA_UPD:
6417 case ARM::sysLDMDA_UPD:
6418 case ARM::sysLDMDB_UPD:
6419 case ARM::sysLDMIB_UPD:
6420 if (!listContainsReg(Inst, 3, ARM::PC))
6421 return Error(Operands[4]->getStartLoc(),
6422 "writeback register only allowed on system LDM "
6423 "if PC in register-list");
6424 break;
6425 case ARM::sysSTMIA_UPD:
6426 case ARM::sysSTMDA_UPD:
6427 case ARM::sysSTMDB_UPD:
6428 case ARM::sysSTMIB_UPD:
6429 return Error(Operands[2]->getStartLoc(),
6430 "system STM cannot have writeback register");
6431 case ARM::tMUL: {
6432 // The second source operand must be the same register as the destination
6433 // operand.
6434 //
6435 // In this case, we must directly check the parsed operands because the
6436 // cvtThumbMultiply() function is written in such a way that it guarantees
6437 // this first statement is always true for the new Inst. Essentially, the
6438 // destination is unconditionally copied into the second source operand
6439 // without checking to see if it matches what we actually parsed.
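// For example, "muls r0, r1, r0" and "muls r0, r0, r1" are accepted, but
// "muls r0, r1, r2" is rejected here.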
6440 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
6441 ((ARMOperand &)*Operands[5]).getReg()) &&
6442 (((ARMOperand &)*Operands[3]).getReg() !=
6443 ((ARMOperand &)*Operands[4]).getReg())) {
6444 return Error(Operands[3]->getStartLoc(),
6445 "destination register must match source register");
6446 }
6447 break;
6448 }
6449 // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
6450 // so only issue a diagnostic for Thumb1. The instructions will be
6451 // switched to the t2 encodings in processInstruction() if necessary.
6452 case ARM::tPOP: {
6453 bool ListContainsBase;
6454 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
6455 !isThumbTwo())
6456 return Error(Operands[2]->getStartLoc(),
6457 "registers must be in range r0-r7 or pc");
6458 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
6459 return true;
6460 break;
6461 }
6462 case ARM::tPUSH: {
6463 bool ListContainsBase;
6464 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
6465 !isThumbTwo())
6466 return Error(Operands[2]->getStartLoc(),
6467 "registers must be in range r0-r7 or lr");
6468 if (validatetSTMRegList(Inst, Operands, 2))
6469 return true;
6470 break;
6471 }
6472 case ARM::tSTMIA_UPD: {
6473 bool ListContainsBase, InvalidLowList;
6474 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
6475 0, ListContainsBase);
6476 if (InvalidLowList && !isThumbTwo())
6477 return Error(Operands[4]->getStartLoc(),
6478 "registers must be in range r0-r7");
6479
6480 // This would be converted to a 32-bit stm, but that's not valid if the
6481 // writeback register is in the list.
6482 if (InvalidLowList && ListContainsBase)
6483 return Error(Operands[4]->getStartLoc(),
6484 "writeback operator '!' not allowed when base register "
6485 "in register list");
6486
6487 if (validatetSTMRegList(Inst, Operands, 4))
6488 return true;
6489 break;
6490 }
6491 case ARM::tADDrSP: {
6492 // If the non-SP source operand and the destination operand are not the
6493 // same, we need thumb2 (for the wide encoding), or we have an error.
6494 if (!isThumbTwo() &&
6495 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
6496 return Error(Operands[4]->getStartLoc(),
6497 "source register must be the same as destination");
6498 }
6499 break;
6500 }
6501 // Final range checking for Thumb unconditional branch instructions.
6502 case ARM::tB:
6503 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
6504 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6505 break;
6506 case ARM::t2B: {
6507 int op = (Operands[2]->isImm()) ? 2 : 3;
6508 if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
6509 return Error(Operands[op]->getStartLoc(), "branch target out of range");
6510 break;
6511 }
6512 // Final range checking for Thumb conditional branch instructions.
6513 case ARM::tBcc:
6514 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
6515 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6516 break;
6517 case ARM::t2Bcc: {
6518 int Op = (Operands[2]->isImm()) ? 2 : 3;
6519 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
6520 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6521 break;
6522 }
6523 case ARM::MOVi16:
6524 case ARM::t2MOVi16:
6525 case ARM::t2MOVTi16:
6526 {
6527 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
6528 // especially when we turn it into a movw and the expression <symbol> does
6529 // not have a :lower16: or :upper16: as part of the expression. We don't
6530 // want the behavior of silently truncating, which can be unexpected and
6531 // lead to bugs that are difficult to find since this is an easy mistake
6532 // to make.
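// For example, "movw r0, :lower16:sym" is accepted, while "movw r0, sym"
// is rejected below rather than being silently truncated.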
6533 int i = (Operands[3]->isImm()) ? 3 : 4;
6534 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
6535 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6536 if (CE) break;
6537 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6538 if (!E) break;
6539 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6540 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
6541 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
6542 return Error(
6543 Op.getStartLoc(),
6544 "immediate expression for mov requires :lower16: or :upper16");
6545 break;
6546 }
6547 case ARM::HINT:
6548 case ARM::t2HINT: {
6549 if (hasRAS()) {
6550 // ESB is not predicable (pred must be AL)
6551 unsigned Imm8 = Inst.getOperand(0).getImm();
6552 unsigned Pred = Inst.getOperand(1).getImm();
6553 if (Imm8 == 0x10 && Pred != ARMCC::AL)
6554 return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
6555 "predicable, but condition "
6556 "code specified");
6557 }
6558 // Without the RAS extension, this behaves as any other unallocated hint.
6559 break;
6560 }
6561 }
6562
6563 return false;
6564 }
6565
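// Map a pseudo NEON VST "...Asm" opcode onto the real instruction opcode and
// report the register-list spacing: 1 for consecutive D registers (d-form),
// 2 for every other D register (q-form).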
6566 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6567 switch(Opc) {
6568 default: llvm_unreachable("unexpected opcode!");
6569 // VST1LN
6570 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6571 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6572 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6573 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6574 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6575 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6576 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
6577 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6578 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6579
6580 // VST2LN
6581 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6582 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6583 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6584 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6585 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6586
6587 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6588 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6589 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6590 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6591 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6592
6593 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
6594 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6595 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6596 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6597 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6598
6599 // VST3LN
6600 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6601 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6602 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6603 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6604 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6605 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6606 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6607 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6608 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6609 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6610 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
6611 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6612 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6613 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6614 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6615
6616 // VST3
6617 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6618 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6619 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6620 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6621 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6622 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6623 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6624 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6625 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6626 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6627 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6628 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6629 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
6630 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6631 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6632 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
6633 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6634 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6635
6636 // VST4LN
6637 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6638 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6639 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6640 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
6641 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6642 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6643 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6644 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6645 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6646 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6647 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
6648 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6649 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6650 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6651 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6652
6653 // VST4
6654 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6655 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6656 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6657 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6658 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6659 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6660 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6661 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6662 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6663 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6664 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6665 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6666 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
6667 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6668 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6669 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
6670 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6671 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6672 }
6673 }
6674
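// Same mapping as above, for the pseudo NEON VLD "...Asm" opcodes.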
6675 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6676 switch(Opc) {
6677 default: llvm_unreachable("unexpected opcode!");
6678 // VLD1LN
6679 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6680 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6681 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6682 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6683 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6684 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6685 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
6686 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6687 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6688
6689 // VLD2LN
6690 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6691 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6692 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6693 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6694 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6695 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6696 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6697 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6698 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6699 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6700 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
6701 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6702 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6703 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6704 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6705
6706 // VLD3DUP
6707 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6708 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6709 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6710 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6711 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6712 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6713 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6714 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6715 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6716 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6717 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6718 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6719 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
6720 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6721 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6722 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6723 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6724 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6725
6726 // VLD3LN
6727 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6728 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6729 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6730 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6731 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6732 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6733 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6734 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6735 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6736 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6737 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
6738 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6739 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6740 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6741 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6742
6743 // VLD3
6744 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6745 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6746 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6747 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6748 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6749 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6750 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6751 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6752 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6753 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6754 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6755 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6756 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
6757 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6758 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6759 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
6760 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6761 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6762
6763 // VLD4LN
6764 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6765 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6766 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6767 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6768 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6769 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6770 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6771 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6772 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6773 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6774 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
6775 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6776 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6777 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6778 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6779
6780 // VLD4DUP
6781 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6782 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6783 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6784 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6785 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6786 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6787 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6788 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6789 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6790 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6791 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6792 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6793 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
6794 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6795 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6796 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6797 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6798 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6799
6800 // VLD4
6801 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6802 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6803 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6804 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6805 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6806 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6807 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6808 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6809 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6810 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6811 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6812 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6813 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
6814 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6815 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6816 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
6817 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6818 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6819 }
6820 }
6821
6822 bool ARMAsmParser::processInstruction(MCInst &Inst,
6823 const OperandVector &Operands,
6824 MCStreamer &Out) {
6825 switch (Inst.getOpcode()) {
6826 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6827 case ARM::LDRT_POST:
6828 case ARM::LDRBT_POST: {
6829 const unsigned Opcode =
6830 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6831 : ARM::LDRBT_POST_IMM;
6832 MCInst TmpInst;
6833 TmpInst.setOpcode(Opcode);
6834 TmpInst.addOperand(Inst.getOperand(0));
6835 TmpInst.addOperand(Inst.getOperand(1));
6836 TmpInst.addOperand(Inst.getOperand(1));
6837 TmpInst.addOperand(MCOperand::createReg(0));
6838 TmpInst.addOperand(MCOperand::createImm(0));
6839 TmpInst.addOperand(Inst.getOperand(2));
6840 TmpInst.addOperand(Inst.getOperand(3));
6841 Inst = TmpInst;
6842 return true;
6843 }
6844 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6845 case ARM::STRT_POST:
6846 case ARM::STRBT_POST: {
6847 const unsigned Opcode =
6848 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6849 : ARM::STRBT_POST_IMM;
6850 MCInst TmpInst;
6851 TmpInst.setOpcode(Opcode);
6852 TmpInst.addOperand(Inst.getOperand(1));
6853 TmpInst.addOperand(Inst.getOperand(0));
6854 TmpInst.addOperand(Inst.getOperand(1));
6855 TmpInst.addOperand(MCOperand::createReg(0));
6856 TmpInst.addOperand(MCOperand::createImm(0));
6857 TmpInst.addOperand(Inst.getOperand(2));
6858 TmpInst.addOperand(Inst.getOperand(3));
6859 Inst = TmpInst;
6860 return true;
6861 }
6862 // Alias for alternate form of 'ADR Rd, #imm' instruction.
6863 case ARM::ADDri: {
6864 if (Inst.getOperand(1).getReg() != ARM::PC ||
6865 Inst.getOperand(5).getReg() != 0 ||
6866 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6867 return false;
6868 MCInst TmpInst;
6869 TmpInst.setOpcode(ARM::ADR);
6870 TmpInst.addOperand(Inst.getOperand(0));
6871 if (Inst.getOperand(2).isImm()) {
6872 // The immediate (mod_imm) will be in its encoded form; we must decode it
6873 // before passing it to the ADR instruction.
6874 unsigned Enc = Inst.getOperand(2).getImm();
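// A mod_imm encoding packs an 8-bit value (bits [7:0]) with a 4-bit rotation
// field (bits [11:8]); the value is rotated right by twice the rotation
// field, which is what the shift by 7 recovers.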
6875 TmpInst.addOperand(MCOperand::createImm(
6876 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
6877 } else {
6878 // Turn PC-relative expression into absolute expression.
6879 // Reading PC provides the start of the current instruction + 8 and
6880 // the transform to adr is biased by that.
6881 MCSymbol *Dot = getContext().createTempSymbol();
6882 Out.EmitLabel(Dot);
6883 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6884 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
6885 MCSymbolRefExpr::VK_None,
6886 getContext());
6887 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
6888 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
6889 getContext());
6890 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
6891 getContext());
6892 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
6893 }
6894 TmpInst.addOperand(Inst.getOperand(3));
6895 TmpInst.addOperand(Inst.getOperand(4));
6896 Inst = TmpInst;
6897 return true;
6898 }
6899 // Aliases for alternate PC+imm syntax of LDR instructions.
6900 case ARM::t2LDRpcrel:
6901 // Select the narrow version if the immediate will fit.
6902 if (Inst.getOperand(1).getImm() > 0 &&
6903 Inst.getOperand(1).getImm() <= 0xff &&
6904 !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
6905 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
6906 Inst.setOpcode(ARM::tLDRpci);
6907 else
6908 Inst.setOpcode(ARM::t2LDRpci);
6909 return true;
6910 case ARM::t2LDRBpcrel:
6911 Inst.setOpcode(ARM::t2LDRBpci);
6912 return true;
6913 case ARM::t2LDRHpcrel:
6914 Inst.setOpcode(ARM::t2LDRHpci);
6915 return true;
6916 case ARM::t2LDRSBpcrel:
6917 Inst.setOpcode(ARM::t2LDRSBpci);
6918 return true;
6919 case ARM::t2LDRSHpcrel:
6920 Inst.setOpcode(ARM::t2LDRSHpci);
6921 return true;
6922 case ARM::LDRConstPool:
6923 case ARM::tLDRConstPool:
6924 case ARM::t2LDRConstPool: {
6925 // The pseudo instruction "ldr rt, =immediate" is converted to a
6926 // "mov rt, immediate" if the immediate is known and representable;
6927 // otherwise we create a constant pool entry that we load from.
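// For example, "ldr r0, =1" can become "mov r0, #1", whereas
// "ldr r0, =0x12345678" falls back to a literal pool load.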
6928 MCInst TmpInst;
6929 if (Inst.getOpcode() == ARM::LDRConstPool)
6930 TmpInst.setOpcode(ARM::LDRi12);
6931 else if (Inst.getOpcode() == ARM::tLDRConstPool)
6932 TmpInst.setOpcode(ARM::tLDRpci);
6933 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
6934 TmpInst.setOpcode(ARM::t2LDRpci);
6935 const ARMOperand &PoolOperand =
6936 static_cast<ARMOperand &>(*Operands[3]);
6937 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
6938 // If SubExprVal is a constant we may be able to use a MOV
6939 if (isa<MCConstantExpr>(SubExprVal) &&
6940 Inst.getOperand(0).getReg() != ARM::PC &&
6941 Inst.getOperand(0).getReg() != ARM::SP) {
6942 int64_t Value =
6943 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
6944 bool UseMov = true;
6945 bool MovHasS = true;
6946 if (Inst.getOpcode() == ARM::LDRConstPool) {
6947 // ARM Constant
6948 if (ARM_AM::getSOImmVal(Value) != -1) {
6949 Value = ARM_AM::getSOImmVal(Value);
6950 TmpInst.setOpcode(ARM::MOVi);
6951 }
6952 else if (ARM_AM::getSOImmVal(~Value) != -1) {
6953 Value = ARM_AM::getSOImmVal(~Value);
6954 TmpInst.setOpcode(ARM::MVNi);
6955 }
6956 else if (hasV6T2Ops() &&
6957 Value >=0 && Value < 65536) {
6958 TmpInst.setOpcode(ARM::MOVi16);
6959 MovHasS = false;
6960 }
6961 else
6962 UseMov = false;
6963 }
6964 else {
6965 // Thumb/Thumb2 Constant
6966 if (hasThumb2() &&
6967 ARM_AM::getT2SOImmVal(Value) != -1)
6968 TmpInst.setOpcode(ARM::t2MOVi);
6969 else if (hasThumb2() &&
6970 ARM_AM::getT2SOImmVal(~Value) != -1) {
6971 TmpInst.setOpcode(ARM::t2MVNi);
6972 Value = ~Value;
6973 }
6974 else if (hasV8MBaseline() &&
6975 Value >=0 && Value < 65536) {
6976 TmpInst.setOpcode(ARM::t2MOVi16);
6977 MovHasS = false;
6978 }
6979 else
6980 UseMov = false;
6981 }
6982 if (UseMov) {
6983 TmpInst.addOperand(Inst.getOperand(0)); // Rt
6984 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
6985 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6986 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6987 if (MovHasS)
6988 TmpInst.addOperand(MCOperand::createReg(0)); // S
6989 Inst = TmpInst;
6990 return true;
6991 }
6992 }
6993 // No opportunity to use MOV/MVN; create a constant pool entry instead.
6994 const MCExpr *CPLoc =
6995 getTargetStreamer().addConstantPoolEntry(SubExprVal,
6996 PoolOperand.getStartLoc());
6997 TmpInst.addOperand(Inst.getOperand(0)); // Rt
6998 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
6999 if (TmpInst.getOpcode() == ARM::LDRi12)
7000 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
7001 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7002 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7003 Inst = TmpInst;
7004 return true;
7005 }
7006 // Handle NEON VST complex aliases.
7007 case ARM::VST1LNdWB_register_Asm_8:
7008 case ARM::VST1LNdWB_register_Asm_16:
7009 case ARM::VST1LNdWB_register_Asm_32: {
7010 MCInst TmpInst;
7011 // Shuffle the operands around so the lane index operand is in the
7012 // right place.
7013 unsigned Spacing;
7014 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7015 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7016 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7017 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7018 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7019 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7020 TmpInst.addOperand(Inst.getOperand(1)); // lane
7021 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7022 TmpInst.addOperand(Inst.getOperand(6));
7023 Inst = TmpInst;
7024 return true;
7025 }
7026
7027 case ARM::VST2LNdWB_register_Asm_8:
7028 case ARM::VST2LNdWB_register_Asm_16:
7029 case ARM::VST2LNdWB_register_Asm_32:
7030 case ARM::VST2LNqWB_register_Asm_16:
7031 case ARM::VST2LNqWB_register_Asm_32: {
7032 MCInst TmpInst;
7033 // Shuffle the operands around so the lane index operand is in the
7034 // right place.
7035 unsigned Spacing;
7036 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7037 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7038 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7039 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7040 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7041 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7042 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7043 Spacing));
7044 TmpInst.addOperand(Inst.getOperand(1)); // lane
7045 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7046 TmpInst.addOperand(Inst.getOperand(6));
7047 Inst = TmpInst;
7048 return true;
7049 }
7050
7051 case ARM::VST3LNdWB_register_Asm_8:
7052 case ARM::VST3LNdWB_register_Asm_16:
7053 case ARM::VST3LNdWB_register_Asm_32:
7054 case ARM::VST3LNqWB_register_Asm_16:
7055 case ARM::VST3LNqWB_register_Asm_32: {
7056 MCInst TmpInst;
7057 // Shuffle the operands around so the lane index operand is in the
7058 // right place.
7059 unsigned Spacing;
7060 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7061 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7062 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7063 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7064 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7065 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7066 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7067 Spacing));
7068 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7069 Spacing * 2));
7070 TmpInst.addOperand(Inst.getOperand(1)); // lane
7071 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7072 TmpInst.addOperand(Inst.getOperand(6));
7073 Inst = TmpInst;
7074 return true;
7075 }
7076
7077 case ARM::VST4LNdWB_register_Asm_8:
7078 case ARM::VST4LNdWB_register_Asm_16:
7079 case ARM::VST4LNdWB_register_Asm_32:
7080 case ARM::VST4LNqWB_register_Asm_16:
7081 case ARM::VST4LNqWB_register_Asm_32: {
7082 MCInst TmpInst;
7083 // Shuffle the operands around so the lane index operand is in the
7084 // right place.
7085 unsigned Spacing;
7086 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7087 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7088 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7089 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7090 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7091 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7092 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7093 Spacing));
7094 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7095 Spacing * 2));
7096 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7097 Spacing * 3));
7098 TmpInst.addOperand(Inst.getOperand(1)); // lane
7099 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7100 TmpInst.addOperand(Inst.getOperand(6));
7101 Inst = TmpInst;
7102 return true;
7103 }
7104
7105 case ARM::VST1LNdWB_fixed_Asm_8:
7106 case ARM::VST1LNdWB_fixed_Asm_16:
7107 case ARM::VST1LNdWB_fixed_Asm_32: {
7108 MCInst TmpInst;
7109 // Shuffle the operands around so the lane index operand is in the
7110 // right place.
7111 unsigned Spacing;
7112 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7113 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7114 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7115 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7116 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7117 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7118 TmpInst.addOperand(Inst.getOperand(1)); // lane
7119 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7120 TmpInst.addOperand(Inst.getOperand(5));
7121 Inst = TmpInst;
7122 return true;
7123 }
7124
7125 case ARM::VST2LNdWB_fixed_Asm_8:
7126 case ARM::VST2LNdWB_fixed_Asm_16:
7127 case ARM::VST2LNdWB_fixed_Asm_32:
7128 case ARM::VST2LNqWB_fixed_Asm_16:
7129 case ARM::VST2LNqWB_fixed_Asm_32: {
7130 MCInst TmpInst;
7131 // Shuffle the operands around so the lane index operand is in the
7132 // right place.
7133 unsigned Spacing;
7134 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7135 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7136 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7137 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7138 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7139 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7140 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7141 Spacing));
7142 TmpInst.addOperand(Inst.getOperand(1)); // lane
7143 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7144 TmpInst.addOperand(Inst.getOperand(5));
7145 Inst = TmpInst;
7146 return true;
7147 }
7148
7149 case ARM::VST3LNdWB_fixed_Asm_8:
7150 case ARM::VST3LNdWB_fixed_Asm_16:
7151 case ARM::VST3LNdWB_fixed_Asm_32:
7152 case ARM::VST3LNqWB_fixed_Asm_16:
7153 case ARM::VST3LNqWB_fixed_Asm_32: {
7154 MCInst TmpInst;
7155 // Shuffle the operands around so the lane index operand is in the
7156 // right place.
7157 unsigned Spacing;
7158 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7159 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7160 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7161 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7162 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7163 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7164 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7165 Spacing));
7166 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7167 Spacing * 2));
7168 TmpInst.addOperand(Inst.getOperand(1)); // lane
7169 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7170 TmpInst.addOperand(Inst.getOperand(5));
7171 Inst = TmpInst;
7172 return true;
7173 }
7174
7175 case ARM::VST4LNdWB_fixed_Asm_8:
7176 case ARM::VST4LNdWB_fixed_Asm_16:
7177 case ARM::VST4LNdWB_fixed_Asm_32:
7178 case ARM::VST4LNqWB_fixed_Asm_16:
7179 case ARM::VST4LNqWB_fixed_Asm_32: {
7180 MCInst TmpInst;
7181 // Shuffle the operands around so the lane index operand is in the
7182 // right place.
7183 unsigned Spacing;
7184 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7185 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7186 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7187 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7188 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7189 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7190 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7191 Spacing));
7192 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7193 Spacing * 2));
7194 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7195 Spacing * 3));
7196 TmpInst.addOperand(Inst.getOperand(1)); // lane
7197 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7198 TmpInst.addOperand(Inst.getOperand(5));
7199 Inst = TmpInst;
7200 return true;
7201 }
7202
7203 case ARM::VST1LNdAsm_8:
7204 case ARM::VST1LNdAsm_16:
7205 case ARM::VST1LNdAsm_32: {
7206 MCInst TmpInst;
7207 // Shuffle the operands around so the lane index operand is in the
7208 // right place.
7209 unsigned Spacing;
7210 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7211 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7212 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7213 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7214 TmpInst.addOperand(Inst.getOperand(1)); // lane
7215 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7216 TmpInst.addOperand(Inst.getOperand(5));
7217 Inst = TmpInst;
7218 return true;
7219 }
7220
7221 case ARM::VST2LNdAsm_8:
7222 case ARM::VST2LNdAsm_16:
7223 case ARM::VST2LNdAsm_32:
7224 case ARM::VST2LNqAsm_16:
7225 case ARM::VST2LNqAsm_32: {
7226 MCInst TmpInst;
7227 // Shuffle the operands around so the lane index operand is in the
7228 // right place.
7229 unsigned Spacing;
7230 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7231 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7232 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7233 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7234 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7235 Spacing));
7236 TmpInst.addOperand(Inst.getOperand(1)); // lane
7237 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7238 TmpInst.addOperand(Inst.getOperand(5));
7239 Inst = TmpInst;
7240 return true;
7241 }
7242
7243 case ARM::VST3LNdAsm_8:
7244 case ARM::VST3LNdAsm_16:
7245 case ARM::VST3LNdAsm_32:
7246 case ARM::VST3LNqAsm_16:
7247 case ARM::VST3LNqAsm_32: {
7248 MCInst TmpInst;
7249 // Shuffle the operands around so the lane index operand is in the
7250 // right place.
7251 unsigned Spacing;
7252 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7253 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7254 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7255 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7256 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7257 Spacing));
7258 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7259 Spacing * 2));
7260 TmpInst.addOperand(Inst.getOperand(1)); // lane
7261 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7262 TmpInst.addOperand(Inst.getOperand(5));
7263 Inst = TmpInst;
7264 return true;
7265 }
7266
7267 case ARM::VST4LNdAsm_8:
7268 case ARM::VST4LNdAsm_16:
7269 case ARM::VST4LNdAsm_32:
7270 case ARM::VST4LNqAsm_16:
7271 case ARM::VST4LNqAsm_32: {
7272 MCInst TmpInst;
7273 // Shuffle the operands around so the lane index operand is in the
7274 // right place.
7275 unsigned Spacing;
7276 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7277 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7278 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7279 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7280 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7281 Spacing));
7282 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7283 Spacing * 2));
7284 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7285 Spacing * 3));
7286 TmpInst.addOperand(Inst.getOperand(1)); // lane
7287 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7288 TmpInst.addOperand(Inst.getOperand(5));
7289 Inst = TmpInst;
7290 return true;
7291 }
7292
7293 // Handle NEON VLD complex aliases.
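// For example (illustrative): "vld1.8 {d0[3]}, [r1]" only replaces lane 3 of
// d0, so the real instructions take the old value of Vd as a tied source
// operand; that is why Vd appears a second time in the expansions below.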
7294 case ARM::VLD1LNdWB_register_Asm_8:
7295 case ARM::VLD1LNdWB_register_Asm_16:
7296 case ARM::VLD1LNdWB_register_Asm_32: {
7297 MCInst TmpInst;
7298 // Shuffle the operands around so the lane index operand is in the
7299 // right place.
7300 unsigned Spacing;
7301 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7302 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7303 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7304 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7305 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7306 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7307 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7308 TmpInst.addOperand(Inst.getOperand(1)); // lane
7309 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7310 TmpInst.addOperand(Inst.getOperand(6));
7311 Inst = TmpInst;
7312 return true;
7313 }
7314
7315 case ARM::VLD2LNdWB_register_Asm_8:
7316 case ARM::VLD2LNdWB_register_Asm_16:
7317 case ARM::VLD2LNdWB_register_Asm_32:
7318 case ARM::VLD2LNqWB_register_Asm_16:
7319 case ARM::VLD2LNqWB_register_Asm_32: {
7320 MCInst TmpInst;
7321 // Shuffle the operands around so the lane index operand is in the
7322 // right place.
7323 unsigned Spacing;
7324 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7325 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7326 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7327 Spacing));
7328 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7329 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7330 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7331 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7332 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7333 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7334 Spacing));
7335 TmpInst.addOperand(Inst.getOperand(1)); // lane
7336 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7337 TmpInst.addOperand(Inst.getOperand(6));
7338 Inst = TmpInst;
7339 return true;
7340 }
7341
7342 case ARM::VLD3LNdWB_register_Asm_8:
7343 case ARM::VLD3LNdWB_register_Asm_16:
7344 case ARM::VLD3LNdWB_register_Asm_32:
7345 case ARM::VLD3LNqWB_register_Asm_16:
7346 case ARM::VLD3LNqWB_register_Asm_32: {
7347 MCInst TmpInst;
7348 // Shuffle the operands around so the lane index operand is in the
7349 // right place.
7350 unsigned Spacing;
7351 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7352 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7353 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7354 Spacing));
7355 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7356 Spacing * 2));
7357 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7358 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7359 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7360 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7361 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7362 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7363 Spacing));
7364 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7365 Spacing * 2));
7366 TmpInst.addOperand(Inst.getOperand(1)); // lane
7367 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7368 TmpInst.addOperand(Inst.getOperand(6));
7369 Inst = TmpInst;
7370 return true;
7371 }
7372
7373 case ARM::VLD4LNdWB_register_Asm_8:
7374 case ARM::VLD4LNdWB_register_Asm_16:
7375 case ARM::VLD4LNdWB_register_Asm_32:
7376 case ARM::VLD4LNqWB_register_Asm_16:
7377 case ARM::VLD4LNqWB_register_Asm_32: {
7378 MCInst TmpInst;
7379 // Shuffle the operands around so the lane index operand is in the
7380 // right place.
7381 unsigned Spacing;
7382 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7383 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7384 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7385 Spacing));
7386 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7387 Spacing * 2));
7388 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7389 Spacing * 3));
7390 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7391 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7392 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7393 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7394 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7395 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7396 Spacing));
7397 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7398 Spacing * 2));
7399 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7400 Spacing * 3));
7401 TmpInst.addOperand(Inst.getOperand(1)); // lane
7402 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7403 TmpInst.addOperand(Inst.getOperand(6));
7404 Inst = TmpInst;
7405 return true;
7406 }
7407
7408 case ARM::VLD1LNdWB_fixed_Asm_8:
7409 case ARM::VLD1LNdWB_fixed_Asm_16:
7410 case ARM::VLD1LNdWB_fixed_Asm_32: {
7411 MCInst TmpInst;
7412 // Shuffle the operands around so the lane index operand is in the
7413 // right place.
7414 unsigned Spacing;
7415 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7416 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7417 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7418 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7419 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7420 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7421 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7422 TmpInst.addOperand(Inst.getOperand(1)); // lane
7423 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7424 TmpInst.addOperand(Inst.getOperand(5));
7425 Inst = TmpInst;
7426 return true;
7427 }
7428
7429 case ARM::VLD2LNdWB_fixed_Asm_8:
7430 case ARM::VLD2LNdWB_fixed_Asm_16:
7431 case ARM::VLD2LNdWB_fixed_Asm_32:
7432 case ARM::VLD2LNqWB_fixed_Asm_16:
7433 case ARM::VLD2LNqWB_fixed_Asm_32: {
7434 MCInst TmpInst;
7435 // Shuffle the operands around so the lane index operand is in the
7436 // right place.
7437 unsigned Spacing;
7438 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7439 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7440 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7441 Spacing));
7442 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7443 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7444 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7445 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7446 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7447 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7448 Spacing));
7449 TmpInst.addOperand(Inst.getOperand(1)); // lane
7450 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7451 TmpInst.addOperand(Inst.getOperand(5));
7452 Inst = TmpInst;
7453 return true;
7454 }
7455
7456 case ARM::VLD3LNdWB_fixed_Asm_8:
7457 case ARM::VLD3LNdWB_fixed_Asm_16:
7458 case ARM::VLD3LNdWB_fixed_Asm_32:
7459 case ARM::VLD3LNqWB_fixed_Asm_16:
7460 case ARM::VLD3LNqWB_fixed_Asm_32: {
7461 MCInst TmpInst;
7462 // Shuffle the operands around so the lane index operand is in the
7463 // right place.
7464 unsigned Spacing;
7465 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7466 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7467 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7468 Spacing));
7469 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7470 Spacing * 2));
7471 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7472 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7473 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7474 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7475 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7476 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7477 Spacing));
7478 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7479 Spacing * 2));
7480 TmpInst.addOperand(Inst.getOperand(1)); // lane
7481 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7482 TmpInst.addOperand(Inst.getOperand(5));
7483 Inst = TmpInst;
7484 return true;
7485 }
7486
7487 case ARM::VLD4LNdWB_fixed_Asm_8:
7488 case ARM::VLD4LNdWB_fixed_Asm_16:
7489 case ARM::VLD4LNdWB_fixed_Asm_32:
7490 case ARM::VLD4LNqWB_fixed_Asm_16:
7491 case ARM::VLD4LNqWB_fixed_Asm_32: {
7492 MCInst TmpInst;
7493 // Shuffle the operands around so the lane index operand is in the
7494 // right place.
7495 unsigned Spacing;
7496 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7497 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7498 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7499 Spacing));
7500 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7501 Spacing * 2));
7502 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7503 Spacing * 3));
7504 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7505 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7506 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7507 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7508 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7509 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7510 Spacing));
7511 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7512 Spacing * 2));
7513 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7514 Spacing * 3));
7515 TmpInst.addOperand(Inst.getOperand(1)); // lane
7516 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7517 TmpInst.addOperand(Inst.getOperand(5));
7518 Inst = TmpInst;
7519 return true;
7520 }
7521
7522 case ARM::VLD1LNdAsm_8:
7523 case ARM::VLD1LNdAsm_16:
7524 case ARM::VLD1LNdAsm_32: {
7525 MCInst TmpInst;
7526 // Shuffle the operands around so the lane index operand is in the
7527 // right place.
7528 unsigned Spacing;
7529 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7530 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7531 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7532 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7533 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7534 TmpInst.addOperand(Inst.getOperand(1)); // lane
7535 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7536 TmpInst.addOperand(Inst.getOperand(5));
7537 Inst = TmpInst;
7538 return true;
7539 }
7540
7541 case ARM::VLD2LNdAsm_8:
7542 case ARM::VLD2LNdAsm_16:
7543 case ARM::VLD2LNdAsm_32:
7544 case ARM::VLD2LNqAsm_16:
7545 case ARM::VLD2LNqAsm_32: {
7546 MCInst TmpInst;
7547 // Shuffle the operands around so the lane index operand is in the
7548 // right place.
7549 unsigned Spacing;
7550 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7551 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7552 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7553 Spacing));
7554 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7555 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7556 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7557 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7558 Spacing));
7559 TmpInst.addOperand(Inst.getOperand(1)); // lane
7560 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7561 TmpInst.addOperand(Inst.getOperand(5));
7562 Inst = TmpInst;
7563 return true;
7564 }
7565
7566 case ARM::VLD3LNdAsm_8:
7567 case ARM::VLD3LNdAsm_16:
7568 case ARM::VLD3LNdAsm_32:
7569 case ARM::VLD3LNqAsm_16:
7570 case ARM::VLD3LNqAsm_32: {
7571 MCInst TmpInst;
7572 // Shuffle the operands around so the lane index operand is in the
7573 // right place.
7574 unsigned Spacing;
7575 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7576 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7577 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7578 Spacing));
7579 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7580 Spacing * 2));
7581 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7582 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7583 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7584 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7585 Spacing));
7586 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7587 Spacing * 2));
7588 TmpInst.addOperand(Inst.getOperand(1)); // lane
7589 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7590 TmpInst.addOperand(Inst.getOperand(5));
7591 Inst = TmpInst;
7592 return true;
7593 }
7594
7595 case ARM::VLD4LNdAsm_8:
7596 case ARM::VLD4LNdAsm_16:
7597 case ARM::VLD4LNdAsm_32:
7598 case ARM::VLD4LNqAsm_16:
7599 case ARM::VLD4LNqAsm_32: {
7600 MCInst TmpInst;
7601 // Shuffle the operands around so the lane index operand is in the
7602 // right place.
7603 unsigned Spacing;
7604 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7605 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7606 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7607 Spacing));
7608 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7609 Spacing * 2));
7610 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7611 Spacing * 3));
7612 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7613 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7614 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7615 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7616 Spacing));
7617 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7618 Spacing * 2));
7619 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7620 Spacing * 3));
7621 TmpInst.addOperand(Inst.getOperand(1)); // lane
7622 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7623 TmpInst.addOperand(Inst.getOperand(5));
7624 Inst = TmpInst;
7625 return true;
7626 }
7627
7628 // VLD3DUP single 3-element structure to all lanes instructions.
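// For example (illustrative): "vld3.8 {d0[], d1[], d2[]}, [r1]" loads three
// bytes and replicates each across all lanes of d0-d2; the 'q' variants use
// an every-other-register list such as {d0[], d2[], d4[]} (Spacing == 2).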
7629 case ARM::VLD3DUPdAsm_8:
7630 case ARM::VLD3DUPdAsm_16:
7631 case ARM::VLD3DUPdAsm_32:
7632 case ARM::VLD3DUPqAsm_8:
7633 case ARM::VLD3DUPqAsm_16:
7634 case ARM::VLD3DUPqAsm_32: {
7635 MCInst TmpInst;
7636 unsigned Spacing;
7637 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7638 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7639 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7640 Spacing));
7641 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7642 Spacing * 2));
7643 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7644 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7645 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7646 TmpInst.addOperand(Inst.getOperand(4));
7647 Inst = TmpInst;
7648 return true;
7649 }
7650
7651 case ARM::VLD3DUPdWB_fixed_Asm_8:
7652 case ARM::VLD3DUPdWB_fixed_Asm_16:
7653 case ARM::VLD3DUPdWB_fixed_Asm_32:
7654 case ARM::VLD3DUPqWB_fixed_Asm_8:
7655 case ARM::VLD3DUPqWB_fixed_Asm_16:
7656 case ARM::VLD3DUPqWB_fixed_Asm_32: {
7657 MCInst TmpInst;
7658 unsigned Spacing;
7659 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7660 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7661 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7662 Spacing));
7663 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7664 Spacing * 2));
7665 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7666 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7667 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7668 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7669 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7670 TmpInst.addOperand(Inst.getOperand(4));
7671 Inst = TmpInst;
7672 return true;
7673 }
7674
7675 case ARM::VLD3DUPdWB_register_Asm_8:
7676 case ARM::VLD3DUPdWB_register_Asm_16:
7677 case ARM::VLD3DUPdWB_register_Asm_32:
7678 case ARM::VLD3DUPqWB_register_Asm_8:
7679 case ARM::VLD3DUPqWB_register_Asm_16:
7680 case ARM::VLD3DUPqWB_register_Asm_32: {
7681 MCInst TmpInst;
7682 unsigned Spacing;
7683 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7684 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7685 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7686 Spacing));
7687 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7688 Spacing * 2));
7689 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7690 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7691 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7692 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7693 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7694 TmpInst.addOperand(Inst.getOperand(5));
7695 Inst = TmpInst;
7696 return true;
7697 }
7698
7699 // VLD3 multiple 3-element structure instructions.
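// For example (illustrative): "vld3.8 {d0, d1, d2}, [r1]" has no writeback,
// "vld3.8 {d0, d1, d2}, [r1]!" is the fixed-writeback form (Rm is the reg0
// sentinel below), and "vld3.8 {d0, d1, d2}, [r1], r2" is register writeback.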
7700 case ARM::VLD3dAsm_8:
7701 case ARM::VLD3dAsm_16:
7702 case ARM::VLD3dAsm_32:
7703 case ARM::VLD3qAsm_8:
7704 case ARM::VLD3qAsm_16:
7705 case ARM::VLD3qAsm_32: {
7706 MCInst TmpInst;
7707 unsigned Spacing;
7708 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7709 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7710 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7711 Spacing));
7712 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7713 Spacing * 2));
7714 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7715 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7716 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7717 TmpInst.addOperand(Inst.getOperand(4));
7718 Inst = TmpInst;
7719 return true;
7720 }
7721
7722 case ARM::VLD3dWB_fixed_Asm_8:
7723 case ARM::VLD3dWB_fixed_Asm_16:
7724 case ARM::VLD3dWB_fixed_Asm_32:
7725 case ARM::VLD3qWB_fixed_Asm_8:
7726 case ARM::VLD3qWB_fixed_Asm_16:
7727 case ARM::VLD3qWB_fixed_Asm_32: {
7728 MCInst TmpInst;
7729 unsigned Spacing;
7730 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7731 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7732 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7733 Spacing));
7734 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7735 Spacing * 2));
7736 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7737 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7738 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7739 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7740 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7741 TmpInst.addOperand(Inst.getOperand(4));
7742 Inst = TmpInst;
7743 return true;
7744 }
7745
7746 case ARM::VLD3dWB_register_Asm_8:
7747 case ARM::VLD3dWB_register_Asm_16:
7748 case ARM::VLD3dWB_register_Asm_32:
7749 case ARM::VLD3qWB_register_Asm_8:
7750 case ARM::VLD3qWB_register_Asm_16:
7751 case ARM::VLD3qWB_register_Asm_32: {
7752 MCInst TmpInst;
7753 unsigned Spacing;
7754 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7755 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7756 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7757 Spacing));
7758 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7759 Spacing * 2));
7760 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7761 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7762 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7763 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7764 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7765 TmpInst.addOperand(Inst.getOperand(5));
7766 Inst = TmpInst;
7767 return true;
7768 }
7769
7770 // VLD4DUP single 4-element structure to all lanes instructions.
7771 case ARM::VLD4DUPdAsm_8:
7772 case ARM::VLD4DUPdAsm_16:
7773 case ARM::VLD4DUPdAsm_32:
7774 case ARM::VLD4DUPqAsm_8:
7775 case ARM::VLD4DUPqAsm_16:
7776 case ARM::VLD4DUPqAsm_32: {
7777 MCInst TmpInst;
7778 unsigned Spacing;
7779 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7780 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7781 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7782 Spacing));
7783 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7784 Spacing * 2));
7785 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7786 Spacing * 3));
7787 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7788 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7789 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7790 TmpInst.addOperand(Inst.getOperand(4));
7791 Inst = TmpInst;
7792 return true;
7793 }
7794
7795 case ARM::VLD4DUPdWB_fixed_Asm_8:
7796 case ARM::VLD4DUPdWB_fixed_Asm_16:
7797 case ARM::VLD4DUPdWB_fixed_Asm_32:
7798 case ARM::VLD4DUPqWB_fixed_Asm_8:
7799 case ARM::VLD4DUPqWB_fixed_Asm_16:
7800 case ARM::VLD4DUPqWB_fixed_Asm_32: {
7801 MCInst TmpInst;
7802 unsigned Spacing;
7803 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7804 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7805 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7806 Spacing));
7807 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7808 Spacing * 2));
7809 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7810 Spacing * 3));
7811 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7812 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7813 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7814 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7815 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7816 TmpInst.addOperand(Inst.getOperand(4));
7817 Inst = TmpInst;
7818 return true;
7819 }
7820
7821 case ARM::VLD4DUPdWB_register_Asm_8:
7822 case ARM::VLD4DUPdWB_register_Asm_16:
7823 case ARM::VLD4DUPdWB_register_Asm_32:
7824 case ARM::VLD4DUPqWB_register_Asm_8:
7825 case ARM::VLD4DUPqWB_register_Asm_16:
7826 case ARM::VLD4DUPqWB_register_Asm_32: {
7827 MCInst TmpInst;
7828 unsigned Spacing;
7829 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7830 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7831 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7832 Spacing));
7833 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7834 Spacing * 2));
7835 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7836 Spacing * 3));
7837 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7838 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7839 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7840 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7841 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7842 TmpInst.addOperand(Inst.getOperand(5));
7843 Inst = TmpInst;
7844 return true;
7845 }
7846
7847 // VLD4 multiple 4-element structure instructions.
7848 case ARM::VLD4dAsm_8:
7849 case ARM::VLD4dAsm_16:
7850 case ARM::VLD4dAsm_32:
7851 case ARM::VLD4qAsm_8:
7852 case ARM::VLD4qAsm_16:
7853 case ARM::VLD4qAsm_32: {
7854 MCInst TmpInst;
7855 unsigned Spacing;
7856 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7857 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7858 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7859 Spacing));
7860 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7861 Spacing * 2));
7862 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7863 Spacing * 3));
7864 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7865 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7866 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7867 TmpInst.addOperand(Inst.getOperand(4));
7868 Inst = TmpInst;
7869 return true;
7870 }
7871
7872 case ARM::VLD4dWB_fixed_Asm_8:
7873 case ARM::VLD4dWB_fixed_Asm_16:
7874 case ARM::VLD4dWB_fixed_Asm_32:
7875 case ARM::VLD4qWB_fixed_Asm_8:
7876 case ARM::VLD4qWB_fixed_Asm_16:
7877 case ARM::VLD4qWB_fixed_Asm_32: {
7878 MCInst TmpInst;
7879 unsigned Spacing;
7880 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7881 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7882 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7883 Spacing));
7884 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7885 Spacing * 2));
7886 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7887 Spacing * 3));
7888 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7889 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7890 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7891 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7892 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7893 TmpInst.addOperand(Inst.getOperand(4));
7894 Inst = TmpInst;
7895 return true;
7896 }
7897
7898 case ARM::VLD4dWB_register_Asm_8:
7899 case ARM::VLD4dWB_register_Asm_16:
7900 case ARM::VLD4dWB_register_Asm_32:
7901 case ARM::VLD4qWB_register_Asm_8:
7902 case ARM::VLD4qWB_register_Asm_16:
7903 case ARM::VLD4qWB_register_Asm_32: {
7904 MCInst TmpInst;
7905 unsigned Spacing;
7906 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7907 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7908 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7909 Spacing));
7910 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7911 Spacing * 2));
7912 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7913 Spacing * 3));
7914 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7915 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7916 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7917 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7918 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7919 TmpInst.addOperand(Inst.getOperand(5));
7920 Inst = TmpInst;
7921 return true;
7922 }
7923
7924 // VST3 multiple 3-element structure instructions.
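// Same expansion idea as VLD3 above, except that for stores the register
// list follows the address operands in the real instruction's operand order.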
7925 case ARM::VST3dAsm_8:
7926 case ARM::VST3dAsm_16:
7927 case ARM::VST3dAsm_32:
7928 case ARM::VST3qAsm_8:
7929 case ARM::VST3qAsm_16:
7930 case ARM::VST3qAsm_32: {
7931 MCInst TmpInst;
7932 unsigned Spacing;
7933 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7934 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7935 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7936 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7937 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7938 Spacing));
7939 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7940 Spacing * 2));
7941 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7942 TmpInst.addOperand(Inst.getOperand(4));
7943 Inst = TmpInst;
7944 return true;
7945 }
7946
7947 case ARM::VST3dWB_fixed_Asm_8:
7948 case ARM::VST3dWB_fixed_Asm_16:
7949 case ARM::VST3dWB_fixed_Asm_32:
7950 case ARM::VST3qWB_fixed_Asm_8:
7951 case ARM::VST3qWB_fixed_Asm_16:
7952 case ARM::VST3qWB_fixed_Asm_32: {
7953 MCInst TmpInst;
7954 unsigned Spacing;
7955 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7956 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7957 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7958 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7959 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7960 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7961 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7962 Spacing));
7963 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7964 Spacing * 2));
7965 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7966 TmpInst.addOperand(Inst.getOperand(4));
7967 Inst = TmpInst;
7968 return true;
7969 }
7970
7971 case ARM::VST3dWB_register_Asm_8:
7972 case ARM::VST3dWB_register_Asm_16:
7973 case ARM::VST3dWB_register_Asm_32:
7974 case ARM::VST3qWB_register_Asm_8:
7975 case ARM::VST3qWB_register_Asm_16:
7976 case ARM::VST3qWB_register_Asm_32: {
7977 MCInst TmpInst;
7978 unsigned Spacing;
7979 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7980 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7981 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7982 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7983 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7984 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7985 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7986 Spacing));
7987 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7988 Spacing * 2));
7989 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7990 TmpInst.addOperand(Inst.getOperand(5));
7991 Inst = TmpInst;
7992 return true;
7993 }
7994
7995 // VST4 multiple 4-element structure instructions.
7996 case ARM::VST4dAsm_8:
7997 case ARM::VST4dAsm_16:
7998 case ARM::VST4dAsm_32:
7999 case ARM::VST4qAsm_8:
8000 case ARM::VST4qAsm_16:
8001 case ARM::VST4qAsm_32: {
8002 MCInst TmpInst;
8003 unsigned Spacing;
8004 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8005 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8006 TmpInst.addOperand(Inst.getOperand(2)); // alignment
8007 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8008 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8009 Spacing));
8010 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8011 Spacing * 2));
8012 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8013 Spacing * 3));
8014 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8015 TmpInst.addOperand(Inst.getOperand(4));
8016 Inst = TmpInst;
8017 return true;
8018 }
8019
8020 case ARM::VST4dWB_fixed_Asm_8:
8021 case ARM::VST4dWB_fixed_Asm_16:
8022 case ARM::VST4dWB_fixed_Asm_32:
8023 case ARM::VST4qWB_fixed_Asm_8:
8024 case ARM::VST4qWB_fixed_Asm_16:
8025 case ARM::VST4qWB_fixed_Asm_32: {
8026 MCInst TmpInst;
8027 unsigned Spacing;
8028 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8029 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8030 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8031 TmpInst.addOperand(Inst.getOperand(2)); // alignment
8032 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8033 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8034 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8035 Spacing));
8036 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8037 Spacing * 2));
8038 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8039 Spacing * 3));
8040 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8041 TmpInst.addOperand(Inst.getOperand(4));
8042 Inst = TmpInst;
8043 return true;
8044 }
8045
8046 case ARM::VST4dWB_register_Asm_8:
8047 case ARM::VST4dWB_register_Asm_16:
8048 case ARM::VST4dWB_register_Asm_32:
8049 case ARM::VST4qWB_register_Asm_8:
8050 case ARM::VST4qWB_register_Asm_16:
8051 case ARM::VST4qWB_register_Asm_32: {
8052 MCInst TmpInst;
8053 unsigned Spacing;
8054 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8055 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8056 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8057 TmpInst.addOperand(Inst.getOperand(2)); // alignment
8058 TmpInst.addOperand(Inst.getOperand(3)); // Rm
8059 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8060 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8061 Spacing));
8062 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8063 Spacing * 2));
8064 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8065 Spacing * 3));
8066 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8067 TmpInst.addOperand(Inst.getOperand(5));
8068 Inst = TmpInst;
8069 return true;
8070 }
8071
8072 // Handle encoding choice for the shift-immediate instructions.
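// For example (illustrative): if the wide t2LSLri was selected for
// "lsls r2, r2, #3" outside an IT block, it is narrowed to the 16-bit
// tLSLri here unless the ".w" qualifier was written.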
8073 case ARM::t2LSLri:
8074 case ARM::t2LSRri:
8075 case ARM::t2ASRri: {
8076 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8077 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8078 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8079 !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8080 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
8081 unsigned NewOpc;
8082 switch (Inst.getOpcode()) {
8083 default: llvm_unreachable("unexpected opcode");
8084 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8085 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8086 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8087 }
8088 // The Thumb1 operands aren't in the same order. Awesome, eh?
8089 MCInst TmpInst;
8090 TmpInst.setOpcode(NewOpc);
8091 TmpInst.addOperand(Inst.getOperand(0));
8092 TmpInst.addOperand(Inst.getOperand(5));
8093 TmpInst.addOperand(Inst.getOperand(1));
8094 TmpInst.addOperand(Inst.getOperand(2));
8095 TmpInst.addOperand(Inst.getOperand(3));
8096 TmpInst.addOperand(Inst.getOperand(4));
8097 Inst = TmpInst;
8098 return true;
8099 }
8100 return false;
8101 }
8102
8103 // Handle the Thumb2 mode MOV complex aliases.
8104 case ARM::t2MOVsr:
8105 case ARM::t2MOVSsr: {
8106 // Which instruction to expand to depends on the CCOut operand and,
8107 // when the register operands are low registers, on whether we're in
8108 // an IT block.
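// For example (illustrative): "movs r0, r0, lsl r1" outside an IT block
// narrows to the 16-bit tLSLrr, while "mov r0, r0, lsl r1" there needs the
// wide t2LSLrr because the 16-bit form sets the flags outside an IT block.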
8109 bool isNarrow = false;
8110 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8111 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8112 isARMLowRegister(Inst.getOperand(2).getReg()) &&
8113 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8114 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
8115 isNarrow = true;
8116 MCInst TmpInst;
8117 unsigned newOpc;
8118 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
8119 default: llvm_unreachable("unexpected opcode!");
8120 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
8121 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
8122 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
8123 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
8124 }
8125 TmpInst.setOpcode(newOpc);
8126 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8127 if (isNarrow)
8128 TmpInst.addOperand(MCOperand::createReg(
8129 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8130 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8131 TmpInst.addOperand(Inst.getOperand(2)); // Rm
8132 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8133 TmpInst.addOperand(Inst.getOperand(5));
8134 if (!isNarrow)
8135 TmpInst.addOperand(MCOperand::createReg(
8136 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8137 Inst = TmpInst;
8138 return true;
8139 }
8140 case ARM::t2MOVsi:
8141 case ARM::t2MOVSsi: {
8142 // Which instruction to expand to depends on the CCOut operand and,
8143 // when the register operands are low registers, on whether we're in
8144 // an IT block.
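// For example (illustrative): "movs r0, r1, lsl #4" outside an IT block
// becomes the 16-bit tLSLri, "mov r0, r1, rrx" always becomes t2RRX, and a
// shift amount of 32 is encoded as 0.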
8145 bool isNarrow = false;
8146 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8147 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8148 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
8149 isNarrow = true;
8150 MCInst TmpInst;
8151 unsigned newOpc;
8152 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
8153 default: llvm_unreachable("unexpected opcode!");
8154 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
8155 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
8156 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
8157 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
8158 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
8159 }
8160 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
8161 if (Amount == 32) Amount = 0;
8162 TmpInst.setOpcode(newOpc);
8163 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8164 if (isNarrow)
8165 TmpInst.addOperand(MCOperand::createReg(
8166 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8167 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8168 if (newOpc != ARM::t2RRX)
8169 TmpInst.addOperand(MCOperand::createImm(Amount));
8170 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8171 TmpInst.addOperand(Inst.getOperand(4));
8172 if (!isNarrow)
8173 TmpInst.addOperand(MCOperand::createReg(
8174 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8175 Inst = TmpInst;
8176 return true;
8177 }
8178 // Handle the ARM mode MOV complex aliases.
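// For example (illustrative): "asr r0, r1, r2" means "mov r0, r1, asr r2"
// and is emitted as MOVsr with the shift type carried in the shifter operand.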
8179 case ARM::ASRr:
8180 case ARM::LSRr:
8181 case ARM::LSLr:
8182 case ARM::RORr: {
8183 ARM_AM::ShiftOpc ShiftTy;
8184 switch(Inst.getOpcode()) {
8185 default: llvm_unreachable("unexpected opcode!");
8186 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
8187 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
8188 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
8189 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
8190 }
8191 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
8192 MCInst TmpInst;
8193 TmpInst.setOpcode(ARM::MOVsr);
8194 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8195 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8196 TmpInst.addOperand(Inst.getOperand(2)); // Rm
8197 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8198 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8199 TmpInst.addOperand(Inst.getOperand(4));
8200 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8201 Inst = TmpInst;
8202 return true;
8203 }
8204 case ARM::ASRi:
8205 case ARM::LSRi:
8206 case ARM::LSLi:
8207 case ARM::RORi: {
8208 ARM_AM::ShiftOpc ShiftTy;
8209 switch(Inst.getOpcode()) {
8210 default: llvm_unreachable("unexpected opcode!");
8211 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8212 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8213 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8214 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8215 }
8216 // A shift by zero is a plain MOVr, not a MOVsi.
8217 unsigned Amt = Inst.getOperand(2).getImm();
8218 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8219 // A shift by 32 (permitted only for LSR and ASR) should be encoded as 0.
8220 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8221 Amt = 0;
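// For example (illustrative): "lsl r0, r1, #0" is emitted as "mov r0, r1"
// (MOVr), and "lsr r0, r1, #32" uses a shift amount of 0 in the MOVsi
// shifter operand.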
8222 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8223 MCInst TmpInst;
8224 TmpInst.setOpcode(Opc);
8225 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8226 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8227 if (Opc == ARM::MOVsi)
8228 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8229 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8230 TmpInst.addOperand(Inst.getOperand(4));
8231 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8232 Inst = TmpInst;
8233 return true;
8234 }
8235 case ARM::RRXi: {
8236 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8237 MCInst TmpInst;
8238 TmpInst.setOpcode(ARM::MOVsi);
8239 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8240 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8241 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8242 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8243 TmpInst.addOperand(Inst.getOperand(3));
8244 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8245 Inst = TmpInst;
8246 return true;
8247 }
8248 case ARM::t2LDMIA_UPD: {
8249 // If this is a load of a single register, then we should use
8250 // a post-indexed LDR instruction instead, per the ARM ARM.
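// For example (illustrative): "ldmia r3!, {r4}" matched to this opcode is
// emitted as "ldr r4, [r3], #4".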
8251 if (Inst.getNumOperands() != 5)
8252 return false;
8253 MCInst TmpInst;
8254 TmpInst.setOpcode(ARM::t2LDR_POST);
8255 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8256 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8257 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8258 TmpInst.addOperand(MCOperand::createImm(4));
8259 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8260 TmpInst.addOperand(Inst.getOperand(3));
8261 Inst = TmpInst;
8262 return true;
8263 }
8264 case ARM::t2STMDB_UPD: {
8265 // If this is a store of a single register, then we should use
8266 // a pre-indexed STR instruction instead, per the ARM ARM.
8267 if (Inst.getNumOperands() != 5)
8268 return false;
8269 MCInst TmpInst;
8270 TmpInst.setOpcode(ARM::t2STR_PRE);
8271 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8272 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8273 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8274 TmpInst.addOperand(MCOperand::createImm(-4));
8275 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8276 TmpInst.addOperand(Inst.getOperand(3));
8277 Inst = TmpInst;
8278 return true;
8279 }
8280 case ARM::LDMIA_UPD:
8281 // If this is a load of a single register via a 'pop', then we should use
8282 // a post-indexed LDR instruction instead, per the ARM ARM.
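// For example (illustrative): "pop {r3}" is emitted as "ldr r3, [sp], #4".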
8283 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8284 Inst.getNumOperands() == 5) {
8285 MCInst TmpInst;
8286 TmpInst.setOpcode(ARM::LDR_POST_IMM);
8287 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8288 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8289 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8290 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
8291 TmpInst.addOperand(MCOperand::createImm(4));
8292 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8293 TmpInst.addOperand(Inst.getOperand(3));
8294 Inst = TmpInst;
8295 return true;
8296 }
8297 break;
8298 case ARM::STMDB_UPD:
8299 // If this is a store of a single register via a 'push', then we should use
8300 // a pre-indexed STR instruction instead, per the ARM ARM.
8301 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8302 Inst.getNumOperands() == 5) {
8303 MCInst TmpInst;
8304 TmpInst.setOpcode(ARM::STR_PRE_IMM);
8305 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8306 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8307 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8308 TmpInst.addOperand(MCOperand::createImm(-4));
8309 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8310 TmpInst.addOperand(Inst.getOperand(3));
8311 Inst = TmpInst;
8312 }
8313 break;
8314 case ARM::t2ADDri12:
8315 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8316 // mnemonic was used (not "addw"), encoding T3 is preferred.
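// For example (illustrative): if "add r0, r1, #1" was matched to the 12-bit
// form, it is switched to encoding T3 here since 1 is also a Thumb2
// modified-immediate; "addw r0, r1, #1" keeps encoding T4.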
8317 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8318 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8319 break;
8320 Inst.setOpcode(ARM::t2ADDri);
8321 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8322 break;
8323 case ARM::t2SUBri12:
8324 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8325 // mnemonic was used (not "subw"), encoding T3 is preferred.
8326 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8327 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8328 break;
8329 Inst.setOpcode(ARM::t2SUBri);
8330 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8331 break;
8332 case ARM::tADDi8:
8333 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8334 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8335 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8336 // to encoding T1 if <Rd> is omitted."
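// For example (illustrative): "adds r1, r1, #5" (Rd written explicitly)
// prefers tADDi3, while "adds r1, #5" stays with tADDi8.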
8337 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8338 Inst.setOpcode(ARM::tADDi3);
8339 return true;
8340 }
8341 break;
8342 case ARM::tSUBi8:
8343 // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
8344 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8345 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8346 // to encoding T1 if <Rd> is omitted."
8347 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8348 Inst.setOpcode(ARM::tSUBi3);
8349 return true;
8350 }
8351 break;
8352 case ARM::t2ADDri:
8353 case ARM::t2SUBri: {
8354 // If the destination and first source operand are the same, and
8355 // the flags are compatible with the current IT status, use encoding T2
8356 // instead of T3. For compatibility with the system 'as'. Make sure the
8357 // wide encoding wasn't explicit.
8358 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8359 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8360 (unsigned)Inst.getOperand(2).getImm() > 255 ||
8361 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
8362 (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
8363 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8364 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8365 break;
8366 MCInst TmpInst;
8367 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8368 ARM::tADDi8 : ARM::tSUBi8);
8369 TmpInst.addOperand(Inst.getOperand(0));
8370 TmpInst.addOperand(Inst.getOperand(5));
8371 TmpInst.addOperand(Inst.getOperand(0));
8372 TmpInst.addOperand(Inst.getOperand(2));
8373 TmpInst.addOperand(Inst.getOperand(3));
8374 TmpInst.addOperand(Inst.getOperand(4));
8375 Inst = TmpInst;
8376 return true;
8377 }
8378 case ARM::t2ADDrr: {
8379 // If the destination and first source operand are the same, and
8380 // there's no setting of the flags, use encoding T2 instead of T3.
8381 // Note that this is only for ADD, not SUB. This mirrors the system
8382 // 'as' behaviour. Also take advantage of ADD being commutative.
8383 // Make sure the wide encoding wasn't explicit.
8384 bool Swap = false;
8385 auto DestReg = Inst.getOperand(0).getReg();
8386 bool Transform = DestReg == Inst.getOperand(1).getReg();
8387 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8388 Transform = true;
8389 Swap = true;
8390 }
8391 if (!Transform ||
8392 Inst.getOperand(5).getReg() != 0 ||
8393 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8394 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8395 break;
8396 MCInst TmpInst;
8397 TmpInst.setOpcode(ARM::tADDhirr);
8398 TmpInst.addOperand(Inst.getOperand(0));
8399 TmpInst.addOperand(Inst.getOperand(0));
8400 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8401 TmpInst.addOperand(Inst.getOperand(3));
8402 TmpInst.addOperand(Inst.getOperand(4));
8403 Inst = TmpInst;
8404 return true;
8405 }
8406 case ARM::tADDrSP: {
8407 // If the non-SP source operand and the destination operand are not the
8408 // same, we need to use the 32-bit encoding if it's available.
8409 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8410 Inst.setOpcode(ARM::t2ADDrr);
8411 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8412 return true;
8413 }
8414 break;
8415 }
8416 case ARM::tB:
8417 // A Thumb conditional branch outside of an IT block is a tBcc.
8418 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8419 Inst.setOpcode(ARM::tBcc);
8420 return true;
8421 }
8422 break;
8423 case ARM::t2B:
8424 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8425 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8426 Inst.setOpcode(ARM::t2Bcc);
8427 return true;
8428 }
8429 break;
8430 case ARM::t2Bcc:
8431 // If the conditional is AL or we're in an IT block, we really want t2B.
8432 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8433 Inst.setOpcode(ARM::t2B);
8434 return true;
8435 }
8436 break;
8437 case ARM::tBcc:
8438 // If the conditional is AL, we really want tB.
8439 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8440 Inst.setOpcode(ARM::tB);
8441 return true;
8442 }
8443 break;
8444 case ARM::tLDMIA: {
8445 // If the register list contains any high registers, or if the writeback
8446 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8447 // instead if we're in Thumb2. Otherwise, this should have generated
8448 // an error in validateInstruction().
8449 unsigned Rn = Inst.getOperand(0).getReg();
8450 bool hasWritebackToken =
8451 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8452 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8453 bool listContainsBase;
8454 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8455 (!listContainsBase && !hasWritebackToken) ||
8456 (listContainsBase && hasWritebackToken)) {
8457 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8458 assert (isThumbTwo());
8459 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8460 // If we're switching to the updating version, we need to insert
8461 // the writeback tied operand.
8462 if (hasWritebackToken)
8463 Inst.insert(Inst.begin(),
8464 MCOperand::createReg(Inst.getOperand(0).getReg()));
8465 return true;
8466 }
8467 break;
8468 }
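// Illustrative example (sketch):
//     ldmia r1!, {r2, r3}   @ writeback, base not in list: 16-bit tLDMIA is fine
//     ldmia r1, {r1, r2}    @ base in list, no writeback: also stays 16-bit
//     ldmia r1!, {r2, r8}   @ high register forces the 32-bit t2LDMIA_UPD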
8469 case ARM::tSTMIA_UPD: {
8470 // If the register list contains any high registers, we need to use
8471 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8472 // should have generated an error in validateInstruction().
8473 unsigned Rn = Inst.getOperand(0).getReg();
8474 bool listContainsBase;
8475 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8476 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8477 assert (isThumbTwo());
8478 Inst.setOpcode(ARM::t2STMIA_UPD);
8479 return true;
8480 }
8481 break;
8482 }
8483 case ARM::tPOP: {
8484 bool listContainsBase;
8485 // If the register list contains any high registers, we need to use
8486 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8487 // should have generated an error in validateInstruction().
8488 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8489 return false;
8490 assert (isThumbTwo());
8491 Inst.setOpcode(ARM::t2LDMIA_UPD);
8492 // Add the base register and writeback operands.
8493 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8494 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8495 return true;
8496 }
8497 case ARM::tPUSH: {
8498 bool listContainsBase;
8499 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8500 return false;
8501 assert (isThumbTwo());
8502 Inst.setOpcode(ARM::t2STMDB_UPD);
8503 // Add the base register and writeback operands.
8504 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8505 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8506 return true;
8507 }
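// Illustrative example (sketch):
//     pop  {r4, pc}        @ stays 16-bit tPOP
//     pop  {r4, r8, pc}    @ r8 forces t2LDMIA_UPD sp!, {r4, r8, pc}
//     push {r4, r9, lr}    @ r9 forces t2STMDB_UPD sp!, {r4, r9, lr}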
8508 case ARM::t2MOVi: {
8509 // If we can use the 16-bit encoding and the user didn't explicitly
8510 // request the 32-bit variant, transform it here.
8511 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8512 (unsigned)Inst.getOperand(1).getImm() <= 255 &&
8513 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
8514 Inst.getOperand(4).getReg() == ARM::CPSR) ||
8515 (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
8516 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8517 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8518 // The operands aren't in the same order for tMOVi8...
8519 MCInst TmpInst;
8520 TmpInst.setOpcode(ARM::tMOVi8);
8521 TmpInst.addOperand(Inst.getOperand(0));
8522 TmpInst.addOperand(Inst.getOperand(4));
8523 TmpInst.addOperand(Inst.getOperand(1));
8524 TmpInst.addOperand(Inst.getOperand(2));
8525 TmpInst.addOperand(Inst.getOperand(3));
8526 Inst = TmpInst;
8527 return true;
8528 }
8529 break;
8530 }
8531 case ARM::t2MOVr: {
8532 // If we can use the 16-bit encoding and the user didn't explicitly
8533 // request the 32-bit variant, transform it here.
8534 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8535 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8536 Inst.getOperand(2).getImm() == ARMCC::AL &&
8537 Inst.getOperand(4).getReg() == ARM::CPSR &&
8538 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8539 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8540 // The operands aren't the same for tMOV[S]r... (no cc_out)
8541 MCInst TmpInst;
8542 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8543 TmpInst.addOperand(Inst.getOperand(0));
8544 TmpInst.addOperand(Inst.getOperand(1));
8545 TmpInst.addOperand(Inst.getOperand(2));
8546 TmpInst.addOperand(Inst.getOperand(3));
8547 Inst = TmpInst;
8548 return true;
8549 }
8550 break;
8551 }
8552 case ARM::t2SXTH:
8553 case ARM::t2SXTB:
8554 case ARM::t2UXTH:
8555 case ARM::t2UXTB: {
8556 // If we can use the 16-bit encoding and the user didn't explicitly
8557 // request the 32-bit variant, transform it here.
8558 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8559 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8560 Inst.getOperand(2).getImm() == 0 &&
8561 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8562 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8563 unsigned NewOpc;
8564 switch (Inst.getOpcode()) {
8565 default: llvm_unreachable("Illegal opcode!");
8566 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8567 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8568 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8569 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8570 }
8571 // The operands aren't the same for thumb1 (no rotate operand).
8572 MCInst TmpInst;
8573 TmpInst.setOpcode(NewOpc);
8574 TmpInst.addOperand(Inst.getOperand(0));
8575 TmpInst.addOperand(Inst.getOperand(1));
8576 TmpInst.addOperand(Inst.getOperand(3));
8577 TmpInst.addOperand(Inst.getOperand(4));
8578 Inst = TmpInst;
8579 return true;
8580 }
8581 break;
8582 }
8583 case ARM::MOVsi: {
8584 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8585 // rrx shifts and asr/lsr of #32 are encoded as a shift amount of 0
8586 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8587 return false;
8588 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8589 // Shifting by zero is accepted as a vanilla 'MOVr'
8590 MCInst TmpInst;
8591 TmpInst.setOpcode(ARM::MOVr);
8592 TmpInst.addOperand(Inst.getOperand(0));
8593 TmpInst.addOperand(Inst.getOperand(1));
8594 TmpInst.addOperand(Inst.getOperand(3));
8595 TmpInst.addOperand(Inst.getOperand(4));
8596 TmpInst.addOperand(Inst.getOperand(5));
8597 Inst = TmpInst;
8598 return true;
8599 }
8600 return false;
8601 }
8602 case ARM::ANDrsi:
8603 case ARM::ORRrsi:
8604 case ARM::EORrsi:
8605 case ARM::BICrsi:
8606 case ARM::SUBrsi:
8607 case ARM::ADDrsi: {
8608 unsigned newOpc;
8609 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8610 if (SOpc == ARM_AM::rrx) return false;
8611 switch (Inst.getOpcode()) {
8612 default: llvm_unreachable("unexpected opcode!");
8613 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8614 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8615 case ARM::EORrsi: newOpc = ARM::EORrr; break;
8616 case ARM::BICrsi: newOpc = ARM::BICrr; break;
8617 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8618 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8619 }
8620 // If the shift is by zero, use the non-shifted instruction definition.
8621 // The exception is right shifts, where an encoded amount of 0 means 32.
8622 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8623 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8624 MCInst TmpInst;
8625 TmpInst.setOpcode(newOpc);
8626 TmpInst.addOperand(Inst.getOperand(0));
8627 TmpInst.addOperand(Inst.getOperand(1));
8628 TmpInst.addOperand(Inst.getOperand(2));
8629 TmpInst.addOperand(Inst.getOperand(4));
8630 TmpInst.addOperand(Inst.getOperand(5));
8631 TmpInst.addOperand(Inst.getOperand(6));
8632 Inst = TmpInst;
8633 return true;
8634 }
8635 return false;
8636 }
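// Illustrative example (sketch):
//     and r0, r1, r2, lsl #0    @ degenerates to ANDrr r0, r1, r2
//     and r0, r1, r2, lsr #32   @ encoded with an amount of 0, kept as ANDrsi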
8637 case ARM::ITasm:
8638 case ARM::t2IT: {
8639 // In the IT-block mask, each bit after the first condition means 'then'
8640 // when it equals the low bit of the condition code. The parser always
8641 // builds the mask with 1 meaning 'then', so XOR-toggle the bits above the
8642 // terminator when the low bit of the condition code is zero.
8643 MCOperand &MO = Inst.getOperand(1);
8644 unsigned Mask = MO.getImm();
8645 unsigned OrigMask = Mask;
8646 unsigned TZ = countTrailingZeros(Mask);
8647 if ((Inst.getOperand(0).getImm() & 1) == 0) {
8648 assert(Mask && TZ <= 3 && "illegal IT mask value!");
8649 Mask ^= (0xE << TZ) & 0xF;
8650 }
8651 MO.setImm(Mask);
8652
8653 // Set up the IT block state according to the IT instruction we just
8654 // matched.
8655 assert(!inITBlock() && "nested IT blocks?!");
8656 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8657 ITState.Mask = OrigMask; // Use the original mask, not the updated one.
8658 ITState.CurPosition = 0;
8659 ITState.FirstCond = true;
8660 break;
8661 }
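// Worked example (sketch): "ite eq" is parsed with the 1-means-'then'
// convention as mask 0b0100; EQ has a zero low bit, so the bits above the
// terminating 1 are toggled, and the emitted architectural mask is 0b1100.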
8662 case ARM::t2LSLrr:
8663 case ARM::t2LSRrr:
8664 case ARM::t2ASRrr:
8665 case ARM::t2SBCrr:
8666 case ARM::t2RORrr:
8667 case ARM::t2BICrr:
8668 {
8669 // Assemblers should use the narrow encodings of these instructions when permissible.
8670 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8671 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8672 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8673 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8674 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8675 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8676 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8677 ".w"))) {
8678 unsigned NewOpc;
8679 switch (Inst.getOpcode()) {
8680 default: llvm_unreachable("unexpected opcode");
8681 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8682 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8683 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8684 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8685 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8686 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8687 }
8688 MCInst TmpInst;
8689 TmpInst.setOpcode(NewOpc);
8690 TmpInst.addOperand(Inst.getOperand(0));
8691 TmpInst.addOperand(Inst.getOperand(5));
8692 TmpInst.addOperand(Inst.getOperand(1));
8693 TmpInst.addOperand(Inst.getOperand(2));
8694 TmpInst.addOperand(Inst.getOperand(3));
8695 TmpInst.addOperand(Inst.getOperand(4));
8696 Inst = TmpInst;
8697 return true;
8698 }
8699 return false;
8700 }
8701 case ARM::t2ANDrr:
8702 case ARM::t2EORrr:
8703 case ARM::t2ADCrr:
8704 case ARM::t2ORRrr:
8705 {
8706 // Assemblers should use the narrow encodings of these instructions when permissible.
8707 // These instructions are special in that they are commutable, so shorter encodings
8708 // are available more often.
8709 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8710 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8711 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8712 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8713 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8714 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8715 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8716 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8717 ".w"))) {
8718 unsigned NewOpc;
8719 switch (Inst.getOpcode()) {
8720 default: llvm_unreachable("unexpected opcode");
8721 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8722 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8723 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8724 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8725 }
8726 MCInst TmpInst;
8727 TmpInst.setOpcode(NewOpc);
8728 TmpInst.addOperand(Inst.getOperand(0));
8729 TmpInst.addOperand(Inst.getOperand(5));
8730 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8731 TmpInst.addOperand(Inst.getOperand(1));
8732 TmpInst.addOperand(Inst.getOperand(2));
8733 } else {
8734 TmpInst.addOperand(Inst.getOperand(2));
8735 TmpInst.addOperand(Inst.getOperand(1));
8736 }
8737 TmpInst.addOperand(Inst.getOperand(3));
8738 TmpInst.addOperand(Inst.getOperand(4));
8739 Inst = TmpInst;
8740 return true;
8741 }
8742 return false;
8743 }
8744 }
8745 return false;
8746 }
8747
8748 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8749 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8750 // suffix depending on whether they're in an IT block or not.
8751 unsigned Opc = Inst.getOpcode();
8752 const MCInstrDesc &MCID = MII.get(Opc);
8753 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8754 assert(MCID.hasOptionalDef() &&
8755 "optionally flag setting instruction missing optional def operand");
8756 assert(MCID.NumOperands == Inst.getNumOperands() &&
8757 "operand count mismatch!");
8758 // Find the optional-def operand (cc_out).
8759 unsigned OpNo;
8760 for (OpNo = 0;
8761 OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
8762 ++OpNo)
8763 ;
8764 // If we're parsing Thumb1, reject it completely.
8765 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8766 return Match_MnemonicFail;
8767 // If we're parsing Thumb2, which form is legal depends on whether we're
8768 // in an IT block.
8769 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8770 !inITBlock())
8771 return Match_RequiresITBlock;
8772 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8773 inITBlock())
8774 return Match_RequiresNotITBlock;
8775 } else if (isThumbOne()) {
8776 // Some Thumb1 encodings that support high registers only allow both
8777 // registers to be from r0-r7 when Thumb2 is available.
8778 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
8779 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8780 isARMLowRegister(Inst.getOperand(2).getReg()))
8781 return Match_RequiresThumb2;
8782 // Others only require ARMv6 or later.
8783 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
8784 isARMLowRegister(Inst.getOperand(0).getReg()) &&
8785 isARMLowRegister(Inst.getOperand(1).getReg()))
8786 return Match_RequiresV6;
8787 }
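// Illustrative example (sketch), considering only the 16-bit tADDrr
// candidate: "adds r0, r1, r2" satisfies the flag-setting check above
// outside an IT block (cc_out is CPSR), whereas inside an IT block only the
// non-flag-setting "add" form does.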
8788
8789 for (unsigned I = 0; I < MCID.NumOperands; ++I)
8790 if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
8791 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
8792 if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
8793 return Match_RequiresV8;
8794 else if (Inst.getOperand(I).getReg() == ARM::PC)
8795 return Match_InvalidOperand;
8796 }
8797
8798 return Match_Success;
8799 }
8800
8801 namespace llvm {
8802 template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
8803 return true; // In an assembly source, no need to second-guess
8804 }
8805 }
8806
8807 static const char *getSubtargetFeatureName(uint64_t Val);
8808 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
8809 OperandVector &Operands,
8810 MCStreamer &Out, uint64_t &ErrorInfo,
8811 bool MatchingInlineAsm) {
8812 MCInst Inst;
8813 unsigned MatchResult;
8814
8815 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
8816 MatchingInlineAsm);
8817 switch (MatchResult) {
8818 case Match_Success:
8819 // Context sensitive operand constraints aren't handled by the matcher,
8820 // so check them here.
8821 if (validateInstruction(Inst, Operands)) {
8822 // Still progress the IT block, otherwise one wrong condition causes
8823 // nasty cascading errors.
8824 forwardITPosition();
8825 return true;
8826 }
8827
8828 { // processInstruction() updates inITBlock state, we need to save it away
8829 bool wasInITBlock = inITBlock();
8830
8831 // Some instructions need post-processing to, for example, tweak which
8832 // encoding is selected. Loop on it while changes happen so the
8833 // individual transformations can chain off each other. E.g.,
8834 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
8835 while (processInstruction(Inst, Operands, Out))
8836 ;
8837
8838 // Only after the instruction is fully processed, we can validate it
8839 if (wasInITBlock && hasV8Ops() && isThumb() &&
8840 !isV8EligibleForIT(&Inst)) {
8841 Warning(IDLoc, "deprecated instruction in IT block");
8842 }
8843 }
8844
8845 // Only move forward at the very end so that everything in validate
8846 // and process gets a consistent answer about whether we're in an IT
8847 // block.
8848 forwardITPosition();
8849
8850 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
8851 // doesn't actually encode.
8852 if (Inst.getOpcode() == ARM::ITasm)
8853 return false;
8854
8855 Inst.setLoc(IDLoc);
8856 Out.EmitInstruction(Inst, getSTI());
8857 return false;
8858 case Match_MissingFeature: {
8859 assert(ErrorInfo && "Unknown missing feature!");
8860 // Special case the error message for the very common case where only
8861 // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
8862 std::string Msg = "instruction requires:";
8863 uint64_t Mask = 1;
8864 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
8865 if (ErrorInfo & Mask) {
8866 Msg += " ";
8867 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
8868 }
8869 Mask <<= 1;
8870 }
8871 return Error(IDLoc, Msg);
8872 }
8873 case Match_InvalidOperand: {
8874 SMLoc ErrorLoc = IDLoc;
8875 if (ErrorInfo != ~0ULL) {
8876 if (ErrorInfo >= Operands.size())
8877 return Error(IDLoc, "too few operands for instruction");
8878
8879 ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8880 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8881 }
8882
8883 return Error(ErrorLoc, "invalid operand for instruction");
8884 }
8885 case Match_MnemonicFail:
8886 return Error(IDLoc, "invalid instruction",
8887 ((ARMOperand &)*Operands[0]).getLocRange());
8888 case Match_RequiresNotITBlock:
8889 return Error(IDLoc, "flag setting instruction only valid outside IT block");
8890 case Match_RequiresITBlock:
8891 return Error(IDLoc, "instruction only valid inside IT block");
8892 case Match_RequiresV6:
8893 return Error(IDLoc, "instruction variant requires ARMv6 or later");
8894 case Match_RequiresThumb2:
8895 return Error(IDLoc, "instruction variant requires Thumb2");
8896 case Match_RequiresV8:
8897 return Error(IDLoc, "instruction variant requires ARMv8 or later");
8898 case Match_ImmRange0_15: {
8899 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8900 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8901 return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
8902 }
8903 case Match_ImmRange0_239: {
8904 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8905 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8906 return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
8907 }
8908 case Match_AlignedMemoryRequiresNone:
8909 case Match_DupAlignedMemoryRequiresNone:
8910 case Match_AlignedMemoryRequires16:
8911 case Match_DupAlignedMemoryRequires16:
8912 case Match_AlignedMemoryRequires32:
8913 case Match_DupAlignedMemoryRequires32:
8914 case Match_AlignedMemoryRequires64:
8915 case Match_DupAlignedMemoryRequires64:
8916 case Match_AlignedMemoryRequires64or128:
8917 case Match_DupAlignedMemoryRequires64or128:
8918 case Match_AlignedMemoryRequires64or128or256:
8919 {
8920 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
8921 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8922 switch (MatchResult) {
8923 default:
8924 llvm_unreachable("Missing Match_Aligned type");
8925 case Match_AlignedMemoryRequiresNone:
8926 case Match_DupAlignedMemoryRequiresNone:
8927 return Error(ErrorLoc, "alignment must be omitted");
8928 case Match_AlignedMemoryRequires16:
8929 case Match_DupAlignedMemoryRequires16:
8930 return Error(ErrorLoc, "alignment must be 16 or omitted");
8931 case Match_AlignedMemoryRequires32:
8932 case Match_DupAlignedMemoryRequires32:
8933 return Error(ErrorLoc, "alignment must be 32 or omitted");
8934 case Match_AlignedMemoryRequires64:
8935 case Match_DupAlignedMemoryRequires64:
8936 return Error(ErrorLoc, "alignment must be 64 or omitted");
8937 case Match_AlignedMemoryRequires64or128:
8938 case Match_DupAlignedMemoryRequires64or128:
8939 return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
8940 case Match_AlignedMemoryRequires64or128or256:
8941 return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
8942 }
8943 }
8944 }
8945
8946 llvm_unreachable("Implement any new match types added!");
8947 }
8948
8949 /// parseDirective parses the arm specific directives
8950 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
8951 const MCObjectFileInfo::Environment Format =
8952 getContext().getObjectFileInfo()->getObjectFileType();
8953 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8954 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
8955
8956 StringRef IDVal = DirectiveID.getIdentifier();
8957 if (IDVal == ".word")
8958 return parseLiteralValues(4, DirectiveID.getLoc());
8959 else if (IDVal == ".short" || IDVal == ".hword")
8960 return parseLiteralValues(2, DirectiveID.getLoc());
8961 else if (IDVal == ".thumb")
8962 return parseDirectiveThumb(DirectiveID.getLoc());
8963 else if (IDVal == ".arm")
8964 return parseDirectiveARM(DirectiveID.getLoc());
8965 else if (IDVal == ".thumb_func")
8966 return parseDirectiveThumbFunc(DirectiveID.getLoc());
8967 else if (IDVal == ".code")
8968 return parseDirectiveCode(DirectiveID.getLoc());
8969 else if (IDVal == ".syntax")
8970 return parseDirectiveSyntax(DirectiveID.getLoc());
8971 else if (IDVal == ".unreq")
8972 return parseDirectiveUnreq(DirectiveID.getLoc());
8973 else if (IDVal == ".fnend")
8974 return parseDirectiveFnEnd(DirectiveID.getLoc());
8975 else if (IDVal == ".cantunwind")
8976 return parseDirectiveCantUnwind(DirectiveID.getLoc());
8977 else if (IDVal == ".personality")
8978 return parseDirectivePersonality(DirectiveID.getLoc());
8979 else if (IDVal == ".handlerdata")
8980 return parseDirectiveHandlerData(DirectiveID.getLoc());
8981 else if (IDVal == ".setfp")
8982 return parseDirectiveSetFP(DirectiveID.getLoc());
8983 else if (IDVal == ".pad")
8984 return parseDirectivePad(DirectiveID.getLoc());
8985 else if (IDVal == ".save")
8986 return parseDirectiveRegSave(DirectiveID.getLoc(), false);
8987 else if (IDVal == ".vsave")
8988 return parseDirectiveRegSave(DirectiveID.getLoc(), true);
8989 else if (IDVal == ".ltorg" || IDVal == ".pool")
8990 return parseDirectiveLtorg(DirectiveID.getLoc());
8991 else if (IDVal == ".even")
8992 return parseDirectiveEven(DirectiveID.getLoc());
8993 else if (IDVal == ".personalityindex")
8994 return parseDirectivePersonalityIndex(DirectiveID.getLoc());
8995 else if (IDVal == ".unwind_raw")
8996 return parseDirectiveUnwindRaw(DirectiveID.getLoc());
8997 else if (IDVal == ".movsp")
8998 return parseDirectiveMovSP(DirectiveID.getLoc());
8999 else if (IDVal == ".arch_extension")
9000 return parseDirectiveArchExtension(DirectiveID.getLoc());
9001 else if (IDVal == ".align")
9002 return parseDirectiveAlign(DirectiveID.getLoc());
9003 else if (IDVal == ".thumb_set")
9004 return parseDirectiveThumbSet(DirectiveID.getLoc());
9005
9006 if (!IsMachO && !IsCOFF) {
9007 if (IDVal == ".arch")
9008 return parseDirectiveArch(DirectiveID.getLoc());
9009 else if (IDVal == ".cpu")
9010 return parseDirectiveCPU(DirectiveID.getLoc());
9011 else if (IDVal == ".eabi_attribute")
9012 return parseDirectiveEabiAttr(DirectiveID.getLoc());
9013 else if (IDVal == ".fpu")
9014 return parseDirectiveFPU(DirectiveID.getLoc());
9015 else if (IDVal == ".fnstart")
9016 return parseDirectiveFnStart(DirectiveID.getLoc());
9017 else if (IDVal == ".inst")
9018 return parseDirectiveInst(DirectiveID.getLoc());
9019 else if (IDVal == ".inst.n")
9020 return parseDirectiveInst(DirectiveID.getLoc(), 'n');
9021 else if (IDVal == ".inst.w")
9022 return parseDirectiveInst(DirectiveID.getLoc(), 'w');
9023 else if (IDVal == ".object_arch")
9024 return parseDirectiveObjectArch(DirectiveID.getLoc());
9025 else if (IDVal == ".tlsdescseq")
9026 return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
9027 }
9028
9029 return true;
9030 }
9031
9032 /// parseLiteralValues
9033 /// ::= .hword expression [, expression]*
9034 /// ::= .short expression [, expression]*
9035 /// ::= .word expression [, expression]*
9036 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
9037 MCAsmParser &Parser = getParser();
9038 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9039 for (;;) {
9040 const MCExpr *Value;
9041 if (getParser().parseExpression(Value)) {
9042 Parser.eatToEndOfStatement();
9043 return false;
9044 }
9045
9046 getParser().getStreamer().EmitValue(Value, Size, L);
9047
9048 if (getLexer().is(AsmToken::EndOfStatement))
9049 break;
9050
9051 // FIXME: Improve diagnostic.
9052 if (getLexer().isNot(AsmToken::Comma)) {
9053 Error(L, "unexpected token in directive");
9054 return false;
9055 }
9056 Parser.Lex();
9057 }
9058 }
9059
9060 Parser.Lex();
9061 return false;
9062 }
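// Illustrative usage (sketch) of the directives handled above:
//     .word  0x12345678, external_sym   @ two 4-byte values
//     .short 0x1234                     @ one 2-byte value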
9063
9064 /// parseDirectiveThumb
9065 /// ::= .thumb
9066 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
9067 MCAsmParser &Parser = getParser();
9068 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9069 Error(L, "unexpected token in directive");
9070 return false;
9071 }
9072 Parser.Lex();
9073
9074 if (!hasThumb()) {
9075 Error(L, "target does not support Thumb mode");
9076 return false;
9077 }
9078
9079 if (!isThumb())
9080 SwitchMode();
9081
9082 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9083 return false;
9084 }
9085
9086 /// parseDirectiveARM
9087 /// ::= .arm
9088 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
9089 MCAsmParser &Parser = getParser();
9090 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9091 Error(L, "unexpected token in directive");
9092 return false;
9093 }
9094 Parser.Lex();
9095
9096 if (!hasARM()) {
9097 Error(L, "target does not support ARM mode");
9098 return false;
9099 }
9100
9101 if (isThumb())
9102 SwitchMode();
9103
9104 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9105 return false;
9106 }
9107
9108 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
9109 if (NextSymbolIsThumb) {
9110 getParser().getStreamer().EmitThumbFunc(Symbol);
9111 NextSymbolIsThumb = false;
9112 }
9113 }
9114
9115 /// parseDirectiveThumbFunc
9116 /// ::= .thumb_func [symbol_name]
9117 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
9118 MCAsmParser &Parser = getParser();
9119 const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
9120 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9121
9122 // Darwin asm syntax allows an optional function name after the .thumb_func
9123 // directive; ELF does not.
9124 if (IsMachO) {
9125 const AsmToken &Tok = Parser.getTok();
9126 if (Tok.isNot(AsmToken::EndOfStatement)) {
9127 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
9128 Error(L, "unexpected token in .thumb_func directive");
9129 return false;
9130 }
9131
9132 MCSymbol *Func =
9133 getParser().getContext().getOrCreateSymbol(Tok.getIdentifier());
9134 getParser().getStreamer().EmitThumbFunc(Func);
9135 Parser.Lex(); // Consume the identifier token.
9136 return false;
9137 }
9138 }
9139
9140 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9141 Error(Parser.getTok().getLoc(), "unexpected token in directive");
9142 Parser.eatToEndOfStatement();
9143 return false;
9144 }
9145
9146 NextSymbolIsThumb = true;
9147 return false;
9148 }
9149
9150 /// parseDirectiveSyntax
9151 /// ::= .syntax unified | divided
9152 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
9153 MCAsmParser &Parser = getParser();
9154 const AsmToken &Tok = Parser.getTok();
9155 if (Tok.isNot(AsmToken::Identifier)) {
9156 Error(L, "unexpected token in .syntax directive");
9157 return false;
9158 }
9159
9160 StringRef Mode = Tok.getString();
9161 if (Mode == "unified" || Mode == "UNIFIED") {
9162 Parser.Lex();
9163 } else if (Mode == "divided" || Mode == "DIVIDED") {
9164 Error(L, "'.syntax divided' arm asssembly not supported");
9165 return false;
9166 } else {
9167 Error(L, "unrecognized syntax mode in .syntax directive");
9168 return false;
9169 }
9170
9171 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9172 Error(Parser.getTok().getLoc(), "unexpected token in directive");
9173 return false;
9174 }
9175 Parser.Lex();
9176
9177 // TODO tell the MC streamer the mode
9178 // getParser().getStreamer().Emit???();
9179 return false;
9180 }
9181
9182 /// parseDirectiveCode
9183 /// ::= .code 16 | 32
9184 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
9185 MCAsmParser &Parser = getParser();
9186 const AsmToken &Tok = Parser.getTok();
9187 if (Tok.isNot(AsmToken::Integer)) {
9188 Error(L, "unexpected token in .code directive");
9189 return false;
9190 }
9191 int64_t Val = Parser.getTok().getIntVal();
9192 if (Val != 16 && Val != 32) {
9193 Error(L, "invalid operand to .code directive");
9194 return false;
9195 }
9196 Parser.Lex();
9197
9198 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9199 Error(Parser.getTok().getLoc(), "unexpected token in directive");
9200 return false;
9201 }
9202 Parser.Lex();
9203
9204 if (Val == 16) {
9205 if (!hasThumb()) {
9206 Error(L, "target does not support Thumb mode");
9207 return false;
9208 }
9209
9210 if (!isThumb())
9211 SwitchMode();
9212 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9213 } else {
9214 if (!hasARM()) {
9215 Error(L, "target does not support ARM mode");
9216 return false;
9217 }
9218
9219 if (isThumb())
9220 SwitchMode();
9221 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9222 }
9223
9224 return false;
9225 }
9226
9227 /// parseDirectiveReq
9228 /// ::= name .req registername
9229 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
9230 MCAsmParser &Parser = getParser();
9231 Parser.Lex(); // Eat the '.req' token.
9232 unsigned Reg;
9233 SMLoc SRegLoc, ERegLoc;
9234 if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
9235 Parser.eatToEndOfStatement();
9236 Error(SRegLoc, "register name expected");
9237 return false;
9238 }
9239
9240 // Shouldn't be anything else.
9241 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
9242 Parser.eatToEndOfStatement();
9243 Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
9244 return false;
9245 }
9246
9247 Parser.Lex(); // Consume the EndOfStatement
9248
9249 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) {
9250 Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
9251 return false;
9252 }
9253
9254 return false;
9255 }
9256
9257 /// parseDirectiveUnreq
9258 /// ::= .unreq registername
9259 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9260 MCAsmParser &Parser = getParser();
9261 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9262 Parser.eatToEndOfStatement();
9263 Error(L, "unexpected input in .unreq directive.");
9264 return false;
9265 }
9266 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9267 Parser.Lex(); // Eat the identifier.
9268 return false;
9269 }
9270
9271 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
9272 // before, if supported by the new target, or emit mapping symbols for the mode
9273 // switch.
9274 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
9275 if (WasThumb != isThumb()) {
9276 if (WasThumb && hasThumb()) {
9277 // Stay in Thumb mode
9278 SwitchMode();
9279 } else if (!WasThumb && hasARM()) {
9280 // Stay in ARM mode
9281 SwitchMode();
9282 } else {
9283 // Mode switch forced, because the new arch doesn't support the old mode.
9284 getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
9285 : MCAF_Code32);
9286 // Warn about the implicit mode switch. GAS does not switch modes here,
9287 // but instead stays in the old mode, reporting an error on any following
9288 // instructions as the mode does not exist on the target.
9289 Warning(Loc, Twine("new target does not support ") +
9290 (WasThumb ? "thumb" : "arm") + " mode, switching to " +
9291 (!WasThumb ? "thumb" : "arm") + " mode");
9292 }
9293 }
9294 }
9295
9296 /// parseDirectiveArch
9297 /// ::= .arch token
9298 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9299 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9300
9301 unsigned ID = ARM::parseArch(Arch);
9302
9303 if (ID == ARM::AK_INVALID) {
9304 Error(L, "Unknown arch name");
9305 return false;
9306 }
9307
9308 bool WasThumb = isThumb();
9310 MCSubtargetInfo &STI = copySTI();
9311 STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9312 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9313 FixModeAfterArchChange(WasThumb, L);
9314
9315 getTargetStreamer().emitArch(ID);
9316 return false;
9317 }
9318
9319 /// parseDirectiveEabiAttr
9320 /// ::= .eabi_attribute int, int [, "str"]
9321 /// ::= .eabi_attribute Tag_name, int [, "str"]
9322 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
9323 MCAsmParser &Parser = getParser();
9324 int64_t Tag;
9325 SMLoc TagLoc;
9326 TagLoc = Parser.getTok().getLoc();
9327 if (Parser.getTok().is(AsmToken::Identifier)) {
9328 StringRef Name = Parser.getTok().getIdentifier();
9329 Tag = ARMBuildAttrs::AttrTypeFromString(Name);
9330 if (Tag == -1) {
9331 Error(TagLoc, "attribute name not recognised: " + Name);
9332 Parser.eatToEndOfStatement();
9333 return false;
9334 }
9335 Parser.Lex();
9336 } else {
9337 const MCExpr *AttrExpr;
9338
9339 TagLoc = Parser.getTok().getLoc();
9340 if (Parser.parseExpression(AttrExpr)) {
9341 Parser.eatToEndOfStatement();
9342 return false;
9343 }
9344
9345 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
9346 if (!CE) {
9347 Error(TagLoc, "expected numeric constant");
9348 Parser.eatToEndOfStatement();
9349 return false;
9350 }
9351
9352 Tag = CE->getValue();
9353 }
9354
9355 if (Parser.getTok().isNot(AsmToken::Comma)) {
9356 Error(Parser.getTok().getLoc(), "comma expected");
9357 Parser.eatToEndOfStatement();
9358 return false;
9359 }
9360 Parser.Lex(); // skip comma
9361
9362 StringRef StringValue = "";
9363 bool IsStringValue = false;
9364
9365 int64_t IntegerValue = 0;
9366 bool IsIntegerValue = false;
9367
9368 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
9369 IsStringValue = true;
9370 else if (Tag == ARMBuildAttrs::compatibility) {
9371 IsStringValue = true;
9372 IsIntegerValue = true;
9373 } else if (Tag < 32 || Tag % 2 == 0)
9374 IsIntegerValue = true;
9375 else if (Tag % 2 == 1)
9376 IsStringValue = true;
9377 else
9378 llvm_unreachable("invalid tag type");
9379
9380 if (IsIntegerValue) {
9381 const MCExpr *ValueExpr;
9382 SMLoc ValueExprLoc = Parser.getTok().getLoc();
9383 if (Parser.parseExpression(ValueExpr)) {
9384 Parser.eatToEndOfStatement();
9385 return false;
9386 }
9387
9388 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
9389 if (!CE) {
9390 Error(ValueExprLoc, "expected numeric constant");
9391 Parser.eatToEndOfStatement();
9392 return false;
9393 }
9394
9395 IntegerValue = CE->getValue();
9396 }
9397
9398 if (Tag == ARMBuildAttrs::compatibility) {
9401 if (Parser.getTok().isNot(AsmToken::Comma)) {
9402 Error(Parser.getTok().getLoc(), "comma expected");
9403 Parser.eatToEndOfStatement();
9404 return false;
9405 } else {
9406 Parser.Lex();
9407 }
9408 }
9409
9410 if (IsStringValue) {
9411 if (Parser.getTok().isNot(AsmToken::String)) {
9412 Error(Parser.getTok().getLoc(), "bad string constant");
9413 Parser.eatToEndOfStatement();
9414 return false;
9415 }
9416
9417 StringValue = Parser.getTok().getStringContents();
9418 Parser.Lex();
9419 }
9420
9421 if (IsIntegerValue && IsStringValue) {
9422 assert(Tag == ARMBuildAttrs::compatibility);
9423 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
9424 } else if (IsIntegerValue)
9425 getTargetStreamer().emitAttribute(Tag, IntegerValue);
9426 else if (IsStringValue)
9427 getTargetStreamer().emitTextAttribute(Tag, StringValue);
9428 return false;
9429 }
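// Illustrative usage (sketch), assuming the usual AEABI tag numbering: the tag
// may be given by number or by name, and string-valued tags take a string:
//     .eabi_attribute 6, 10                      @ Tag_CPU_arch = ARM v7
//     .eabi_attribute Tag_CPU_name, "cortex-a8"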
9430
9431 /// parseDirectiveCPU
9432 /// ::= .cpu str
9433 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9434 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9435 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9436
9437 // FIXME: This is using table-gen data, but should be moved to
9438 // ARMTargetParser once that is table-gen'd.
9439 if (!getSTI().isCPUStringValid(CPU)) {
9440 Error(L, "Unknown CPU name");
9441 return false;
9442 }
9443
9444 bool WasThumb = isThumb();
9445 MCSubtargetInfo &STI = copySTI();
9446 STI.setDefaultFeatures(CPU, "");
9447 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9448 FixModeAfterArchChange(WasThumb, L);
9449
9450 return false;
9451 }
9452 /// parseDirectiveFPU
9453 /// ::= .fpu str
9454 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9455 SMLoc FPUNameLoc = getTok().getLoc();
9456 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9457
9458 unsigned ID = ARM::parseFPU(FPU);
9459 std::vector<const char *> Features;
9460 if (!ARM::getFPUFeatures(ID, Features)) {
9461 Error(FPUNameLoc, "Unknown FPU name");
9462 return false;
9463 }
9464
9465 MCSubtargetInfo &STI = copySTI();
9466 for (auto Feature : Features)
9467 STI.ApplyFeatureFlag(Feature);
9468 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9469
9470 getTargetStreamer().emitFPU(ID);
9471 return false;
9472 }
9473
9474 /// parseDirectiveFnStart
9475 /// ::= .fnstart
9476 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9477 if (UC.hasFnStart()) {
9478 Error(L, ".fnstart starts before the end of previous one");
9479 UC.emitFnStartLocNotes();
9480 return false;
9481 }
9482
9483 // Reset the unwind directives parser state
9484 UC.reset();
9485
9486 getTargetStreamer().emitFnStart();
9487
9488 UC.recordFnStart(L);
9489 return false;
9490 }
9491
9492 /// parseDirectiveFnEnd
9493 /// ::= .fnend
9494 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9495 // Check the ordering of unwind directives
9496 if (!UC.hasFnStart()) {
9497 Error(L, ".fnstart must precede .fnend directive");
9498 return false;
9499 }
9500
9501 getTargetStreamer().emitFnEnd();
9502
9503 // Reset the unwind directives parser state
9504 UC.reset();
9505 return false;
9506 }
9507
9508 /// parseDirectiveCantUnwind
9509 /// ::= .cantunwind
9510 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9511 UC.recordCantUnwind(L);
9512
9513 // Check the ordering of unwind directives
9514 if (!UC.hasFnStart()) {
9515 Error(L, ".fnstart must precede .cantunwind directive");
9516 return false;
9517 }
9518 if (UC.hasHandlerData()) {
9519 Error(L, ".cantunwind can't be used with .handlerdata directive");
9520 UC.emitHandlerDataLocNotes();
9521 return false;
9522 }
9523 if (UC.hasPersonality()) {
9524 Error(L, ".cantunwind can't be used with .personality directive");
9525 UC.emitPersonalityLocNotes();
9526 return false;
9527 }
9528
9529 getTargetStreamer().emitCantUnwind();
9530 return false;
9531 }
9532
9533 /// parseDirectivePersonality
9534 /// ::= .personality name
9535 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9536 MCAsmParser &Parser = getParser();
9537 bool HasExistingPersonality = UC.hasPersonality();
9538
9539 UC.recordPersonality(L);
9540
9541 // Check the ordering of unwind directives
9542 if (!UC.hasFnStart()) {
9543 Error(L, ".fnstart must precede .personality directive");
9544 return false;
9545 }
9546 if (UC.cantUnwind()) {
9547 Error(L, ".personality can't be used with .cantunwind directive");
9548 UC.emitCantUnwindLocNotes();
9549 return false;
9550 }
9551 if (UC.hasHandlerData()) {
9552 Error(L, ".personality must precede .handlerdata directive");
9553 UC.emitHandlerDataLocNotes();
9554 return false;
9555 }
9556 if (HasExistingPersonality) {
9557 Parser.eatToEndOfStatement();
9558 Error(L, "multiple personality directives");
9559 UC.emitPersonalityLocNotes();
9560 return false;
9561 }
9562
9563 // Parse the name of the personality routine
9564 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9565 Parser.eatToEndOfStatement();
9566 Error(L, "unexpected input in .personality directive.");
9567 return false;
9568 }
9569 StringRef Name(Parser.getTok().getIdentifier());
9570 Parser.Lex();
9571
9572 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
9573 getTargetStreamer().emitPersonality(PR);
9574 return false;
9575 }
9576
9577 /// parseDirectiveHandlerData
9578 /// ::= .handlerdata
9579 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9580 UC.recordHandlerData(L);
9581
9582 // Check the ordering of unwind directives
9583 if (!UC.hasFnStart()) {
9584 Error(L, ".fnstart must precede .personality directive");
9585 return false;
9586 }
9587 if (UC.cantUnwind()) {
9588 Error(L, ".handlerdata can't be used with .cantunwind directive");
9589 UC.emitCantUnwindLocNotes();
9590 return false;
9591 }
9592
9593 getTargetStreamer().emitHandlerData();
9594 return false;
9595 }
9596
9597 /// parseDirectiveSetFP
9598 /// ::= .setfp fpreg, spreg [, offset]
9599 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
9600 MCAsmParser &Parser = getParser();
9601 // Check the ordering of unwind directives
9602 if (!UC.hasFnStart()) {
9603 Error(L, ".fnstart must precede .setfp directive");
9604 return false;
9605 }
9606 if (UC.hasHandlerData()) {
9607 Error(L, ".setfp must precede .handlerdata directive");
9608 return false;
9609 }
9610
9611 // Parse fpreg
9612 SMLoc FPRegLoc = Parser.getTok().getLoc();
9613 int FPReg = tryParseRegister();
9614 if (FPReg == -1) {
9615 Error(FPRegLoc, "frame pointer register expected");
9616 return false;
9617 }
9618
9619 // Consume comma
9620 if (Parser.getTok().isNot(AsmToken::Comma)) {
9621 Error(Parser.getTok().getLoc(), "comma expected");
9622 return false;
9623 }
9624 Parser.Lex(); // skip comma
9625
9626 // Parse spreg
9627 SMLoc SPRegLoc = Parser.getTok().getLoc();
9628 int SPReg = tryParseRegister();
9629 if (SPReg == -1) {
9630 Error(SPRegLoc, "stack pointer register expected");
9631 return false;
9632 }
9633
9634 if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
9635 Error(SPRegLoc, "register should be either $sp or the latest fp register");
9636 return false;
9637 }
9638
9639 // Update the frame pointer register
9640 UC.saveFPReg(FPReg);
9641
9642 // Parse offset
9643 int64_t Offset = 0;
9644 if (Parser.getTok().is(AsmToken::Comma)) {
9645 Parser.Lex(); // skip comma
9646
9647 if (Parser.getTok().isNot(AsmToken::Hash) &&
9648 Parser.getTok().isNot(AsmToken::Dollar)) {
9649 Error(Parser.getTok().getLoc(), "'#' expected");
9650 return false;
9651 }
9652 Parser.Lex(); // skip hash token.
9653
9654 const MCExpr *OffsetExpr;
9655 SMLoc ExLoc = Parser.getTok().getLoc();
9656 SMLoc EndLoc;
9657 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9658 Error(ExLoc, "malformed setfp offset");
9659 return false;
9660 }
9661 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9662 if (!CE) {
9663 Error(ExLoc, "setfp offset must be an immediate");
9664 return false;
9665 }
9666
9667 Offset = CE->getValue();
9668 }
9669
9670 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
9671 static_cast<unsigned>(SPReg), Offset);
9672 return false;
9673 }
9674
9675 /// parseDirectivePad
9676 /// ::= .pad offset
9677 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9678 MCAsmParser &Parser = getParser();
9679 // Check the ordering of unwind directives
9680 if (!UC.hasFnStart()) {
9681 Error(L, ".fnstart must precede .pad directive");
9682 return false;
9683 }
9684 if (UC.hasHandlerData()) {
9685 Error(L, ".pad must precede .handlerdata directive");
9686 return false;
9687 }
9688
9689 // Parse the offset
9690 if (Parser.getTok().isNot(AsmToken::Hash) &&
9691 Parser.getTok().isNot(AsmToken::Dollar)) {
9692 Error(Parser.getTok().getLoc(), "'#' expected");
9693 return false;
9694 }
9695 Parser.Lex(); // skip hash token.
9696
9697 const MCExpr *OffsetExpr;
9698 SMLoc ExLoc = Parser.getTok().getLoc();
9699 SMLoc EndLoc;
9700 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9701 Error(ExLoc, "malformed pad offset");
9702 return false;
9703 }
9704 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9705 if (!CE) {
9706 Error(ExLoc, "pad offset must be an immediate");
9707 return false;
9708 }
9709
9710 getTargetStreamer().emitPad(CE->getValue());
9711 return false;
9712 }
9713
9714 /// parseDirectiveRegSave
9715 /// ::= .save { registers }
9716 /// ::= .vsave { registers }
9717 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
9718 // Check the ordering of unwind directives
9719 if (!UC.hasFnStart()) {
9720 Error(L, ".fnstart must precede .save or .vsave directives");
9721 return false;
9722 }
9723 if (UC.hasHandlerData()) {
9724 Error(L, ".save or .vsave must precede .handlerdata directive");
9725 return false;
9726 }
9727
9728 // RAII object to make sure parsed operands are deleted.
9729 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
9730
9731 // Parse the register list
9732 if (parseRegisterList(Operands))
9733 return false;
9734 ARMOperand &Op = (ARMOperand &)*Operands[0];
9735 if (!IsVector && !Op.isRegList()) {
9736 Error(L, ".save expects GPR registers");
9737 return false;
9738 }
9739 if (IsVector && !Op.isDPRRegList()) {
9740 Error(L, ".vsave expects DPR registers");
9741 return false;
9742 }
9743
9744 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
9745 return false;
9746 }
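// Illustrative usage (sketch): a typical EHABI-annotated prologue using the
// unwind directives parsed above:
//     .fnstart
//     push   {r4, r11, lr}
//     .save  {r4, r11, lr}
//     add    r11, sp, #4
//     .setfp r11, sp, #4
//     sub    sp, sp, #16
//     .pad   #16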
9747
9748 /// parseDirectiveInst
9749 /// ::= .inst opcode [, ...]
9750 /// ::= .inst.n opcode [, ...]
9751 /// ::= .inst.w opcode [, ...]
9752 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
9753 MCAsmParser &Parser = getParser();
9754 int Width;
9755
9756 if (isThumb()) {
9757 switch (Suffix) {
9758 case 'n':
9759 Width = 2;
9760 break;
9761 case 'w':
9762 Width = 4;
9763 break;
9764 default:
9765 Parser.eatToEndOfStatement();
9766 Error(Loc, "cannot determine Thumb instruction size, "
9767 "use inst.n/inst.w instead");
9768 return false;
9769 }
9770 } else {
9771 if (Suffix) {
9772 Parser.eatToEndOfStatement();
9773 Error(Loc, "width suffixes are invalid in ARM mode");
9774 return false;
9775 }
9776 Width = 4;
9777 }
9778
9779 if (getLexer().is(AsmToken::EndOfStatement)) {
9780 Parser.eatToEndOfStatement();
9781 Error(Loc, "expected expression following directive");
9782 return false;
9783 }
9784
9785 for (;;) {
9786 const MCExpr *Expr;
9787
9788 if (getParser().parseExpression(Expr)) {
9789 Error(Loc, "expected expression");
9790 return false;
9791 }
9792
9793 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
9794 if (!Value) {
9795 Error(Loc, "expected constant expression");
9796 return false;
9797 }
9798
9799 switch (Width) {
9800 case 2:
9801 if (Value->getValue() > 0xffff) {
9802 Error(Loc, "inst.n operand is too big, use inst.w instead");
9803 return false;
9804 }
9805 break;
9806 case 4:
9807 if (Value->getValue() > 0xffffffff) {
9808 Error(Loc,
9809 StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
9810 return false;
9811 }
9812 break;
9813 default:
9814 llvm_unreachable("only supported widths are 2 and 4");
9815 }
9816
9817 getTargetStreamer().emitInst(Value->getValue(), Suffix);
9818
9819 if (getLexer().is(AsmToken::EndOfStatement))
9820 break;
9821
9822 if (getLexer().isNot(AsmToken::Comma)) {
9823 Error(Loc, "unexpected token in directive");
9824 return false;
9825 }
9826
9827 Parser.Lex();
9828 }
9829
9830 Parser.Lex();
9831 return false;
9832 }
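// Illustrative usage (sketch), assuming Thumb mode:
//     .inst.n 0xbf00        @ 16-bit Thumb NOP
//     .inst.w 0xf3af8000    @ 32-bit Thumb2 NOP.W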
9833
9834 /// parseDirectiveLtorg
9835 /// ::= .ltorg | .pool
9836 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9837 getTargetStreamer().emitCurrentConstantPool();
9838 return false;
9839 }
9840
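/// parseDirectiveEven
/// ::= .even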
9841 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
9842 const MCSection *Section = getStreamer().getCurrentSection().first;
9843
9844 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9845 TokError("unexpected token in directive");
9846 return false;
9847 }
9848
9849 if (!Section) {
9850 getStreamer().InitSections(false);
9851 Section = getStreamer().getCurrentSection().first;
9852 }
9853
9854 assert(Section && "must have section to emit alignment");
9855 if (Section->UseCodeAlign())
9856 getStreamer().EmitCodeAlignment(2);
9857 else
9858 getStreamer().EmitValueToAlignment(2);
9859
9860 return false;
9861 }
9862
9863 /// parseDirectivePersonalityIndex
9864 /// ::= .personalityindex index
9865 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
9866 MCAsmParser &Parser = getParser();
9867 bool HasExistingPersonality = UC.hasPersonality();
9868
9869 UC.recordPersonalityIndex(L);
9870
9871 if (!UC.hasFnStart()) {
9872 Parser.eatToEndOfStatement();
9873 Error(L, ".fnstart must precede .personalityindex directive");
9874 return false;
9875 }
9876 if (UC.cantUnwind()) {
9877 Parser.eatToEndOfStatement();
9878 Error(L, ".personalityindex cannot be used with .cantunwind");
9879 UC.emitCantUnwindLocNotes();
9880 return false;
9881 }
9882 if (UC.hasHandlerData()) {
9883 Parser.eatToEndOfStatement();
9884 Error(L, ".personalityindex must precede .handlerdata directive");
9885 UC.emitHandlerDataLocNotes();
9886 return false;
9887 }
9888 if (HasExistingPersonality) {
9889 Parser.eatToEndOfStatement();
9890 Error(L, "multiple personality directives");
9891 UC.emitPersonalityLocNotes();
9892 return false;
9893 }
9894
9895 const MCExpr *IndexExpression;
9896 SMLoc IndexLoc = Parser.getTok().getLoc();
9897 if (Parser.parseExpression(IndexExpression)) {
9898 Parser.eatToEndOfStatement();
9899 return false;
9900 }
9901
9902 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
9903 if (!CE) {
9904 Parser.eatToEndOfStatement();
9905 Error(IndexLoc, "index must be a constant number");
9906 return false;
9907 }
9908 if (CE->getValue() < 0 ||
9909 CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
9910 Parser.eatToEndOfStatement();
9911 Error(IndexLoc, "personality routine index should be in range [0-3]");
9912 return false;
9913 }
9914
9915 getTargetStreamer().emitPersonalityIndex(CE->getValue());
9916 return false;
9917 }
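// Illustrative usage (sketch):
//     .personalityindex 0    @ use __aeabi_unwind_cpp_pr0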
9918
9919 /// parseDirectiveUnwindRaw
9920 /// ::= .unwind_raw offset, opcode [, opcode...]
9921 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
9922 MCAsmParser &Parser = getParser();
9923 if (!UC.hasFnStart()) {
9924 Parser.eatToEndOfStatement();
9925 Error(L, ".fnstart must precede .unwind_raw directives");
9926 return false;
9927 }
9928
9929 int64_t StackOffset;
9930
9931 const MCExpr *OffsetExpr;
9932 SMLoc OffsetLoc = getLexer().getLoc();
9933 if (getLexer().is(AsmToken::EndOfStatement) ||
9934 getParser().parseExpression(OffsetExpr)) {
9935 Error(OffsetLoc, "expected expression");
9936 Parser.eatToEndOfStatement();
9937 return false;
9938 }
9939
9940 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9941 if (!CE) {
9942 Error(OffsetLoc, "offset must be a constant");
9943 Parser.eatToEndOfStatement();
9944 return false;
9945 }
9946
9947 StackOffset = CE->getValue();
9948
9949 if (getLexer().isNot(AsmToken::Comma)) {
9950 Error(getLexer().getLoc(), "expected comma");
9951 Parser.eatToEndOfStatement();
9952 return false;
9953 }
9954 Parser.Lex();
9955
9956 SmallVector<uint8_t, 16> Opcodes;
9957 for (;;) {
9958 const MCExpr *OE;
9959
9960 SMLoc OpcodeLoc = getLexer().getLoc();
9961 if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
9962 Error(OpcodeLoc, "expected opcode expression");
9963 Parser.eatToEndOfStatement();
9964 return false;
9965 }
9966
9967 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
9968 if (!OC) {
9969 Error(OpcodeLoc, "opcode value must be a constant");
9970 Parser.eatToEndOfStatement();
9971 return false;
9972 }
9973
9974 const int64_t Opcode = OC->getValue();
9975 if (Opcode & ~0xff) {
9976 Error(OpcodeLoc, "invalid opcode");
9977 Parser.eatToEndOfStatement();
9978 return false;
9979 }
9980
9981 Opcodes.push_back(uint8_t(Opcode));
9982
9983 if (getLexer().is(AsmToken::EndOfStatement))
9984 break;
9985
9986 if (getLexer().isNot(AsmToken::Comma)) {
9987 Error(getLexer().getLoc(), "unexpected token in directive");
9988 Parser.eatToEndOfStatement();
9989 return false;
9990 }
9991
9992 Parser.Lex();
9993 }
9994
9995 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
9996
9997 Parser.Lex();
9998 return false;
9999 }
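// Illustrative usage (sketch): a stack adjustment of 4 followed by "finish",
// written directly as raw unwind opcodes:
//     .unwind_raw 4, 0x00, 0xb0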
10000
10001 /// parseDirectiveTLSDescSeq
10002 /// ::= .tlsdescseq tls-variable
10003 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10004 MCAsmParser &Parser = getParser();
10005
10006 if (getLexer().isNot(AsmToken::Identifier)) {
10007 TokError("expected variable after '.tlsdescseq' directive");
10008 Parser.eatToEndOfStatement();
10009 return false;
10010 }
10011
10012 const MCSymbolRefExpr *SRE =
10013 MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10014 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10015 Lex();
10016
10017 if (getLexer().isNot(AsmToken::EndOfStatement)) {
10018 Error(Parser.getTok().getLoc(), "unexpected token");
10019 Parser.eatToEndOfStatement();
10020 return false;
10021 }
10022
10023 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10024 return false;
10025 }
10026
10027 /// parseDirectiveMovSP
10028 /// ::= .movsp reg [, #offset]
10029 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10030 MCAsmParser &Parser = getParser();
10031 if (!UC.hasFnStart()) {
10032 Parser.eatToEndOfStatement();
10033 Error(L, ".fnstart must precede .movsp directives");
10034 return false;
10035 }
10036 if (UC.getFPReg() != ARM::SP) {
10037 Parser.eatToEndOfStatement();
10038 Error(L, "unexpected .movsp directive");
10039 return false;
10040 }
10041
10042 SMLoc SPRegLoc = Parser.getTok().getLoc();
10043 int SPReg = tryParseRegister();
10044 if (SPReg == -1) {
10045 Parser.eatToEndOfStatement();
10046 Error(SPRegLoc, "register expected");
10047 return false;
10048 }
10049
10050 if (SPReg == ARM::SP || SPReg == ARM::PC) {
10051 Parser.eatToEndOfStatement();
10052 Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10053 return false;
10054 }
10055
10056 int64_t Offset = 0;
10057 if (Parser.getTok().is(AsmToken::Comma)) {
10058 Parser.Lex();
10059
10060 if (Parser.getTok().isNot(AsmToken::Hash)) {
10061 Error(Parser.getTok().getLoc(), "expected #constant");
10062 Parser.eatToEndOfStatement();
10063 return false;
10064 }
10065 Parser.Lex();
10066
10067 const MCExpr *OffsetExpr;
10068 SMLoc OffsetLoc = Parser.getTok().getLoc();
10069 if (Parser.parseExpression(OffsetExpr)) {
10070 Parser.eatToEndOfStatement();
10071 Error(OffsetLoc, "malformed offset expression");
10072 return false;
10073 }
10074
10075 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10076 if (!CE) {
10077 Parser.eatToEndOfStatement();
10078 Error(OffsetLoc, "offset must be an immediate constant");
10079 return false;
10080 }
10081
10082 Offset = CE->getValue();
10083 }
10084
10085 getTargetStreamer().emitMovSP(SPReg, Offset);
10086 UC.saveFPReg(SPReg);
10087
10088 return false;
10089 }
10090
10091 /// parseDirectiveObjectArch
10092 /// ::= .object_arch name
10093 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10094 MCAsmParser &Parser = getParser();
10095 if (getLexer().isNot(AsmToken::Identifier)) {
10096 Error(getLexer().getLoc(), "unexpected token");
10097 Parser.eatToEndOfStatement();
10098 return false;
10099 }
10100
10101 StringRef Arch = Parser.getTok().getString();
10102 SMLoc ArchLoc = Parser.getTok().getLoc();
10103 Lex();
10104
10105 unsigned ID = ARM::parseArch(Arch);
10106
10107 if (ID == ARM::AK_INVALID) {
10108 Error(ArchLoc, "unknown architecture '" + Arch + "'");
10109 Parser.eatToEndOfStatement();
10110 return false;
10111 }
10112
10113 getTargetStreamer().emitObjectArch(ID);
10114
10115 if (getLexer().isNot(AsmToken::EndOfStatement)) {
10116 Error(getLexer().getLoc(), "unexpected token");
10117 Parser.eatToEndOfStatement();
10118 }
10119
10120 return false;
10121 }
10122
10123 /// parseDirectiveAlign
10124 /// ::= .align
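/// On ARM a bare ".align" requests 2**2 (4-byte) alignment; any form with an
/// operand is deliberately left to the generic directive parser.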
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return true;

  // '.align' is target specifically handled to mean 2**2 byte alignment.
  const MCSection *Section = getStreamer().getCurrentSection().first;
  assert(Section && "must have section to emit alignment");
  if (Section->UseCodeAlign())
    getStreamer().EmitCodeAlignment(4, 0);
  else
    getStreamer().EmitValueToAlignment(4, 0, 1, 0);

  return false;
}

/// parseDirectiveThumbSet
/// ::= .thumb_set name, value
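/// For example (illustrative): ".thumb_set alias, func", which behaves like
/// ".set alias, func" but additionally marks "alias" as a Thumb function
/// symbol.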
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
  MCAsmParser &Parser = getParser();

  StringRef Name;
  if (Parser.parseIdentifier(Name)) {
    TokError("expected identifier after '.thumb_set'");
    Parser.eatToEndOfStatement();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma)) {
    TokError("expected comma after name '" + Name + "'");
    Parser.eatToEndOfStatement();
    return false;
  }
  Lex();

  MCSymbol *Sym;
  const MCExpr *Value;
  if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
                                               Parser, Sym, Value))
    return true;

  getTargetStreamer().emitThumbSet(Sym, Value);
  return false;
}

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget);
  RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget);
  RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget);
}

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"

// FIXME: This structure should be moved inside ARMTargetParser
// when we start to table-generate them, and we can use the ARM
// flags below, that were generated by table-gen.
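// Each entry maps an extension kind from the target parser to the base
// architecture it requires (ArchCheck) and the subtarget features it toggles;
// entries with an empty feature set are recognised but not supported.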
static const struct {
  const unsigned Kind;
  const uint64_t ArchCheck;
  const FeatureBitset Features;
} Extensions[] = {
  { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
  { ARM::AEK_CRYPTO, Feature_HasV8,
    {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
  { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
    {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} },
  { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
  { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
  // FIXME: Only available in A-class, isel not predicated
  { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
  { ARM::AEK_FP16, Feature_HasV8_2a,
    {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
  { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
  // FIXME: Unsupported extensions.
  { ARM::AEK_OS, Feature_None, {} },
  { ARM::AEK_IWMMXT, Feature_None, {} },
  { ARM::AEK_IWMMXT2, Feature_None, {} },
  { ARM::AEK_MAVERICK, Feature_None, {} },
  { ARM::AEK_XSCALE, Feature_None, {} },
};

/// parseDirectiveArchExtension
/// ::= .arch_extension [no]feature
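/// For example (illustrative): ".arch_extension crc" enables the CRC
/// instructions, and ".arch_extension nocrc" disables them again.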
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
  MCAsmParser &Parser = getParser();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(getLexer().getLoc(), "unexpected token");
    Parser.eatToEndOfStatement();
    return false;
  }

  StringRef Name = Parser.getTok().getString();
  SMLoc ExtLoc = Parser.getTok().getLoc();
  Lex();

  bool EnableFeature = true;
  if (Name.startswith_lower("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }
  unsigned FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    Error(ExtLoc, "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    if (Extension.Features.none())
      report_fatal_error("unsupported architectural extension: " + Name);

    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) {
      Error(ExtLoc, "architectural extension '" + Name + "' is not "
            "allowed for the current base architecture");
      return false;
    }

    MCSubtargetInfo &STI = copySTI();
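    // When enabling, toggle only the requested feature bits that are not yet
    // set; when disabling, toggle only those that are currently set, so that
    // ToggleFeature() flips each relevant bit exactly once.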
    FeatureBitset ToggleFeatures = EnableFeature
      ? (~STI.getFeatureBits() & Extension.Features)
      : ( STI.getFeatureBits() & Extension.Features);

    uint64_t Features =
      ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
    return false;
  }

  Error(ExtLoc, "unknown architectural extension: " + Name);
  Parser.eatToEndOfStatement();
  return false;
}

// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                  unsigned Kind) {
  ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  switch (Kind) {
  default: break;
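  // MCK__35_0 is the auto-generated match class for the literal token "#0"
  // (the '#' character is mangled to its ASCII code, 35).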
  case MCK__35_0:
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 0)
          return Match_Success;
    break;
  case MCK_ModImm:
    if (Op.isImm()) {
      const MCExpr *SOExpr = Op.getImm();
      int64_t Value;
      if (!SOExpr->evaluateAsAbsolute(Value))
        return Match_Success;
      assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
             "expression value must be representable in 32 bits");
    }
    break;
  case MCK_rGPR:
    if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
      return Match_Success;
    break;
  case MCK_GPRPair:
    if (Op.isReg() &&
        MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
      return Match_Success;
    break;
  }
  return Match_InvalidOperand;
}