//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for the 12-bit unsigned
  /// immediate attached to a load, store or prfm instruction. If the operand
  /// requires a relocation, record it and return zero in that part of the
  /// encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend
  /// load/store instruction: bit 0 is whether a shift is present, bit 1 is
  /// whether the operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate
  /// operand of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template <int hasRs, int hasRt2>
  unsigned fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  template <unsigned Multiple>
  uint32_t EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t EncodePPR_p8to15(const MCInst &MI, unsigned OpIdx,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;
  uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  template <unsigned BaseReg>
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template <uint32_t FixupKind>
uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 13-14 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

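// Vector right-shift amounts for N-bit elements are encoded as (N - shift),
// so each of the helpers below returns the element size minus the immediate
// operand.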
uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

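// The corresponding left-shift helpers return the immediate operand minus the
// element size.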
uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

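/// EncodeRegAsMultipleOf - Encode a register from a class whose members are
/// spaced Multiple registers apart by dividing its hardware encoding by
/// Multiple.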
template <unsigned Multiple>
uint32_t
AArch64MCCodeEmitter::EncodeRegAsMultipleOf(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  return RegVal / Multiple;
}

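/// EncodePPR_p8to15 - Encode a predicate register from the P8-P15 range as a
/// 3-bit value relative to P8.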
uint32_t
AArch64MCCodeEmitter::EncodePPR_p8to15(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - AArch64::P8;
}

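/// EncodeZPR2StridedRegisterClass - Encode the first register of a strided
/// ZPR pair as a 4-bit value: bit 4 of the register encoding followed by its
/// low three bits.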
uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 1;
  unsigned Zt = RegVal & 0x7;
  return T | Zt;
}

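/// EncodeZPR4StridedRegisterClass - Encode the first register of a strided
/// ZPR quad as a 3-bit value: bit 4 of the register encoding followed by its
/// low two bits.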
uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 2;
  unsigned Zt = RegVal & 0x3;
  return T | Zt;
}

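/// EncodeMatrixTileListRegisterClass - Encode a list of ZA tiles as an 8-bit
/// register mask taken directly from the operand.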
uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

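/// encodeMatrixIndexGPR32 - Encode a matrix index GPR32 as its offset from
/// BaseReg.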
template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - BaseReg;
}

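/// getImm8OptLsl - Return the encoded value for an 8-bit immediate with an
/// optional LSL #8 shift: the low eight bits of the immediate with the shift
/// flag in bit 8.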
uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::SPACE) {
    // SPACE just increases basic block size, in both cases no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(OS, Binary, support::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template <int hasRs, int hasRt2>
unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}