//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

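  // The bit-fields of these fixups all sit within the low 24 bits of the
  // instruction word, so only three bytes of the fragment can change.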
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

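// ADR and ADRP split their 21-bit immediate across the instruction word: the
// low two bits go into immlo (bits 30:29) and the high 19 bits into immhi
// (bits 23:5). AdrImmBits rearranges a linear immediate into that layout.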
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
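    // ADRP addresses 4 KiB pages: bits [32:12] of the byte offset form the
    // 21-bit page delta.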
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

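  // Recognize both the ELF R_AARCH64_* relocation names and the BFD_RELOC_*
  // aliases accepted by GNU as, so either form works with .reloc.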
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainereSizeInBytes - The number of bytes of the container
/// involved in big endian, or 0 if the item is little endian.
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
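    // Instructions are stored little-endian, so instruction bit 30 is bit 6
    // of the byte at offset 3.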
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
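  // 0xd503201f is the AArch64 NOP (HINT #0); emit its four little-endian
  // bytes once per remaining word.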
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
509 /// A "frameless" leaf function, where no non-volatile registers are
510 /// saved. The return remains in LR throughout the function.
511 UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,
512
513 /// No compact unwind encoding available. Instead the low 23-bits of
514 /// the compact unwind encoding is the offset of the DWARF FDE in the
515 /// __eh_frame section. This mode is never used in object files. It is only
516 /// generated by the linker in final linked images, which have only DWARF info
517 /// for a function.
518 UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
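  /// For example, a 64-byte frame encodes as (64 / 16) << 12 = 0x4000, which
  /// lands in the UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK bit field.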
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    int CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

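        // A frame record requires .cfi_def_cfa to be followed by .cfi_offset
        // directives for LR and FP; anything else falls back to DWARF.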
        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}