//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
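
  // This backend never relaxes instructions: fixupNeedsRelaxation() and
  // mayNeedRelaxation() always report false, so relaxInstruction() should be
  // unreachable.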
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override {
    return false;
  }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

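/// Returns the width in bytes of the field patched by the given fixup kind.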
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

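/// Adjusts \p Value for encoding into the instruction field. For
/// fixup_si_sopp_br the branch offset is encoded as a signed 16-bit count of
/// 4-byte words relative to the instruction following the branch (hardware
/// resolves the immediate against PC+4), hence the subtraction of 4 before
/// dividing. For example, a target 12 bytes past the start of the s_branch
/// encodes as (12 - 4) / 4 = 2.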
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
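  // Bytes are patched least-significant first, matching the little-endian
  // encoding declared in the constructor.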
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

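  // Generic fixup kinds are handled by the base MCAsmBackend; target-specific
  // kinds index the local table relative to FirstTargetFixupKind.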
  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

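/// Writes \p Count bytes of nop padding: any leading remainder that is not a
/// multiple of 4 bytes is zero-filled, and the rest is filled with encoded
/// s_nop instructions.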
bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
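  // ELF OS/ABI identification byte; stays ELFOSABI_NONE for OSes without a
  // dedicated AMDGPU value.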
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend);
  }
};

} // end anonymous namespace

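/// Entry point for creating the AMDGPU assembler backend; registered with the
/// TargetRegistry during the target's MC initialization.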
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
}