//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

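// Minimal object-writer stub: relocations are not handled (recordRelocation
// asserts), and writeObject simply streams out the raw section data.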
class AMDGPUMCObjectWriter : public MCObjectWriter {
public:
  AMDGPUMCObjectWriter(raw_pwrite_stream &OS) : MCObjectWriter(OS, true) {}
  void executePostLayoutBinding(MCAssembler &Asm,
                                const MCAsmLayout &Layout) override {
    //XXX: Implement if necessary.
  }
  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
                        const MCFragment *Fragment, const MCFixup &Fixup,
                        MCValue Target, bool &IsPCRel,
                        uint64_t &FixedValue) override {
    assert(!"Not implemented");
  }

  void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;

};

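// Common AMDGPU assembler backend. Target fixups are applied in applyFixup
// below; instruction relaxation is not supported, so the relaxation hooks
// are trivial.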
class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T)
    : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; }
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    assert(!"Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} //End anonymous namespace

void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
                                       const MCAsmLayout &Layout) {
  for (MCAssembler::iterator I = Asm.begin(), E = Asm.end(); I != E; ++I) {
    Asm.writeSectionData(&*I, Layout);
  }
}

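// Number of bytes in the instruction stream covered by a fixup of the given
// generic kind.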
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                  unsigned DataSize, uint64_t Value,
                                  bool IsPCRel) const {

  switch ((unsigned)Fixup.getKind()) {
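    // The SOPP branch immediate is a signed 16-bit offset counted in 4-byte
    // words from the instruction following the branch, hence the -4 and the
    // divide by 4 before range-checking and storing the value.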
    case AMDGPU::fixup_si_sopp_br: {
      int64_t BrImm = ((int64_t)Value - 4) / 4;
      if (!isInt<16>(BrImm))
        report_fatal_error("branch size exceeds simm16");

      uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
      *Dst = BrImm;
      break;
    }

    default: {
      // FIXME: Copied from AArch64
      unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
      if (!Value)
        return; // Doesn't change encoding.
      MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

      // Shift the value into position.
      Value <<= Info.TargetOffset;

      unsigned Offset = Fixup.getOffset();
      assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

      // For each byte of the fragment that the fixup touches, mask in the
      // bits from the fixup value.
      for (unsigned i = 0; i != NumBytes; ++i)
        Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
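  // This table must match the order of the fixup kinds declared in
  // AMDGPUFixupKinds.h, since it is indexed by Kind - FirstTargetFixupKind.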
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

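// Emit Count bytes of padding into the object stream; zero-fill is used here.
// Returning true tells the generic assembler the request was satisfied.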
bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  OW->WriteZeros(Count);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;

public:
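  // amdgcn targets are emitted as 64-bit ELF; the AMDHSA OS ABI uses
  // relocations with explicit addends (RELA).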
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU) {
  // Use 64-bit ELF for amdgcn
  return new ELFAMDGPUAsmBackend(T, TT);
}