//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H

#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include <array>

namespace lld {
std::string toString(elf::RelType type);

namespace elf {
class Defined;
class InputFile;
class Symbol;

class TargetInfo {
public:
  virtual uint32_t calcEFlags() const { return 0; }
  virtual RelExpr getRelExpr(RelType type, const Symbol &s,
                             const uint8_t *loc) const = 0;
  virtual RelType getDynRel(RelType type) const { return 0; }
  virtual void writeGotPltHeader(uint8_t *buf) const {}
  virtual void writeGotHeader(uint8_t *buf) const {}
  virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
  virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }

  // If lazy binding is supported, the first entry of the PLT has code
  // to call the dynamic linker to resolve PLT entries the first time
  // they are called. This function writes that code.
  virtual void writePltHeader(uint8_t *buf) const {}

  virtual void writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}
  virtual void writeIplt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
    // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
    writePlt(buf, sym, pltEntryAddr);
  }
  virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
  virtual void addPltHeaderSymbols(InputSection &isec) const {}
  virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}

  // Returns true if a relocation only uses the low bits of a value such that
  // all those bits are in the same page. For example, a relocation that only
  // uses the low 12 bits on a system with 4k pages. If this is true, the bits
  // will always have the same value at runtime and we don't have to emit a
  // dynamic relocation.
  virtual bool usesOnlyLowPageBits(RelType type) const;

  // Decide whether a Thunk is needed for the relocation from File
  // targeting S.
  virtual bool needsThunk(RelExpr expr, RelType relocType,
                          const InputFile *file, uint64_t branchAddr,
                          const Symbol &s, int64_t a) const;

  // On systems with range extensions we place collections of Thunks at
  // regular spacings that enable the majority of branches to reach the Thunks.
  // A value of 0 means range extension thunks are not supported.
  virtual uint32_t getThunkSectionSpacing() const { return 0; }

  // The function with a prologue starting at Loc was compiled with
  // -fsplit-stack and it calls a function compiled without. Adjust the
  // prologue to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
  // The symbol's st_other flags are needed on PowerPC64 to determine the
  // offset to the split-stack prologue.
  virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                                uint8_t stOther) const;

  // Return true if we can reach dst from src with RelType type.
  virtual bool inBranchRange(RelType type, uint64_t src,
                             uint64_t dst) const;

  virtual void relocate(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const = 0;
  void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
    relocate(loc, Relocation{R_NONE, type, 0, 0, nullptr}, val);
  }

  virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                 JumpModType val) const {}

  virtual ~TargetInfo();

  // This deletes a jump insn at the end of the section if it is a fall thru to
  // the next section. Further, if there is a conditional jump and a direct
  // jump consecutively, it tries to flip the conditional jump to convert the
  // direct jump into a fall thru and delete it. Returns true if a jump
  // instruction can be deleted.
  virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                     InputSection *nextIS) const {
    return false;
  }

  unsigned defaultCommonPageSize = 4096;
  unsigned defaultMaxPageSize = 4096;

  uint64_t getImageBase() const;

  // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
  bool gotBaseSymInGotPlt = true;

  RelType copyRel;
  RelType gotRel;
  RelType noneRel;
  RelType pltRel;
  RelType relativeRel;
  RelType iRelativeRel;
  RelType symbolicRel;
  RelType tlsDescRel;
  RelType tlsGotRel;
  RelType tlsModuleIndexRel;
  RelType tlsOffsetRel;
  unsigned pltEntrySize;
  unsigned pltHeaderSize;
  unsigned ipltEntrySize;

  // At least on x86_64, positions 1 and 2 are used by the first PLT entry
  // to support lazy loading.
  unsigned gotPltHeaderEntriesNum = 3;

  // On the PPC64 ELFv2 ABI, the first entry in the .got is the .TOC.
  unsigned gotHeaderEntriesNum = 0;

  bool needsThunks = false;

  // A 4-byte field corresponding to one or more trap instructions, used to pad
  // executable OutputSections.
  std::array<uint8_t, 4> trapInstr;

  // Stores the NOP instructions of different sizes for the target and is used
  // to pad sections that are relaxed.
  llvm::Optional<std::vector<std::vector<uint8_t>>> nopInstrs;

  // If a target needs to rewrite calls to __morestack to instead call
  // __morestack_non_split when a split-stack enabled caller calls a
  // non-split-stack callee, this is true. Otherwise it is false.
  bool needsMoreStackNonSplit = true;

  virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
  virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const;
  virtual void relaxGot(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const;
  virtual void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;

protected:
  // On FreeBSD x86_64 the first page cannot be mmapped.
  // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
  // installs this is set to 65536, so the first 16 pages cannot be used.
  // Given that, the smallest value that can be used here is 0x10000.
  uint64_t defaultImageBase = 0x10000;
};
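
// A minimal sketch of how a backend can implement this interface; the class
// name and the behavior below are hypothetical (real targets live in
// lld/ELF/Arch/*.cpp). At a minimum a target classifies relocations via
// getRelExpr() and applies them via relocate(); everything else has a
// reasonable default in TargetInfo.
class ExampleTargetInfo final : public TargetInfo {
public:
  ExampleTargetInfo() {
    // Byte sizes of the lazy-binding PLT header and of each PLT/IPLT entry.
    pltHeaderSize = 32;
    pltEntrySize = 16;
    ipltEntrySize = 16;
  }
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override {
    // Pretend every relocation computes a plain absolute value.
    return R_ABS;
  }
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override {
    // Store the computed value as a little-endian 64-bit word.
    llvm::support::endian::write64le(loc, val);
  }
};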

TargetInfo *getAArch64TargetInfo();
TargetInfo *getAMDGPUTargetInfo();
TargetInfo *getARMTargetInfo();
TargetInfo *getAVRTargetInfo();
TargetInfo *getHexagonTargetInfo();
TargetInfo *getMSP430TargetInfo();
TargetInfo *getPPC64TargetInfo();
TargetInfo *getPPCTargetInfo();
TargetInfo *getRISCVTargetInfo();
TargetInfo *getSPARCV9TargetInfo();
TargetInfo *getX86TargetInfo();
TargetInfo *getX86_64TargetInfo();
template <class ELFT> TargetInfo *getMipsTargetInfo();

struct ErrorPlace {
  InputSectionBase *isec;
  std::string loc;
};

// Returns input section and corresponding source string for the given location.
ErrorPlace getErrorPlace(const uint8_t *loc);

static inline std::string getErrorLocation(const uint8_t *loc) {
  return getErrorPlace(loc).loc;
}

void writePPC32GlinkSection(uint8_t *buf, size_t numEntries);

bool tryRelaxPPC64TocIndirection(const Relocation &rel, uint8_t *bufLoc);
unsigned getPPCDFormOp(unsigned secondaryOp);

// In the PowerPC64 ELFv2 ABI, a function can have two entry points. The first
// is the global entry point (GEP), which is typically used to initialize the
// TOC pointer in general purpose register 2. The second is the local entry
// point (LEP), which bypasses the TOC pointer initialization code. The
// offset between the GEP and the LEP is encoded in a function's st_other
// flags. This function returns the offset (in bytes) from the global entry
// point to the local entry point.
unsigned getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther);
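// For example, for a function symbol `f` on PowerPC64 (the name `f` is only
// illustrative), the local entry point is at
//   f.getVA() + getPPC64GlobalEntryToLocalEntryOffset(f.stOther)
// which is typically 8 bytes past the global entry point when the global
// entry consists of the usual two TOC-pointer setup instructions.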

// Returns true if a relocation is a small code model relocation that accesses
// the .toc section.
bool isPPC64SmallCodeModelTocReloc(RelType type);

// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInstruction(uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
uint64_t getAArch64Page(uint64_t expr);
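// For example, getAArch64Page(0x12345) returns 0x12000: the start of the
// 4 KiB page containing the address, which is what ADRP-style page-relative
// relocations operate on.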

extern const TargetInfo *target;
TargetInfo *getTarget();

template <class ELFT> bool isMipsPIC(const Defined *sym);

void reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
                      int64_t min, uint64_t max);
void reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
                      const Twine &msg);

// Make sure that V can be represented as an N-bit signed integer.
inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
  if (v != llvm::SignExtend64(v, n))
    reportRangeError(loc, rel, Twine(v), llvm::minIntN(n), llvm::maxIntN(n));
}

// Make sure that V can be represented as an N-bit unsigned integer.
inline void checkUInt(uint8_t *loc, uint64_t v, int n, const Relocation &rel) {
  if ((v >> n) != 0)
    reportRangeError(loc, rel, Twine(v), 0, llvm::maxUIntN(n));
}

// Make sure that V can be represented as an N-bit signed or unsigned integer.
inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
                         const Relocation &rel) {
  // For the error message we should cast V to a signed integer so that error
  // messages show a small negative value rather than an extremely large one.
  if (v != (uint64_t)llvm::SignExtend64(v, n) && (v >> n) != 0)
    reportRangeError(loc, rel, Twine((int64_t)v), llvm::minIntN(n),
                     llvm::maxUIntN(n));
}

inline void checkAlignment(uint8_t *loc, uint64_t v, int n,
                           const Relocation &rel) {
  if ((v & (n - 1)) != 0)
    error(getErrorLocation(loc) + "improper alignment for relocation " +
          lld::toString(rel.type) + ": 0x" + llvm::utohexstr(v) +
          " is not aligned to " + Twine(n) + " bytes");
}

// Endianness-aware read/write.
inline uint16_t read16(const void *p) {
  return llvm::support::endian::read16(p, config->endianness);
}

inline uint32_t read32(const void *p) {
  return llvm::support::endian::read32(p, config->endianness);
}

inline uint64_t read64(const void *p) {
  return llvm::support::endian::read64(p, config->endianness);
}

inline void write16(void *p, uint16_t v) {
  llvm::support::endian::write16(p, v, config->endianness);
}

inline void write32(void *p, uint32_t v) {
  llvm::support::endian::write32(p, v, config->endianness);
}

inline void write64(void *p, uint64_t v) {
  llvm::support::endian::write64(p, v, config->endianness);
}
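
// A minimal sketch, using a hypothetical relocation kind, of how a target's
// relocate() implementation typically combines the helpers above: range-check
// the computed value against the field width, then store it with the
// configured endianness.
inline void applyExampleAbs32(uint8_t *loc, const Relocation &rel,
                              uint64_t val) {
  // Reject values that fit in neither a signed nor an unsigned 32-bit field.
  checkIntUInt(loc, val, 32, rel);
  // Write the low 32 bits in the target's byte order (config->endianness).
  write32(loc, val);
}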
} // namespace elf
} // namespace lld

#endif