1 //===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Implementation of ELF support for the MC-JIT runtime dynamic linker.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "RuntimeDyldELF.h"
15 #include "RuntimeDyldCheckerImpl.h"
16 #include "llvm/ADT/IntervalMap.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/Object/ELFObjectFile.h"
22 #include "llvm/Object/ObjectFile.h"
23 #include "llvm/Support/ELF.h"
24 #include "llvm/Support/Endian.h"
25 #include "llvm/Support/MemoryBuffer.h"
26 #include "llvm/Support/TargetRegistry.h"
27
28 using namespace llvm;
29 using namespace llvm::object;
30
31 #define DEBUG_TYPE "dyld"
32
33 namespace {
34
35 template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
36 LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
37
38 typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
39 typedef Elf_Sym_Impl<ELFT> Elf_Sym;
40 typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
41 typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
42
43 typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
44
45 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
46
47 public:
48 DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec);
49
50 void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
51
52 void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
53
54 // Methods for type inquiry through isa, cast and dyn_cast
55 static inline bool classof(const Binary *v) {
56 return (isa<ELFObjectFile<ELFT>>(v) &&
57 classof(cast<ELFObjectFile<ELFT>>(v)));
58 }
59 static inline bool classof(const ELFObjectFile<ELFT> *v) {
60 return v->isDyldType();
61 }
62 };
63
64
65
66 // The MemoryBuffer passed into this constructor is just a wrapper around the
67 // actual memory. Ultimately, the Binary parent class will take ownership of
68 // this MemoryBuffer object but not the underlying memory.
69 template <class ELFT>
70 DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC)
71 : ELFObjectFile<ELFT>(Wrapper, EC) {
72 this->isDyldELFObject = true;
73 }
74
75 template <class ELFT>
76 void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
77 uint64_t Addr) {
78 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
79 Elf_Shdr *shdr =
80 const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
81
82 // This assumes the address passed in matches the target address bitness
83 // The template-based type cast handles everything else.
84 shdr->sh_addr = static_cast<addr_type>(Addr);
85 }
86
87 template <class ELFT>
88 void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
89 uint64_t Addr) {
90
91 Elf_Sym *sym = const_cast<Elf_Sym *>(
92 ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
93
94 // This assumes the address passed in matches the target address bitness
95 // The template-based type cast handles everything else.
96 sym->st_value = static_cast<addr_type>(Addr);
97 }
98
99 class LoadedELFObjectInfo final
100 : public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
101 public:
102 LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
103 : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
104
105 OwningBinary<ObjectFile>
106 getObjectForDebug(const ObjectFile &Obj) const override;
107 };
108
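// Clone the object file into a DyldELFObject and patch each named section
// header's sh_addr with the load address recorded in the LoadedELFObjectInfo,
// so a debugger reading the copy sees where the sections were actually placed.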
109 template <typename ELFT>
110 std::unique_ptr<DyldELFObject<ELFT>>
111 createRTDyldELFObject(MemoryBufferRef Buffer,
112 const ObjectFile &SourceObject,
113 const LoadedELFObjectInfo &L,
114 std::error_code &ec) {
115 typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
116 typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
117
118 std::unique_ptr<DyldELFObject<ELFT>> Obj =
119 llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec);
120
121 // Iterate over all sections in the object.
122 auto SI = SourceObject.section_begin();
123 for (const auto &Sec : Obj->sections()) {
124 StringRef SectionName;
125 Sec.getName(SectionName);
126 if (SectionName != "") {
127 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
128 Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
129 reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
130
131 if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
132 // This assumes that the address passed in matches the target address
133 // bitness. The template-based type cast handles everything else.
134 shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
135 }
136 }
137 ++SI;
138 }
139
140 return Obj;
141 }
142
143 OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj,
144 const LoadedELFObjectInfo &L) {
145 assert(Obj.isELF() && "Not an ELF object file.");
146
147 std::unique_ptr<MemoryBuffer> Buffer =
148 MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
149
150 std::error_code ec;
151
152 std::unique_ptr<ObjectFile> DebugObj;
153 if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) {
154 typedef ELFType<support::little, false> ELF32LE;
155 DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L,
156 ec);
157 } else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) {
158 typedef ELFType<support::big, false> ELF32BE;
159 DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L,
160 ec);
161 } else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) {
162 typedef ELFType<support::big, true> ELF64BE;
163 DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L,
164 ec);
165 } else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) {
166 typedef ELFType<support::little, true> ELF64LE;
167 DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L,
168 ec);
169 } else
170 llvm_unreachable("Unexpected ELF format");
171
172 assert(!ec && "Could not construct copy ELF object file");
173
174 return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer));
175 }
176
177 OwningBinary<ObjectFile>
178 LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
179 return createELFDebugObject(Obj, *this);
180 }
181
182 } // anonymous namespace
183
184 namespace llvm {
185
186 RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
187 RuntimeDyld::SymbolResolver &Resolver)
188 : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
189 RuntimeDyldELF::~RuntimeDyldELF() {}
190
191 void RuntimeDyldELF::registerEHFrames() {
192 for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
193 SID EHFrameSID = UnregisteredEHFrameSections[i];
194 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
195 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
196 size_t EHFrameSize = Sections[EHFrameSID].getSize();
197 MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
198 RegisteredEHFrameSections.push_back(EHFrameSID);
199 }
200 UnregisteredEHFrameSections.clear();
201 }
202
203 void RuntimeDyldELF::deregisterEHFrames() {
204 for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) {
205 SID EHFrameSID = RegisteredEHFrameSections[i];
206 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
207 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
208 size_t EHFrameSize = Sections[EHFrameSID].getSize();
209 MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
210 }
211 RegisteredEHFrameSections.clear();
212 }
213
214 std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
215 RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
216 if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
217 return llvm::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
218 else {
219 HasError = true;
220 raw_string_ostream ErrStream(ErrorStr);
221 logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream, "");
222 return nullptr;
223 }
224 }
225
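// Resolve an x86-64 relocation at Section+Offset. Value is the target
// symbol's load address and Addend is the explicit addend from the .rela
// entry; PC-relative types subtract the final load address of the
// relocation site.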
226 void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
227 uint64_t Offset, uint64_t Value,
228 uint32_t Type, int64_t Addend,
229 uint64_t SymOffset) {
230 switch (Type) {
231 default:
232 llvm_unreachable("Relocation type not implemented yet!");
233 break;
234 case ELF::R_X86_64_64: {
235 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
236 Value + Addend;
237 DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
238 << format("%p\n", Section.getAddressWithOffset(Offset)));
239 break;
240 }
241 case ELF::R_X86_64_32:
242 case ELF::R_X86_64_32S: {
243 Value += Addend;
244 assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
245 (Type == ELF::R_X86_64_32S &&
246 ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
247 uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
248 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
249 TruncatedAddr;
250 DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
251 << format("%p\n", Section.getAddressWithOffset(Offset)));
252 break;
253 }
254 case ELF::R_X86_64_PC8: {
255 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
256 int64_t RealOffset = Value + Addend - FinalAddress;
257 assert(isInt<8>(RealOffset));
258 int8_t TruncOffset = (RealOffset & 0xFF);
259 Section.getAddress()[Offset] = TruncOffset;
260 break;
261 }
262 case ELF::R_X86_64_PC32: {
263 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
264 int64_t RealOffset = Value + Addend - FinalAddress;
265 assert(isInt<32>(RealOffset));
266 int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
267 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
268 TruncOffset;
269 break;
270 }
271 case ELF::R_X86_64_PC64: {
272 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
273 int64_t RealOffset = Value + Addend - FinalAddress;
274 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
275 RealOffset;
276 break;
277 }
278 }
279 }
280
281 void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
282 uint64_t Offset, uint32_t Value,
283 uint32_t Type, int32_t Addend) {
284 switch (Type) {
285 case ELF::R_386_32: {
286 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
287 Value + Addend;
288 break;
289 }
290 case ELF::R_386_PC32: {
291 uint32_t FinalAddress =
292 Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
293 uint32_t RealOffset = Value + Addend - FinalAddress;
294 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
295 RealOffset;
296 break;
297 }
298 default:
299 // There are other relocation types, but it appears these are the
300 // only ones currently used by the LLVM ELF object writer
301 llvm_unreachable("Relocation type not implemented yet!");
302 break;
303 }
304 }
305
306 void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
307 uint64_t Offset, uint64_t Value,
308 uint32_t Type, int64_t Addend) {
309 uint32_t *TargetPtr =
310 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
311 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
312
313 DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
314 << format("%llx", Section.getAddressWithOffset(Offset))
315 << " FinalAddress: 0x" << format("%llx", FinalAddress)
316 << " Value: 0x" << format("%llx", Value) << " Type: 0x"
317 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
318 << "\n");
319
320 switch (Type) {
321 default:
322 llvm_unreachable("Relocation type not implemented yet!");
323 break;
324 case ELF::R_AARCH64_ABS64: {
325 uint64_t *TargetPtr =
326 reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset));
327 *TargetPtr = Value + Addend;
328 break;
329 }
330 case ELF::R_AARCH64_PREL32: {
331 uint64_t Result = Value + Addend - FinalAddress;
332 assert(static_cast<int64_t>(Result) >= INT32_MIN &&
333 static_cast<int64_t>(Result) <= UINT32_MAX);
334 *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
335 break;
336 }
337 case ELF::R_AARCH64_CALL26: // fallthrough
338 case ELF::R_AARCH64_JUMP26: {
339 // Operation: S+A-P. The B/BL immediate is taken from bits 27:2 of the
340 // result.
341 uint64_t BranchImm = Value + Addend - FinalAddress;
342
343 // "Check that -2^27 <= result < 2^27".
344 assert(isInt<28>(BranchImm));
345
346 // AArch64 code is emitted with .rela relocations. The data already in any
347 // bits affected by the relocation on entry is garbage.
348 *TargetPtr &= 0xfc000000U;
349 // Immediate goes in bits 25:0 of B and BL.
350 *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
351 break;
352 }
353 case ELF::R_AARCH64_MOVW_UABS_G3: {
354 uint64_t Result = Value + Addend;
355
356 // AArch64 code is emitted with .rela relocations. The data already in any
357 // bits affected by the relocation on entry is garbage.
358 *TargetPtr &= 0xffe0001fU;
359 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
360 *TargetPtr |= Result >> (48 - 5);
361 // Shift must be "lsl #48", in bits 22:21
362 assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
363 break;
364 }
365 case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
366 uint64_t Result = Value + Addend;
367
368 // AArch64 code is emitted with .rela relocations. The data already in any
369 // bits affected by the relocation on entry is garbage.
370 *TargetPtr &= 0xffe0001fU;
371 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
372 *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
373 // Shift must be "lsl #32", in bits 22:21
374 assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
375 break;
376 }
377 case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
378 uint64_t Result = Value + Addend;
379
380 // AArch64 code is emitted with .rela relocations. The data already in any
381 // bits affected by the relocation on entry is garbage.
382 *TargetPtr &= 0xffe0001fU;
383 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
384 *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
385 // Shift must be "lsl #16", in bits 22:21
386 assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
387 break;
388 }
389 case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
390 uint64_t Result = Value + Addend;
391
392 // AArch64 code is emitted with .rela relocations. The data already in any
393 // bits affected by the relocation on entry is garbage.
394 *TargetPtr &= 0xffe0001fU;
395 // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
396 *TargetPtr |= ((Result & 0xffffU) << 5);
397 // Shift must be "lsl #0", in bits 22:21.
398 assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
399 break;
400 }
401 case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
402 // Operation: Page(S+A) - Page(P)
403 uint64_t Result =
404 ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
405
406 // Check that -2^32 <= X < 2^32
407 assert(isInt<33>(Result) && "overflow check failed for relocation");
408
409 // AArch64 code is emitted with .rela relocations. The data already in any
410 // bits affected by the relocation on entry is garbage.
411 *TargetPtr &= 0x9f00001fU;
412 // Immediate goes in bits 30:29 and 23:5 of the ADRP instruction, taken
413 // from bits 32:12 of X.
414 *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
415 *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
416 break;
417 }
418 case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
419 // Operation: S + A
420 uint64_t Result = Value + Addend;
421
422 // AArch64 code is emitted with .rela relocations. The data already in any
423 // bits affected by the relocation on entry is garbage.
424 *TargetPtr &= 0xffc003ffU;
425 // Immediate goes in bits 21:10 of LD/ST instruction, taken
426 // from bits 11:2 of X
427 *TargetPtr |= ((Result & 0xffc) << (10 - 2));
428 break;
429 }
430 case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
431 // Operation: S + A
432 uint64_t Result = Value + Addend;
433
434 // AArch64 code is emitted with .rela relocations. The data already in any
435 // bits affected by the relocation on entry is garbage.
436 *TargetPtr &= 0xffc003ffU;
437 // Immediate goes in bits 21:10 of LD/ST instruction, taken
438 // from bits 11:3 of X
439 *TargetPtr |= ((Result & 0xff8) << (10 - 3));
440 break;
441 }
442 }
443 }
444
445 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
446 uint64_t Offset, uint32_t Value,
447 uint32_t Type, int32_t Addend) {
448 // TODO: Add Thumb relocations.
449 uint32_t *TargetPtr =
450 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
451 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
452 Value += Addend;
453
454 DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
455 << Section.getAddressWithOffset(Offset)
456 << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
457 << format("%x", Value) << " Type: " << format("%x", Type)
458 << " Addend: " << format("%x", Addend) << "\n");
459
460 switch (Type) {
461 default:
462 llvm_unreachable("Not implemented relocation type!");
463
464 case ELF::R_ARM_NONE:
465 break;
466 case ELF::R_ARM_PREL31:
467 case ELF::R_ARM_TARGET1:
468 case ELF::R_ARM_ABS32:
469 *TargetPtr = Value;
470 break;
471 // Write one 16-bit half of the 32-bit value into the MOVW/MOVT
472 // instruction; the low 12 bits go into imm12 and the top 4 bits into imm4.
473 case ELF::R_ARM_MOVW_ABS_NC:
474 case ELF::R_ARM_MOVT_ABS:
475 if (Type == ELF::R_ARM_MOVW_ABS_NC)
476 Value = Value & 0xFFFF;
477 else if (Type == ELF::R_ARM_MOVT_ABS)
478 Value = (Value >> 16) & 0xFFFF;
479 *TargetPtr &= ~0x000F0FFF;
480 *TargetPtr |= Value & 0xFFF;
481 *TargetPtr |= ((Value >> 12) & 0xF) << 16;
482 break;
483 // Write 24 bit relative value to the branch instruction.
484 case ELF::R_ARM_PC24: // Fall through.
485 case ELF::R_ARM_CALL: // Fall through.
486 case ELF::R_ARM_JUMP24:
487 int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
488 RelValue = (RelValue & 0x03FFFFFC) >> 2;
489 assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
490 *TargetPtr &= 0xFF000000;
491 *TargetPtr |= RelValue;
492 break;
493 }
494 }
495
496 void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
497 uint64_t Offset, uint32_t Value,
498 uint32_t Type, int32_t Addend) {
499 uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
500 Value += Addend;
501
502 DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: "
503 << Section.getAddressWithOffset(Offset) << " FinalAddress: "
504 << format("%p", Section.getLoadAddressWithOffset(Offset))
505 << " Value: " << format("%x", Value)
506 << " Type: " << format("%x", Type)
507 << " Addend: " << format("%x", Addend) << "\n");
508
509 uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
510
511 switch (Type) {
512 default:
513 llvm_unreachable("Not implemented relocation type!");
514 break;
515 case ELF::R_MIPS_32:
516 writeBytesUnaligned(Value, TargetPtr, 4);
517 break;
518 case ELF::R_MIPS_26:
519 Insn &= 0xfc000000;
520 Insn |= (Value & 0x0fffffff) >> 2;
521 writeBytesUnaligned(Insn, TargetPtr, 4);
522 break;
523 case ELF::R_MIPS_HI16:
524 // Get the higher 16-bits. Also add 1 if bit 15 is 1.
525 Insn &= 0xffff0000;
526 Insn |= ((Value + 0x8000) >> 16) & 0xffff;
527 writeBytesUnaligned(Insn, TargetPtr, 4);
528 break;
529 case ELF::R_MIPS_LO16:
530 Insn &= 0xffff0000;
531 Insn |= Value & 0xffff;
532 writeBytesUnaligned(Insn, TargetPtr, 4);
533 break;
534 case ELF::R_MIPS_PC32: {
535 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
536 writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4);
537 break;
538 }
539 case ELF::R_MIPS_PC16: {
540 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
541 Insn &= 0xffff0000;
542 Insn |= ((Value - FinalAddress) >> 2) & 0xffff;
543 writeBytesUnaligned(Insn, TargetPtr, 4);
544 break;
545 }
546 case ELF::R_MIPS_PC19_S2: {
547 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
548 Insn &= 0xfff80000;
549 Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
550 writeBytesUnaligned(Insn, TargetPtr, 4);
551 break;
552 }
553 case ELF::R_MIPS_PC21_S2: {
554 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
555 Insn &= 0xffe00000;
556 Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff;
557 writeBytesUnaligned(Insn, TargetPtr, 4);
558 break;
559 }
560 case ELF::R_MIPS_PC26_S2: {
561 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
562 Insn &= 0xfc000000;
563 Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff;
564 writeBytesUnaligned(Insn, TargetPtr, 4);
565 break;
566 }
567 case ELF::R_MIPS_PCHI16: {
568 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
569 Insn &= 0xffff0000;
570 Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff;
571 writeBytesUnaligned(Insn, TargetPtr, 4);
572 break;
573 }
574 case ELF::R_MIPS_PCLO16: {
575 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
576 Insn &= 0xffff0000;
577 Insn |= (Value - FinalAddress) & 0xffff;
578 writeBytesUnaligned(Insn, TargetPtr, 4);
579 break;
580 }
581 }
582 }
583
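// Record which MIPS ABI the object targets: O32 is detected from the ELF
// header flags and N64 from the 64-bit file format; the N32 ABI is not
// supported.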
584 void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
585 if (Arch == Triple::UnknownArch ||
586 !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
587 IsMipsO32ABI = false;
588 IsMipsN64ABI = false;
589 return;
590 }
591 unsigned AbiVariant;
592 Obj.getPlatformFlags(AbiVariant);
593 IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
594 IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
595 if (AbiVariant & ELF::EF_MIPS_ABI2)
596 llvm_unreachable("Mips N32 ABI is not supported yet");
597 }
598
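// On MIPS N64, a single relocation record can pack up to three relocation
// types (r_type, r_type2, r_type3). They are applied as a chain: the result
// of evaluating one type becomes the addend of the next, and only the final
// result is written back to the target location.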
599 void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section,
600 uint64_t Offset, uint64_t Value,
601 uint32_t Type, int64_t Addend,
602 uint64_t SymOffset,
603 SID SectionID) {
604 uint32_t r_type = Type & 0xff;
605 uint32_t r_type2 = (Type >> 8) & 0xff;
606 uint32_t r_type3 = (Type >> 16) & 0xff;
607
608 // RelType keeps track of which relocation type of the composed chain is
609 // currently being applied.
610 uint32_t RelType = r_type;
611 int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
612 RelType, Addend,
613 SymOffset, SectionID);
614 if (r_type2 != ELF::R_MIPS_NONE) {
615 RelType = r_type2;
616 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
617 CalculatedValue, SymOffset,
618 SectionID);
619 }
620 if (r_type3 != ELF::R_MIPS_NONE) {
621 RelType = r_type3;
622 CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
623 CalculatedValue, SymOffset,
624 SectionID);
625 }
626 applyMIPS64Relocation(Section.getAddressWithOffset(Offset), CalculatedValue,
627 RelType);
628 }
629
630 int64_t
631 RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section,
632 uint64_t Offset, uint64_t Value,
633 uint32_t Type, int64_t Addend,
634 uint64_t SymOffset, SID SectionID) {
635
636 DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
637 << format("%llx", Section.getAddressWithOffset(Offset))
638 << " FinalAddress: 0x"
639 << format("%llx", Section.getLoadAddressWithOffset(Offset))
640 << " Value: 0x" << format("%llx", Value) << " Type: 0x"
641 << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
642 << " SymOffset: " << format("%x", SymOffset) << "\n");
643
644 switch (Type) {
645 default:
646 llvm_unreachable("Not implemented relocation type!");
647 break;
648 case ELF::R_MIPS_JALR:
649 case ELF::R_MIPS_NONE:
650 break;
651 case ELF::R_MIPS_32:
652 case ELF::R_MIPS_64:
653 return Value + Addend;
654 case ELF::R_MIPS_26:
655 return ((Value + Addend) >> 2) & 0x3ffffff;
656 case ELF::R_MIPS_GPREL16: {
657 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
658 return Value + Addend - (GOTAddr + 0x7ff0);
659 }
660 case ELF::R_MIPS_SUB:
661 return Value - Addend;
662 case ELF::R_MIPS_HI16:
663 // Get the higher 16-bits. Also add 1 if bit 15 is 1.
664 return ((Value + Addend + 0x8000) >> 16) & 0xffff;
665 case ELF::R_MIPS_LO16:
666 return (Value + Addend) & 0xffff;
667 case ELF::R_MIPS_CALL16:
668 case ELF::R_MIPS_GOT_DISP:
669 case ELF::R_MIPS_GOT_PAGE: {
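// Lazily fill the GOT slot reserved for this symbol with its address and
// return the gp-relative offset of the slot (gp points 0x7ff0 past the
// start of the GOT).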
670 uint8_t *LocalGOTAddr =
671 getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
672 uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8);
673
674 Value += Addend;
675 if (Type == ELF::R_MIPS_GOT_PAGE)
676 Value = (Value + 0x8000) & ~0xffff;
677
678 if (GOTEntry)
679 assert(GOTEntry == Value &&
680 "GOT entry has two different addresses.");
681 else
682 writeBytesUnaligned(Value, LocalGOTAddr, 8);
683
684 return (SymOffset - 0x7ff0) & 0xffff;
685 }
686 case ELF::R_MIPS_GOT_OFST: {
687 int64_t page = (Value + Addend + 0x8000) & ~0xffff;
688 return (Value + Addend - page) & 0xffff;
689 }
690 case ELF::R_MIPS_GPREL32: {
691 uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
692 return Value + Addend - (GOTAddr + 0x7ff0);
693 }
694 case ELF::R_MIPS_PC16: {
695 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
696 return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
697 }
698 case ELF::R_MIPS_PC32: {
699 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
700 return Value + Addend - FinalAddress;
701 }
702 case ELF::R_MIPS_PC18_S3: {
703 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
704 return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
705 }
706 case ELF::R_MIPS_PC19_S2: {
707 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
708 return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
709 }
710 case ELF::R_MIPS_PC21_S2: {
711 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
712 return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
713 }
714 case ELF::R_MIPS_PC26_S2: {
715 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
716 return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
717 }
718 case ELF::R_MIPS_PCHI16: {
719 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
720 return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
721 }
722 case ELF::R_MIPS_PCLO16: {
723 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
724 return (Value + Addend - FinalAddress) & 0xffff;
725 }
726 }
727 return 0;
728 }
729
730 void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
731 int64_t CalculatedValue,
732 uint32_t Type) {
733 uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
734
735 switch (Type) {
736 default:
737 break;
738 case ELF::R_MIPS_32:
739 case ELF::R_MIPS_GPREL32:
740 case ELF::R_MIPS_PC32:
741 writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4);
742 break;
743 case ELF::R_MIPS_64:
744 case ELF::R_MIPS_SUB:
745 writeBytesUnaligned(CalculatedValue, TargetPtr, 8);
746 break;
747 case ELF::R_MIPS_26:
748 case ELF::R_MIPS_PC26_S2:
749 Insn = (Insn & 0xfc000000) | CalculatedValue;
750 writeBytesUnaligned(Insn, TargetPtr, 4);
751 break;
752 case ELF::R_MIPS_GPREL16:
753 Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff);
754 writeBytesUnaligned(Insn, TargetPtr, 4);
755 break;
756 case ELF::R_MIPS_HI16:
757 case ELF::R_MIPS_LO16:
758 case ELF::R_MIPS_PCHI16:
759 case ELF::R_MIPS_PCLO16:
760 case ELF::R_MIPS_PC16:
761 case ELF::R_MIPS_CALL16:
762 case ELF::R_MIPS_GOT_DISP:
763 case ELF::R_MIPS_GOT_PAGE:
764 case ELF::R_MIPS_GOT_OFST:
765 Insn = (Insn & 0xffff0000) | CalculatedValue;
766 writeBytesUnaligned(Insn, TargetPtr, 4);
767 break;
768 case ELF::R_MIPS_PC18_S3:
769 Insn = (Insn & 0xfffc0000) | CalculatedValue;
770 writeBytesUnaligned(Insn, TargetPtr, 4);
771 break;
772 case ELF::R_MIPS_PC19_S2:
773 Insn = (Insn & 0xfff80000) | CalculatedValue;
774 writeBytesUnaligned(Insn, TargetPtr, 4);
775 break;
776 case ELF::R_MIPS_PC21_S2:
777 Insn = (Insn & 0xffe00000) | CalculatedValue;
778 writeBytesUnaligned(Insn, TargetPtr, 4);
779 break;
780 }
781 }
782
783 // Return the .TOC. section and offset.
784 Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
785 ObjSectionToIDMap &LocalSections,
786 RelocationValueRef &Rel) {
787 // Set a default SectionID in case we do not find a TOC section below.
788 // This may happen for references to the TOC base (sym@toc, .opd
789 // relocation) without a .toc directive. In this case just use the
790 // first section (which is usually the .opd) since the code won't
791 // reference the .toc base directly.
792 Rel.SymbolName = nullptr;
793 Rel.SectionID = 0;
794
795 // The TOC consists of sections .got, .toc, .tocbss, .plt in that
796 // order. The TOC starts where the first of these sections starts.
797 for (auto &Section: Obj.sections()) {
798 StringRef SectionName;
799 if (auto EC = Section.getName(SectionName))
800 return errorCodeToError(EC);
801
802 if (SectionName == ".got"
803 || SectionName == ".toc"
804 || SectionName == ".tocbss"
805 || SectionName == ".plt") {
806 if (auto SectionIDOrErr =
807 findOrEmitSection(Obj, Section, false, LocalSections))
808 Rel.SectionID = *SectionIDOrErr;
809 else
810 return SectionIDOrErr.takeError();
811 break;
812 }
813 }
814
815 // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
816 // thus permitting a full 64 KB segment.
817 Rel.Addend = 0x8000;
818
819 return Error::success();
820 }
821
822 // Returns the section and offset associated with the OPD entry referenced
823 // by Symbol.
824 Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
825 ObjSectionToIDMap &LocalSections,
826 RelocationValueRef &Rel) {
827 // Get the ELF symbol value (st_value) to compare with Relocation offset in
828 // .opd entries
829 for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
830 si != se; ++si) {
831 section_iterator RelSecI = si->getRelocatedSection();
832 if (RelSecI == Obj.section_end())
833 continue;
834
835 StringRef RelSectionName;
836 if (auto EC = RelSecI->getName(RelSectionName))
837 return errorCodeToError(EC);
838
839 if (RelSectionName != ".opd")
840 continue;
841
842 for (elf_relocation_iterator i = si->relocation_begin(),
843 e = si->relocation_end();
844 i != e;) {
845 // The R_PPC64_ADDR64 relocation indicates the first field
846 // of a .opd entry
847 uint64_t TypeFunc = i->getType();
848 if (TypeFunc != ELF::R_PPC64_ADDR64) {
849 ++i;
850 continue;
851 }
852
853 uint64_t TargetSymbolOffset = i->getOffset();
854 symbol_iterator TargetSymbol = i->getSymbol();
855 int64_t Addend;
856 if (auto AddendOrErr = i->getAddend())
857 Addend = *AddendOrErr;
858 else
859 return errorCodeToError(AddendOrErr.getError());
860
861 ++i;
862 if (i == e)
863 break;
864
865 // Just check if the following relocation is a R_PPC64_TOC
866 uint64_t TypeTOC = i->getType();
867 if (TypeTOC != ELF::R_PPC64_TOC)
868 continue;
869
870 // Finally, compare the symbol value and the target symbol offset
871 // to check if this .opd entry refers to the symbol the relocation
872 // points to.
873 if (Rel.Addend != (int64_t)TargetSymbolOffset)
874 continue;
875
876 section_iterator TSI = Obj.section_end();
877 if (auto TSIOrErr = TargetSymbol->getSection())
878 TSI = *TSIOrErr;
879 else
880 return TSIOrErr.takeError();
881 assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
882
883 bool IsCode = TSI->isText();
884 if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
885 LocalSections))
886 Rel.SectionID = *SectionIDOrErr;
887 else
888 return SectionIDOrErr.takeError();
889 Rel.Addend = (intptr_t)Addend;
890 return Error::success();
891 }
892 }
893 llvm_unreachable("Attempting to get address of ODP entry!");
894 }
895
896 // Relocation masks following the #lo(value), #hi(value), #ha(value),
897 // #higher(value), #highera(value), #highest(value), and #highesta(value)
898 // macros defined in section 4.5.1 (Relocation Types) of the 64-bit PowerPC
899 // ELF ABI document.
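// For example, for value 0x0123456789abcdef: #lo = 0xcdef, #hi = 0x89ab,
// #ha = 0x89ac (the +0x8000 carries because bit 15 is set), #higher = 0x4567,
// and #highest = 0x0123.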
900
901 static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
902
903 static inline uint16_t applyPPChi(uint64_t value) {
904 return (value >> 16) & 0xffff;
905 }
906
907 static inline uint16_t applyPPCha(uint64_t value) {
908 return ((value + 0x8000) >> 16) & 0xffff;
909 }
910
911 static inline uint16_t applyPPChigher(uint64_t value) {
912 return (value >> 32) & 0xffff;
913 }
914
915 static inline uint16_t applyPPChighera(uint64_t value) {
916 return ((value + 0x8000) >> 32) & 0xffff;
917 }
918
919 static inline uint16_t applyPPChighest(uint64_t value) {
920 return (value >> 48) & 0xffff;
921 }
922
923 static inline uint16_t applyPPChighesta(uint64_t value) {
924 return ((value + 0x8000) >> 48) & 0xffff;
925 }
926
927 void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
928 uint64_t Offset, uint64_t Value,
929 uint32_t Type, int64_t Addend) {
930 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
931 switch (Type) {
932 default:
933 llvm_unreachable("Relocation type not implemented yet!");
934 break;
935 case ELF::R_PPC_ADDR16_LO:
936 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
937 break;
938 case ELF::R_PPC_ADDR16_HI:
939 writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
940 break;
941 case ELF::R_PPC_ADDR16_HA:
942 writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
943 break;
944 }
945 }
946
947 void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
948 uint64_t Offset, uint64_t Value,
949 uint32_t Type, int64_t Addend) {
950 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
951 switch (Type) {
952 default:
953 llvm_unreachable("Relocation type not implemented yet!");
954 break;
955 case ELF::R_PPC64_ADDR16:
956 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
957 break;
958 case ELF::R_PPC64_ADDR16_DS:
959 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
960 break;
961 case ELF::R_PPC64_ADDR16_LO:
962 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
963 break;
964 case ELF::R_PPC64_ADDR16_LO_DS:
965 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
966 break;
967 case ELF::R_PPC64_ADDR16_HI:
968 writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
969 break;
970 case ELF::R_PPC64_ADDR16_HA:
971 writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
972 break;
973 case ELF::R_PPC64_ADDR16_HIGHER:
974 writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
975 break;
976 case ELF::R_PPC64_ADDR16_HIGHERA:
977 writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
978 break;
979 case ELF::R_PPC64_ADDR16_HIGHEST:
980 writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
981 break;
982 case ELF::R_PPC64_ADDR16_HIGHESTA:
983 writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
984 break;
985 case ELF::R_PPC64_ADDR14: {
986 assert(((Value + Addend) & 3) == 0);
987 // Preserve the AA/LK bits in the branch instruction
988 uint8_t aalk = *(LocalAddress + 3);
989 writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
990 } break;
991 case ELF::R_PPC64_REL16_LO: {
992 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
993 uint64_t Delta = Value - FinalAddress + Addend;
994 writeInt16BE(LocalAddress, applyPPClo(Delta));
995 } break;
996 case ELF::R_PPC64_REL16_HI: {
997 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
998 uint64_t Delta = Value - FinalAddress + Addend;
999 writeInt16BE(LocalAddress, applyPPChi(Delta));
1000 } break;
1001 case ELF::R_PPC64_REL16_HA: {
1002 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
1003 uint64_t Delta = Value - FinalAddress + Addend;
1004 writeInt16BE(LocalAddress, applyPPCha(Delta));
1005 } break;
1006 case ELF::R_PPC64_ADDR32: {
1007 int32_t Result = static_cast<int32_t>(Value + Addend);
1008 if (SignExtend32<32>(Result) != Result)
1009 llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
1010 writeInt32BE(LocalAddress, Result);
1011 } break;
1012 case ELF::R_PPC64_REL24: {
1013 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
1014 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
1015 if (SignExtend32<26>(delta) != delta)
1016 llvm_unreachable("Relocation R_PPC64_REL24 overflow");
1017 // Generates a 'bl <address>' instruction
1018 writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
1019 } break;
1020 case ELF::R_PPC64_REL32: {
1021 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
1022 int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
1023 if (SignExtend32<32>(delta) != delta)
1024 llvm_unreachable("Relocation R_PPC64_REL32 overflow");
1025 writeInt32BE(LocalAddress, delta);
1026 } break;
1027 case ELF::R_PPC64_REL64: {
1028 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
1029 uint64_t Delta = Value - FinalAddress + Addend;
1030 writeInt64BE(LocalAddress, Delta);
1031 } break;
1032 case ELF::R_PPC64_ADDR64:
1033 writeInt64BE(LocalAddress, Value + Addend);
1034 break;
1035 }
1036 }
1037
1038 void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
1039 uint64_t Offset, uint64_t Value,
1040 uint32_t Type, int64_t Addend) {
1041 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
1042 switch (Type) {
1043 default:
1044 llvm_unreachable("Relocation type not implemented yet!");
1045 break;
1046 case ELF::R_390_PC16DBL:
1047 case ELF::R_390_PLT16DBL: {
1048 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
1049 assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
1050 writeInt16BE(LocalAddress, Delta / 2);
1051 break;
1052 }
1053 case ELF::R_390_PC32DBL:
1054 case ELF::R_390_PLT32DBL: {
1055 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
1056 assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
1057 writeInt32BE(LocalAddress, Delta / 2);
1058 break;
1059 }
1060 case ELF::R_390_PC32: {
1061 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
1062 assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
1063 writeInt32BE(LocalAddress, Delta);
1064 break;
1065 }
1066 case ELF::R_390_64:
1067 writeInt64BE(LocalAddress, Value + Addend);
1068 break;
1069 case ELF::R_390_PC64: {
1070 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
1071 writeInt64BE(LocalAddress, Delta);
1072 break;
1073 }
1074 }
1075 }
1076
1077 // The target location for the relocation is described by RE.SectionID and
1078 // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
1079 // SectionEntry has three members describing its location.
1080 // SectionEntry::Address is the address at which the section has been loaded
1081 // into memory in the current (host) process. SectionEntry::LoadAddress is the
1082 // address that the section will have in the target process.
1083 // SectionEntry::ObjAddress is the address of the bits for this section in the
1084 // original emitted object image (also in the current address space).
1085 //
1086 // Relocations will be applied as if the section were loaded at
1087 // SectionEntry::LoadAddress, but they will be applied at an address based
1088 // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
1089 // Target memory contents if they are required for value calculations.
1090 //
1091 // The Value parameter here is the load address of the symbol for the
1092 // relocation to be applied. For relocations which refer to symbols in the
1093 // current object Value will be the LoadAddress of the section in which
1094 // the symbol resides (RE.Addend provides additional information about the
1095 // symbol location). For external symbols, Value will be the address of the
1096 // symbol in the target address space.
1097 void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
1098 uint64_t Value) {
1099 const SectionEntry &Section = Sections[RE.SectionID];
1100 return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
1101 RE.SymOffset, RE.SectionID);
1102 }
1103
1104 void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
1105 uint64_t Offset, uint64_t Value,
1106 uint32_t Type, int64_t Addend,
1107 uint64_t SymOffset, SID SectionID) {
1108 switch (Arch) {
1109 case Triple::x86_64:
1110 resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
1111 break;
1112 case Triple::x86:
1113 resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1114 (uint32_t)(Addend & 0xffffffffL));
1115 break;
1116 case Triple::aarch64:
1117 case Triple::aarch64_be:
1118 resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
1119 break;
1120 case Triple::arm: // Fall through.
1121 case Triple::armeb:
1122 case Triple::thumb:
1123 case Triple::thumbeb:
1124 resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1125 (uint32_t)(Addend & 0xffffffffL));
1126 break;
1127 case Triple::mips: // Fall through.
1128 case Triple::mipsel:
1129 case Triple::mips64:
1130 case Triple::mips64el:
1131 if (IsMipsO32ABI)
1132 resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
1133 Type, (uint32_t)(Addend & 0xffffffffL));
1134 else if (IsMipsN64ABI)
1135 resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset,
1136 SectionID);
1137 else
1138 llvm_unreachable("Mips ABI not handled");
1139 break;
1140 case Triple::ppc:
1141 resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
1142 break;
1143 case Triple::ppc64: // Fall through.
1144 case Triple::ppc64le:
1145 resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
1146 break;
1147 case Triple::systemz:
1148 resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
1149 break;
1150 default:
1151 llvm_unreachable("Unsupported CPU type!");
1152 }
1153 }
1154
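// For REL-style relocations the implicit addend lives in the bytes being
// relocated; return a pointer to those bytes in the original object image
// (ObjAddress) rather than in the loaded copy.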
1155 void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
1156 return (void *)(Sections[SectionID].getObjAddress() + Offset);
1157 }
1158
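// Record a relocation that needs no stub or GOT entry: bind it to the target
// symbol if the symbol is named, otherwise to the target section.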
1159 void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
1160 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
1161 if (Value.SymbolName)
1162 addRelocationForSymbol(RE, Value.SymbolName);
1163 else
1164 addRelocationForSection(RE, Value.SectionID);
1165 }
1166
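// MIPS splits a 32-bit addend across a HI16/LO16 (or PCHI16/PCLO16) pair.
// Return the LO16 relocation type that pairs with RelType, or R_MIPS_NONE if
// it has no matching low part. GOT16 only pairs with LO16 for local symbols.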
1167 uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
1168 bool IsLocal) const {
1169 switch (RelType) {
1170 case ELF::R_MICROMIPS_GOT16:
1171 if (IsLocal)
1172 return ELF::R_MICROMIPS_LO16;
1173 break;
1174 case ELF::R_MICROMIPS_HI16:
1175 return ELF::R_MICROMIPS_LO16;
1176 case ELF::R_MIPS_GOT16:
1177 if (IsLocal)
1178 return ELF::R_MIPS_LO16;
1179 break;
1180 case ELF::R_MIPS_HI16:
1181 return ELF::R_MIPS_LO16;
1182 case ELF::R_MIPS_PCHI16:
1183 return ELF::R_MIPS_PCLO16;
1184 default:
1185 break;
1186 }
1187 return ELF::R_MIPS_NONE;
1188 }
1189
1190 Expected<relocation_iterator>
1191 RuntimeDyldELF::processRelocationRef(
1192 unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
1193 ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
1194 const auto &Obj = cast<ELFObjectFileBase>(O);
1195 uint64_t RelType = RelI->getType();
1196 ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
1197 int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
1198 elf_symbol_iterator Symbol = RelI->getSymbol();
1199
1200 // Obtain the symbol name which is referenced in the relocation
1201 StringRef TargetName;
1202 if (Symbol != Obj.symbol_end()) {
1203 if (auto TargetNameOrErr = Symbol->getName())
1204 TargetName = *TargetNameOrErr;
1205 else
1206 return TargetNameOrErr.takeError();
1207 }
1208 DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
1209 << " TargetName: " << TargetName << "\n");
1210 RelocationValueRef Value;
1211 // First search for the symbol in the local symbol table
1212 SymbolRef::Type SymType = SymbolRef::ST_Unknown;
1213
1214 // Search for the symbol in the global symbol table
1215 RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
1216 if (Symbol != Obj.symbol_end()) {
1217 gsi = GlobalSymbolTable.find(TargetName.data());
1218 Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
1219 if (!SymTypeOrErr) {
1220 std::string Buf;
1221 raw_string_ostream OS(Buf);
1222 logAllUnhandledErrors(SymTypeOrErr.takeError(), OS, "");
1223 OS.flush();
1224 report_fatal_error(Buf);
1225 }
1226 SymType = *SymTypeOrErr;
1227 }
1228 if (gsi != GlobalSymbolTable.end()) {
1229 const auto &SymInfo = gsi->second;
1230 Value.SectionID = SymInfo.getSectionID();
1231 Value.Offset = SymInfo.getOffset();
1232 Value.Addend = SymInfo.getOffset() + Addend;
1233 } else {
1234 switch (SymType) {
1235 case SymbolRef::ST_Debug: {
1236 // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, but that is not obvious
1237 // and could be changed by other developers. Maybe the best way is to add a
1238 // new symbol type ST_Section to SymbolRef and use it.
1239 auto SectionOrErr = Symbol->getSection();
1240 if (!SectionOrErr) {
1241 std::string Buf;
1242 raw_string_ostream OS(Buf);
1243 logAllUnhandledErrors(SectionOrErr.takeError(), OS, "");
1244 OS.flush();
1245 report_fatal_error(Buf);
1246 }
1247 section_iterator si = *SectionOrErr;
1248 if (si == Obj.section_end())
1249 llvm_unreachable("Symbol section not found, bad object file format!");
1250 DEBUG(dbgs() << "\t\tThis is a section symbol\n");
1251 bool isCode = si->isText();
1252 if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
1253 ObjSectionToID))
1254 Value.SectionID = *SectionIDOrErr;
1255 else
1256 return SectionIDOrErr.takeError();
1257 Value.Addend = Addend;
1258 break;
1259 }
1260 case SymbolRef::ST_Data:
1261 case SymbolRef::ST_Unknown: {
1262 Value.SymbolName = TargetName.data();
1263 Value.Addend = Addend;
1264
1265 // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
1266 // will manifest here as a NULL symbol name.
1267 // We can set this as a valid (but empty) symbol name, and rely
1268 // on addRelocationForSymbol to handle this.
1269 if (!Value.SymbolName)
1270 Value.SymbolName = "";
1271 break;
1272 }
1273 default:
1274 llvm_unreachable("Unresolved symbol type!");
1275 break;
1276 }
1277 }
1278
1279 uint64_t Offset = RelI->getOffset();
1280
1281 DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
1282 << "\n");
1283 if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
1284 (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
1285 // This is an AArch64 branch relocation; it needs a stub function.
1286 DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
1287 SectionEntry &Section = Sections[SectionID];
1288
1289 // Look for an existing stub.
1290 StubMap::const_iterator i = Stubs.find(Value);
1291 if (i != Stubs.end()) {
1292 resolveRelocation(Section, Offset,
1293 (uint64_t)Section.getAddressWithOffset(i->second),
1294 RelType, 0);
1295 DEBUG(dbgs() << " Stub function found\n");
1296 } else {
1297 // Create a new stub function.
1298 DEBUG(dbgs() << " Create a new stub function\n");
1299 Stubs[Value] = Section.getStubOffset();
1300 uint8_t *StubTargetAddr = createStubFunction(
1301 Section.getAddressWithOffset(Section.getStubOffset()));
1302
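// The stub (filled in by createStubFunction) materializes the absolute
// target address in a register via a movz/movk sequence and branches to it;
// the four relocations below patch the 16-bit immediates at stub offsets
// 0, 4, 8 and 12.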
1303 RelocationEntry REmovz_g3(SectionID,
1304 StubTargetAddr - Section.getAddress(),
1305 ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
1306 RelocationEntry REmovk_g2(SectionID, StubTargetAddr -
1307 Section.getAddress() + 4,
1308 ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
1309 RelocationEntry REmovk_g1(SectionID, StubTargetAddr -
1310 Section.getAddress() + 8,
1311 ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
1312 RelocationEntry REmovk_g0(SectionID, StubTargetAddr -
1313 Section.getAddress() + 12,
1314 ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
1315
1316 if (Value.SymbolName) {
1317 addRelocationForSymbol(REmovz_g3, Value.SymbolName);
1318 addRelocationForSymbol(REmovk_g2, Value.SymbolName);
1319 addRelocationForSymbol(REmovk_g1, Value.SymbolName);
1320 addRelocationForSymbol(REmovk_g0, Value.SymbolName);
1321 } else {
1322 addRelocationForSection(REmovz_g3, Value.SectionID);
1323 addRelocationForSection(REmovk_g2, Value.SectionID);
1324 addRelocationForSection(REmovk_g1, Value.SectionID);
1325 addRelocationForSection(REmovk_g0, Value.SectionID);
1326 }
1327 resolveRelocation(Section, Offset,
1328 reinterpret_cast<uint64_t>(Section.getAddressWithOffset(
1329 Section.getStubOffset())),
1330 RelType, 0);
1331 Section.advanceStubOffset(getMaxStubSize());
1332 }
1333 } else if (Arch == Triple::arm) {
1334 if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
1335 RelType == ELF::R_ARM_JUMP24) {
1336 // This is an ARM branch relocation; it needs a stub function.
1337 DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
1338 SectionEntry &Section = Sections[SectionID];
1339
1340 // Look for an existing stub.
1341 StubMap::const_iterator i = Stubs.find(Value);
1342 if (i != Stubs.end()) {
1343 resolveRelocation(
1344 Section, Offset,
1345 reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
1346 RelType, 0);
1347 DEBUG(dbgs() << " Stub function found\n");
1348 } else {
1349 // Create a new stub function.
1350 DEBUG(dbgs() << " Create a new stub function\n");
1351 Stubs[Value] = Section.getStubOffset();
1352 uint8_t *StubTargetAddr = createStubFunction(
1353 Section.getAddressWithOffset(Section.getStubOffset()));
1354 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1355 ELF::R_ARM_ABS32, Value.Addend);
1356 if (Value.SymbolName)
1357 addRelocationForSymbol(RE, Value.SymbolName);
1358 else
1359 addRelocationForSection(RE, Value.SectionID);
1360
1361 resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1362 Section.getAddressWithOffset(
1363 Section.getStubOffset())),
1364 RelType, 0);
1365 Section.advanceStubOffset(getMaxStubSize());
1366 }
1367 } else {
1368 uint32_t *Placeholder =
1369 reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
1370 if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
1371 RelType == ELF::R_ARM_ABS32) {
1372 Value.Addend += *Placeholder;
1373 } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
1374 // See ELF for ARM documentation
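// MOVW/MOVT encode a 16-bit immediate split across imm12 (bits 11:0) and
// imm4 (bits 19:16); reassemble it here to recover the implicit addend.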
1375 Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
1376 }
1377 processSimpleRelocation(SectionID, Offset, RelType, Value);
1378 }
1379 } else if (IsMipsO32ABI) {
1380 uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
1381 computePlaceholderAddress(SectionID, Offset));
1382 uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
1383 if (RelType == ELF::R_MIPS_26) {
1384 // This is a MIPS branch relocation; it needs a stub function.
1385 DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1386 SectionEntry &Section = Sections[SectionID];
1387
1388 // Extract the addend from the instruction.
1389 // We shift up by two since the Value will be shifted down again
1390 // when applying the relocation.
1391 uint32_t Addend = (Opcode & 0x03ffffff) << 2;
1392
1393 Value.Addend += Addend;
1394
1395 // Look for an existing stub.
1396 StubMap::const_iterator i = Stubs.find(Value);
1397 if (i != Stubs.end()) {
1398 RelocationEntry RE(SectionID, Offset, RelType, i->second);
1399 addRelocationForSection(RE, SectionID);
1400 DEBUG(dbgs() << " Stub function found\n");
1401 } else {
1402 // Create a new stub function.
1403 DEBUG(dbgs() << " Create a new stub function\n");
1404 Stubs[Value] = Section.getStubOffset();
1405 uint8_t *StubTargetAddr = createStubFunction(
1406 Section.getAddressWithOffset(Section.getStubOffset()));
1407
1408 // Creating Hi and Lo relocations for the filled stub instructions.
1409 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1410 ELF::R_MIPS_HI16, Value.Addend);
1411 RelocationEntry RELo(SectionID,
1412 StubTargetAddr - Section.getAddress() + 4,
1413 ELF::R_MIPS_LO16, Value.Addend);
1414
1415 if (Value.SymbolName) {
1416 addRelocationForSymbol(REHi, Value.SymbolName);
1417 addRelocationForSymbol(RELo, Value.SymbolName);
1418 }
1419 else {
1420 addRelocationForSection(REHi, Value.SectionID);
1421 addRelocationForSection(RELo, Value.SectionID);
1422 }
1423
1424 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1425 addRelocationForSection(RE, SectionID);
1426 Section.advanceStubOffset(getMaxStubSize());
1427 }
1428 } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
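// A HI16 cannot be finalized until its matching LO16 is seen, because the
// LO16 carries the sign-extended low half of the addend; park the relocation
// in PendingRelocs until then.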
1429 int64_t Addend = (Opcode & 0x0000ffff) << 16;
1430 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1431 PendingRelocs.push_back(std::make_pair(Value, RE));
1432 } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
1433 int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
1434 for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
1435 const RelocationValueRef &MatchingValue = I->first;
1436 RelocationEntry &Reloc = I->second;
1437 if (MatchingValue == Value &&
1438 RelType == getMatchingLoRelocation(Reloc.RelType) &&
1439 SectionID == Reloc.SectionID) {
1440 Reloc.Addend += Addend;
1441 if (Value.SymbolName)
1442 addRelocationForSymbol(Reloc, Value.SymbolName);
1443 else
1444 addRelocationForSection(Reloc, Value.SectionID);
1445 I = PendingRelocs.erase(I);
1446 } else
1447 ++I;
1448 }
1449 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1450 if (Value.SymbolName)
1451 addRelocationForSymbol(RE, Value.SymbolName);
1452 else
1453 addRelocationForSection(RE, Value.SectionID);
1454 } else {
1455 if (RelType == ELF::R_MIPS_32)
1456 Value.Addend += Opcode;
1457 else if (RelType == ELF::R_MIPS_PC16)
1458 Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
1459 else if (RelType == ELF::R_MIPS_PC19_S2)
1460 Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
1461 else if (RelType == ELF::R_MIPS_PC21_S2)
1462 Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
1463 else if (RelType == ELF::R_MIPS_PC26_S2)
1464 Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
1465 processSimpleRelocation(SectionID, Offset, RelType, Value);
1466 }
1467 } else if (IsMipsN64ABI) {
1468 uint32_t r_type = RelType & 0xff;
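// The N64 ABI packs up to three relocation types into one r_info entry;
// only the primary (lowest byte) type decides whether this reference
// needs a GOT entry.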
1469 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1470 if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
1471 || r_type == ELF::R_MIPS_GOT_DISP) {
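// Reuse the GOT slot already assigned to this symbol, if any; otherwise
// allocate a fresh entry and remember it in GOTSymbolOffsets.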
1472 StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
1473 if (i != GOTSymbolOffsets.end())
1474 RE.SymOffset = i->second;
1475 else {
1476 RE.SymOffset = allocateGOTEntries(SectionID, 1);
1477 GOTSymbolOffsets[TargetName] = RE.SymOffset;
1478 }
1479 }
1480 if (Value.SymbolName)
1481 addRelocationForSymbol(RE, Value.SymbolName);
1482 else
1483 addRelocationForSection(RE, Value.SectionID);
1484 } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
1485 if (RelType == ELF::R_PPC64_REL24) {
1486 // Determine ABI variant in use for this object.
1487 unsigned AbiVariant;
1488 Obj.getPlatformFlags(AbiVariant);
1489 AbiVariant &= ELF::EF_PPC64_ABI;
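// EF_PPC64_ABI is 2 for ELFv2 objects; any other value is treated as
// ELFv1 below.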
1490 // A PPC branch relocation will need a stub function if the target is
1491 // an external symbol (SymbolRef::ST_Unknown) or if the target address
1492 // is out of range of the branch's signed 26-bit displacement.
1493 SectionEntry &Section = Sections[SectionID];
1494 uint8_t *Target = Section.getAddressWithOffset(Offset);
1495 bool RangeOverflow = false;
1496 if (SymType != SymbolRef::ST_Unknown) {
1497 if (AbiVariant != 2) {
1498 // In the ELFv1 ABI, a function call may point to the .opd entry,
1499 // so the final symbol value is calculated based on the relocation
1500 // values in the .opd section.
1501 if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
1502 return std::move(Err);
1503 } else {
1504 // In the ELFv2 ABI, a function symbol may provide a local entry
1505 // point, which must be used for direct calls.
1506 uint8_t SymOther = Symbol->getOther();
1507 Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
1508 }
1509 uint8_t *RelocTarget =
1510 Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
1511 int32_t delta = static_cast<int32_t>(Target - RelocTarget);
1512 // If it is within the 26-bit branch range, just set the branch target directly.
1513 if (SignExtend32<26>(delta) == delta) {
1514 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1515 if (Value.SymbolName)
1516 addRelocationForSymbol(RE, Value.SymbolName);
1517 else
1518 addRelocationForSection(RE, Value.SectionID);
1519 } else {
1520 RangeOverflow = true;
1521 }
1522 }
1523 if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
1524 // It is an external symbol (SymbolRef::ST_Unknown) or the target is
1525 // out of direct branch range; route the call through a stub.
1526 StubMap::const_iterator i = Stubs.find(Value);
1527 if (i != Stubs.end()) {
1528 // Symbol function stub already created, just relocate to it
1529 resolveRelocation(Section, Offset,
1530 reinterpret_cast<uint64_t>(
1531 Section.getAddressWithOffset(i->second)),
1532 RelType, 0);
1533 DEBUG(dbgs() << " Stub function found\n");
1534 } else {
1535 // Create a new stub function.
1536 DEBUG(dbgs() << " Create a new stub function\n");
1537 Stubs[Value] = Section.getStubOffset();
1538 uint8_t *StubTargetAddr = createStubFunction(
1539 Section.getAddressWithOffset(Section.getStubOffset()),
1540 AbiVariant);
1541 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1542 ELF::R_PPC64_ADDR64, Value.Addend);
1543
1544 // Generate the 64-bit address load sequence described in section
1545 // 4.5.1 of the PPC64 ELF ABI. The relocations apply to the 16-bit
1546 // immediate (low halfword) of each instruction, so on big-endian
1547 // targets the relocation offset must be advanced by two bytes.
1548 uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
1549 if (!IsTargetLittleEndian)
1550 StubRelocOffset += 2;
1551
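// The offsets below (0, 4, 12, 16) match the stub emitted by
// createStubFunction, which builds the 64-bit address in four 16-bit
// pieces (highest, higher, hi, lo) with a shift instruction between the
// second and third load.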
1552 RelocationEntry REhst(SectionID, StubRelocOffset + 0,
1553 ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
1554 RelocationEntry REhr(SectionID, StubRelocOffset + 4,
1555 ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
1556 RelocationEntry REh(SectionID, StubRelocOffset + 12,
1557 ELF::R_PPC64_ADDR16_HI, Value.Addend);
1558 RelocationEntry REl(SectionID, StubRelocOffset + 16,
1559 ELF::R_PPC64_ADDR16_LO, Value.Addend);
1560
1561 if (Value.SymbolName) {
1562 addRelocationForSymbol(REhst, Value.SymbolName);
1563 addRelocationForSymbol(REhr, Value.SymbolName);
1564 addRelocationForSymbol(REh, Value.SymbolName);
1565 addRelocationForSymbol(REl, Value.SymbolName);
1566 } else {
1567 addRelocationForSection(REhst, Value.SectionID);
1568 addRelocationForSection(REhr, Value.SectionID);
1569 addRelocationForSection(REh, Value.SectionID);
1570 addRelocationForSection(REl, Value.SectionID);
1571 }
1572
1573 resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1574 Section.getAddressWithOffset(
1575 Section.getStubOffset())),
1576 RelType, 0);
1577 Section.advanceStubOffset(getMaxStubSize());
1578 }
1579 if (SymType == SymbolRef::ST_Unknown) {
1580 // Restore the TOC for external calls
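// The call is now routed through the stub, which clobbers the TOC
// pointer in r2, so the instruction slot after the bl (normally a nop)
// is patched to reload r2 from its stack save slot (offset 24 for
// ELFv2, 40 for ELFv1).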
1581 if (AbiVariant == 2)
1582 writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
1583 else
1584 writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
1585 }
1586 }
1587 } else if (RelType == ELF::R_PPC64_TOC16 ||
1588 RelType == ELF::R_PPC64_TOC16_DS ||
1589 RelType == ELF::R_PPC64_TOC16_LO ||
1590 RelType == ELF::R_PPC64_TOC16_LO_DS ||
1591 RelType == ELF::R_PPC64_TOC16_HI ||
1592 RelType == ELF::R_PPC64_TOC16_HA) {
1593 // These relocations are supposed to subtract the TOC address from
1594 // the final value. This does not fit cleanly into the RuntimeDyld
1595 // scheme, since there may be *two* sections involved in determining
1596 // the relocation value (the section of the symbol referred to by the
1597 // relocation, and the TOC section associated with the current module).
1598 //
1599 // Fortunately, these relocations are currently only ever generated
1600 // referring to symbols that themselves reside in the TOC, which means
1601 // that the two sections are actually the same. Thus they cancel out
1602 // and we can immediately resolve the relocation right now.
1603 switch (RelType) {
1604 case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
1605 case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
1606 case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
1607 case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
1608 case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
1609 case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
1610 default: llvm_unreachable("Wrong relocation type.");
1611 }
1612
1613 RelocationValueRef TOCValue;
1614 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
1615 return std::move(Err);
1616 if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
1617 llvm_unreachable("Unsupported TOC relocation.");
1618 Value.Addend -= TOCValue.Addend;
1619 resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
1620 } else {
1621 // There are two ways to refer to the TOC address directly: either
1622 // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
1623 // ignored), or via any relocation that refers to the magic ".TOC."
1624 // symbols (in which case the addend is respected).
1625 if (RelType == ELF::R_PPC64_TOC) {
1626 RelType = ELF::R_PPC64_ADDR64;
1627 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1628 return std::move(Err);
1629 } else if (TargetName == ".TOC.") {
1630 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1631 return std::move(Err);
1632 Value.Addend += Addend;
1633 }
1634
1635 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1636
1637 if (Value.SymbolName)
1638 addRelocationForSymbol(RE, Value.SymbolName);
1639 else
1640 addRelocationForSection(RE, Value.SectionID);
1641 }
1642 } else if (Arch == Triple::systemz &&
1643 (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
1644 // Create function stubs for both PLT and GOT references, regardless of
1645 // whether the GOT reference is to data or code. The stub contains the
1646 // full address of the symbol, as needed by GOT references, and the
1647 // executable part only adds an overhead of 8 bytes.
1648 //
1649 // We could try to conserve space by allocating the code and data
1650 // parts of the stub separately. However, as things stand, we allocate
1651 // a stub for every relocation, so using a GOT in JIT code should be
1652 // no less space efficient than using an explicit constant pool.
1653 DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
1654 SectionEntry &Section = Sections[SectionID];
1655
1656 // Look for an existing stub.
1657 StubMap::const_iterator i = Stubs.find(Value);
1658 uintptr_t StubAddress;
1659 if (i != Stubs.end()) {
1660 StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
1661 DEBUG(dbgs() << " Stub function found\n");
1662 } else {
1663 // Create a new stub function.
1664 DEBUG(dbgs() << " Create a new stub function\n");
1665
1666 uintptr_t BaseAddress = uintptr_t(Section.getAddress());
1667 uintptr_t StubAlignment = getStubAlignment();
1668 StubAddress =
1669 (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
1670 -StubAlignment;
1671 unsigned StubOffset = StubAddress - BaseAddress;
1672
1673 Stubs[Value] = StubOffset;
1674 createStubFunction((uint8_t *)StubAddress);
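// The stub is 8 bytes of code (a PC-relative load of the 8-byte literal
// stored right after it, then an indirect branch) followed by that
// literal; the R_390_64 below fills the literal with the target address.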
1675 RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
1676 Value.Offset);
1677 if (Value.SymbolName)
1678 addRelocationForSymbol(RE, Value.SymbolName);
1679 else
1680 addRelocationForSection(RE, Value.SectionID);
1681 Section.advanceStubOffset(getMaxStubSize());
1682 }
1683
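// GOT-style references resolve to the address literal inside the stub
// (StubAddress + 8); PLT-style references branch to the stub code itself.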
1684 if (RelType == ELF::R_390_GOTENT)
1685 resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
1686 Addend);
1687 else
1688 resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
1689 } else if (Arch == Triple::x86_64) {
1690 if (RelType == ELF::R_X86_64_PLT32) {
1691 // The way the PLT relocations normally work is that the linker allocates
1692 // the PLT and this relocation makes a PC-relative call into the PLT. The
1693 // PLT entry will then jump to an address provided by the GOT. On first
1694 // call, the GOT address will point back into PLT code that resolves the
1695 // symbol. After the first call, the GOT entry points to the actual
1696 // function.
1697 //
1699 // For local functions we're ignoring all of that here and just replacing
1700 // the PLT32 relocation type with PC32, which will translate the relocation
1701 // into a PC-relative call directly to the function. For external symbols we
1702 // can't be sure the function will be within 2^32 bytes of the call site, so
1703 // we need to create a stub, which calls into the GOT. This case is
1704 // equivalent to the usual PLT implementation except that we use the stub
1705 // mechanism in RuntimeDyld (which puts stubs at the end of the section)
1706 // rather than allocating a PLT section.
1707 if (Value.SymbolName) {
1708 // This is a call to an external function.
1709 // Look for an existing stub.
1710 SectionEntry &Section = Sections[SectionID];
1711 StubMap::const_iterator i = Stubs.find(Value);
1712 uintptr_t StubAddress;
1713 if (i != Stubs.end()) {
1714 StubAddress = uintptr_t(Section.getAddress()) + i->second;
1715 DEBUG(dbgs() << " Stub function found\n");
1716 } else {
1717 // Create a new stub function (equivalent to a PLT entry).
1718 DEBUG(dbgs() << " Create a new stub function\n");
1719
1720 uintptr_t BaseAddress = uintptr_t(Section.getAddress());
1721 uintptr_t StubAlignment = getStubAlignment();
1722 StubAddress =
1723 (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
1724 -StubAlignment;
1725 unsigned StubOffset = StubAddress - BaseAddress;
1726 Stubs[Value] = StubOffset;
1727 createStubFunction((uint8_t *)StubAddress);
1728
1729 // Bump our stub offset counter
1730 Section.advanceStubOffset(getMaxStubSize());
1731
1732 // Allocate a GOT Entry
1733 uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
1734
1735 // The load of the GOT address has an addend of -4
1736 resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);
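// (The stub's "jmp *disp32(%rip)" displacement starts two bytes into the
// six-byte instruction but is relative to its end, hence the extra -4.)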
1737
1738 // Fill in the value of the symbol we're targeting into the GOT
1739 addRelocationForSymbol(
1740 computeGOTOffsetRE(SectionID, GOTOffset, 0, ELF::R_X86_64_64),
1741 Value.SymbolName);
1742 }
1743
1744 // Make the target call a call into the stub table.
1745 resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
1746 Addend);
1747 } else {
1748 RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
1749 Value.Offset);
1750 addRelocationForSection(RE, Value.SectionID);
1751 }
1752 } else if (RelType == ELF::R_X86_64_GOTPCREL ||
1753 RelType == ELF::R_X86_64_GOTPCRELX ||
1754 RelType == ELF::R_X86_64_REX_GOTPCRELX) {
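// Redirect this reference to a real GOT entry: the original relocation
// becomes a PC32 against the GOT section, and an R_X86_64_64 recorded
// against the entry writes the symbol's address into it once the GOT has
// been allocated.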
1755 uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
1756 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);
1757
1758 // Fill in the value of the symbol we're targeting into the GOT
1759 RelocationEntry RE = computeGOTOffsetRE(SectionID, GOTOffset, Value.Offset, ELF::R_X86_64_64);
1760 if (Value.SymbolName)
1761 addRelocationForSymbol(RE, Value.SymbolName);
1762 else
1763 addRelocationForSection(RE, Value.SectionID);
1764 } else if (RelType == ELF::R_X86_64_PC32) {
1765 Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1766 processSimpleRelocation(SectionID, Offset, RelType, Value);
1767 } else if (RelType == ELF::R_X86_64_PC64) {
1768 Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
1769 processSimpleRelocation(SectionID, Offset, RelType, Value);
1770 } else {
1771 processSimpleRelocation(SectionID, Offset, RelType, Value);
1772 }
1773 } else {
1774 if (Arch == Triple::x86) {
1775 Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1776 }
1777 processSimpleRelocation(SectionID, Offset, RelType, Value);
1778 }
1779 return ++RelI;
1780 }
1781
getGOTEntrySize()1782 size_t RuntimeDyldELF::getGOTEntrySize() {
1783 // Not all of the targets listed here use the GOT, but it's essentially
1784 // free to cover them all in one place.
1785 size_t Result = 0;
1786 switch (Arch) {
1787 case Triple::x86_64:
1788 case Triple::aarch64:
1789 case Triple::aarch64_be:
1790 case Triple::ppc64:
1791 case Triple::ppc64le:
1792 case Triple::systemz:
1793 Result = sizeof(uint64_t);
1794 break;
1795 case Triple::x86:
1796 case Triple::arm:
1797 case Triple::thumb:
1798 Result = sizeof(uint32_t);
1799 break;
1800 case Triple::mips:
1801 case Triple::mipsel:
1802 case Triple::mips64:
1803 case Triple::mips64el:
1804 if (IsMipsO32ABI)
1805 Result = sizeof(uint32_t);
1806 else if (IsMipsN64ABI)
1807 Result = sizeof(uint64_t);
1808 else
1809 llvm_unreachable("Mips ABI not handled");
1810 break;
1811 default:
1812 llvm_unreachable("Unsupported CPU type!");
1813 }
1814 return Result;
1815 }
1816
allocateGOTEntries(unsigned SectionID,unsigned no)1817 uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no)
1818 {
1819 (void)SectionID; // The GOT section is the same for all sections in the object file
1820 if (GOTSectionID == 0) {
1821 GOTSectionID = Sections.size();
1822 // Reserve a section id. We'll allocate the section later
1823 // once we know the total size
1824 Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
1825 }
1826 uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
1827 CurrentGOTIndex += no;
1828 return StartOffset;
1829 }
1830
resolveGOTOffsetRelocation(unsigned SectionID,uint64_t Offset,uint64_t GOTOffset)1831 void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset, uint64_t GOTOffset)
1832 {
1833 // Fill in the relative address of the GOT Entry into the stub
1834 RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
1835 addRelocationForSection(GOTRE, GOTSectionID);
1836 }
1837
computeGOTOffsetRE(unsigned SectionID,uint64_t GOTOffset,uint64_t SymbolOffset,uint32_t Type)1838 RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID, uint64_t GOTOffset, uint64_t SymbolOffset,
1839 uint32_t Type)
1840 {
1841 (void)SectionID; // The GOT section is the same for all sections in the object file
1842 return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
1843 }
1844
finalizeLoad(const ObjectFile & Obj,ObjSectionToIDMap & SectionMap)1845 Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
1846 ObjSectionToIDMap &SectionMap) {
1847 if (IsMipsO32ABI)
1848 if (!PendingRelocs.empty())
1849 return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
1850
1851 // If necessary, allocate the global offset table
1852 if (GOTSectionID != 0) {
1853 // Allocate memory for the section
1854 size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
1855 uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
1856 GOTSectionID, ".got", false);
1857 if (!Addr)
1858 return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
1859
1860 Sections[GOTSectionID] =
1861 SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
1862
1863 if (Checker)
1864 Checker->registerSection(Obj.getFileName(), GOTSectionID);
1865
1866 // For now, initialize all GOT entries to zero. We'll fill them in as
1867 // needed when GOT-based relocations are applied.
1868 memset(Addr, 0, TotalSize);
1869 if (IsMipsN64ABI) {
1870 // To correctly resolve Mips GOT relocations, we need a mapping from
1871 // object's sections to GOTs.
1872 for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
1873 SI != SE; ++SI) {
1874 if (SI->relocation_begin() != SI->relocation_end()) {
1875 section_iterator RelocatedSection = SI->getRelocatedSection();
1876 ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
1877 assert(i != SectionMap.end());
1878 SectionToGOTMap[i->second] = GOTSectionID;
1879 }
1880 }
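// Drop the per-object symbol-to-GOT-offset cache; each loaded object
// builds its own GOT.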
1881 GOTSymbolOffsets.clear();
1882 }
1883 }
1884
1885 // Look for and record the EH frame section.
1886 ObjSectionToIDMap::iterator i, e;
1887 for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
1888 const SectionRef &Section = i->first;
1889 StringRef Name;
1890 Section.getName(Name);
1891 if (Name == ".eh_frame") {
1892 UnregisteredEHFrameSections.push_back(i->second);
1893 break;
1894 }
1895 }
1896
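// Reset GOT bookkeeping so the next loaded object starts a fresh GOT
// section.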
1897 GOTSectionID = 0;
1898 CurrentGOTIndex = 0;
1899
1900 return Error::success();
1901 }
1902
isCompatibleFile(const object::ObjectFile & Obj) const1903 bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
1904 return Obj.isELF();
1905 }
1906
relocationNeedsStub(const RelocationRef & R) const1907 bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
1908 if (Arch != Triple::x86_64)
1909 return true; // Conservative answer
1910
1911 switch (R.getType()) {
1912 default:
1913 return true; // Conservative answer
1914
1916 case ELF::R_X86_64_GOTPCREL:
1917 case ELF::R_X86_64_GOTPCRELX:
1918 case ELF::R_X86_64_REX_GOTPCRELX:
1919 case ELF::R_X86_64_PC32:
1920 case ELF::R_X86_64_PC64:
1921 case ELF::R_X86_64_64:
1922 // We know that these relocation types won't need a stub function. This
1923 // list can be extended as needed.
1924 return false;
1925 }
1926 }
1927
1928 } // namespace llvm
1929