//===- lib/FileFormat/MachO/ArchHandler_x86.cpp ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm::MachO;
using namespace lld::mach_o::normalized;

namespace lld {
namespace mach_o {

using llvm::support::ulittle16_t;
using llvm::support::ulittle32_t;

using llvm::support::little16_t;
using llvm::support::little32_t;

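// ArchHandler_x86 supplies the 32-bit x86 (i386) specific behaviour of the
// Mach-O reader/writer: translating i386 relocations to and from Reference
// kinds, applying fixups to atom content, and describing lazy-binding stubs.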
class ArchHandler_x86 : public ArchHandler {
public:
  ArchHandler_x86() = default;
  ~ArchHandler_x86() override = default;

  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override { return Reference::KindArch::x86; }

  const StubInfo &stubInfo() override { return _sStubInfo; }
  bool isCallSite(const Reference &) override;
  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  bool needsCompactUnwind() override {
    return false;
  }

  Reference::KindValue imageOffsetKind() override {
    return invalid;
  }

  Reference::KindValue imageOffsetKindIndirect() override {
    return invalid;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return invalid;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override {
    return delta32;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return invalid;
  }

  Reference::KindValue pointerKind() override {
    return invalid;
  }

  uint32_t dwarfCompactUnwindType() override {
    return 0x04000000U;
  }

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool swap, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBaseAddress,
                      llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

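  // Data embedded in code is modelled as a pair of markers: a modeData
  // reference where data begins and a modeCode reference where instructions
  // resume. The three hooks below let the generic writer turn those markers
  // into data-in-code entries.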
  bool isDataInCodeTransition(Reference::KindValue refKind) override {
    return refKind == modeCode || refKind == modeData;
  }

  Reference::KindValue dataInCodeTransitionStart(
      const MachODefinedAtom &atom) override {
    return modeData;
  }

  Reference::KindValue dataInCodeTransitionEnd(
      const MachODefinedAtom &atom) override {
    return modeCode;
  }

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo _sStubInfo;

  enum X86Kind : Reference::KindValue {
    invalid,               /// for error condition

    modeCode,              /// Content starting at this offset is code.
    modeData,              /// Content starting at this offset is data.

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    branch16,              /// ex: callw _foo
    abs32,                 /// ex: movl _foo, %eax
    funcRel32,             /// ex: movl _foo-L1(%eax), %eax
    pointer32,             /// ex: .long _foo
    delta32,               /// ex: .long _foo - .
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.
  };

  static bool useExternalRelocationTo(const Atom &target);

  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress);

  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
};

//===----------------------------------------------------------------------===//
// ArchHandler_x86
//===----------------------------------------------------------------------===//

const Registry::KindStrings ArchHandler_x86::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid),
  LLD_KIND_STRING_ENTRY(modeCode),
  LLD_KIND_STRING_ENTRY(modeData),
  LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(branch16),
  LLD_KIND_STRING_ENTRY(abs32),
  LLD_KIND_STRING_ENTRY(funcRel32),
  LLD_KIND_STRING_ENTRY(pointer32),
  LLD_KIND_STRING_ENTRY(delta32),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_END
};

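// Lazy binding on i386 has three pieces, all described by this table: each
// stub jumps indirectly through its lazy pointer; that pointer initially
// targets a per-symbol stub helper, which pushes an offset into the
// lazy-binding info and branches to the common helper; the common helper
// pushes the image cache pointer and jumps through the dyld_stub_binder
// pointer to resolve the symbol.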
const ArchHandler::StubInfo ArchHandler_x86::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::x86, pointer32, 0, 0 },
  { Reference::KindArch::x86, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86, pointer32, 0, 0 },

  // x86 code alignment
  1,

  // Stub size and code
  6,
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  { Reference::KindArch::x86, abs32, 2, 0 },
  { false, 0, 0, 0 },

  // Stub Helper size and code
  10,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushl $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp helperhelper
  { Reference::KindArch::x86, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code
  12,
  // Stub helper alignment
  2,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushl $dyld_ImageLoaderCache
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *_fast_lazy_bind
    0x90 },                                     // nop
  { Reference::KindArch::x86, abs32, 1, 0 },
  { false, 0, 0, 0 },
  { Reference::KindArch::x86, abs32, 7, 0 },
  { false, 0, 0, 0 }
};

bool ArchHandler_x86::isCallSite(const Reference &ref) {
  return (ref.kindValue() == branch32);
}

bool ArchHandler_x86::isPointer(const Reference &ref) {
  return (ref.kindValue() == pointer32);
}

bool ArchHandler_x86::isPairedReloc(const Relocation &reloc) {
  if (!reloc.scattered)
    return false;
  return (reloc.type == GENERIC_RELOC_LOCAL_SECTDIFF) ||
         (reloc.type == GENERIC_RELOC_SECTDIFF);
}

llvm::Error
ArchHandler_x86::getReferenceInfo(const Relocation &reloc,
                                  const DefinedAtom *inAtom,
                                  uint32_t offsetInAtom,
                                  uint64_t fixupAddress, bool swap,
                                  FindAtomBySectionAndAddress atomFromAddress,
                                  FindAtomBySymbolIndex atomFromSymbolIndex,
                                  Reference::KindValue *kind,
                                  const lld::Atom **target,
                                  Reference::Addend *addend) {
  DefinedAtom::ContentPermissions perms;
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
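  // relocPattern() folds the relocation type together with its scattered,
  // pc-rel, extern, and length attributes so each supported encoding is a
  // single switch case. Note that x86 pc-relative displacements are relative
  // to the end of the instruction, hence the +4 (or +2) bias below.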
  switch (relocPattern(reloc)) {
  case GENERIC_RELOC_VANILLA | rPcRel | rExtern | rLength4:
    // ex: call _foo (and _foo undefined)
    *kind = branch32;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rLength4:
    // ex: call _foo (and _foo defined)
    *kind = branch32;
    targetAddress =
        fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4:
    // ex: call _foo+n (and _foo defined)
    *kind = branch32;
    targetAddress =
        fixupAddress + 4 + (int32_t)*(const little32_t *)fixupContent;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = targetAddress - reloc.value;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rExtern | rLength2:
    // ex: callw _foo (and _foo undefined)
    *kind = branch16;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rPcRel | rLength2:
    // ex: callw _foo (and _foo defined)
    *kind = branch16;
    targetAddress =
        fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2:
    // ex: callw _foo+n (and _foo defined)
    *kind = branch16;
    targetAddress =
        fixupAddress + 2 + (int16_t)*(const little16_t *)fixupContent;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = targetAddress - reloc.value;
    break;
  case GENERIC_RELOC_VANILLA | rExtern | rLength4:
    // ex: movl _foo, %eax (and _foo undefined)
    // ex: .long _foo (and _foo undefined)
    perms = inAtom->permissions();
    *kind = ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X)
        ? abs32 : pointer32;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const ulittle32_t *)fixupContent;
    break;
  case GENERIC_RELOC_VANILLA | rLength4:
    // ex: movl _foo, %eax (and _foo defined)
    // ex: .long _foo (and _foo defined)
    perms = inAtom->permissions();
    *kind = ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X)
        ? abs32 : pointer32;
    targetAddress = *(const ulittle32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case GENERIC_RELOC_VANILLA | rScattered | rLength4:
    // ex: .long _foo+n (and _foo defined)
    perms = inAtom->permissions();
    *kind = ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X)
        ? abs32 : pointer32;
    if (auto ec = atomFromAddress(0, reloc.value, target, addend))
      return ec;
    *addend = *(const ulittle32_t *)fixupContent - reloc.value;
    break;
  default:
    return llvm::make_error<GenericError>("unsupported i386 relocation type");
  }
  return llvm::Error::success();
}

llvm::Error
ArchHandler_x86::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                      const normalized::Relocation &reloc2,
                                      const DefinedAtom *inAtom,
                                      uint32_t offsetInAtom,
                                      uint64_t fixupAddress, bool swap,
                                      bool scatterable,
                                      FindAtomBySectionAndAddress atomFromAddr,
                                      FindAtomBySymbolIndex atomFromSymbolIndex,
                                      Reference::KindValue *kind,
                                      const lld::Atom **target,
                                      Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  DefinedAtom::ContentPermissions perms = inAtom->permissions();
  uint32_t fromAddress;
  uint32_t toAddress;
  uint32_t value;
  const lld::Atom *fromTarget;
  Reference::Addend offsetInTo;
  Reference::Addend offsetInFrom;
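  // A SECTDIFF relocation encodes the expression "to - from": reloc1.value
  // holds the address of the minuend ("to") and the value of the following
  // GENERIC_RELOC_PAIR holds the subtrahend ("from"). The 32-bit content at
  // the fixup location supplies any extra addend.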
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((GENERIC_RELOC_SECTDIFF | rScattered | rLength4) << 16 |
         GENERIC_RELOC_PAIR | rScattered | rLength4):
  case ((GENERIC_RELOC_LOCAL_SECTDIFF | rScattered | rLength4) << 16 |
         GENERIC_RELOC_PAIR | rScattered | rLength4):
    toAddress = reloc1.value;
    fromAddress = reloc2.value;
    value = *(const little32_t *)fixupContent;
    if (auto ec = atomFromAddr(0, toAddress, target, &offsetInTo))
      return ec;
    if (auto ec = atomFromAddr(0, fromAddress, &fromTarget, &offsetInFrom))
      return ec;
    if (fromTarget != inAtom) {
      if (*target != inAtom)
        return llvm::make_error<GenericError>(
            "SECTDIFF relocation where neither target is in atom");
      *kind = negDelta32;
      *addend = toAddress - value - fromAddress;
      *target = fromTarget;
    } else {
      if ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) {
        // SECTDIFF relocations are used in i386 codegen where the function
        // prolog does a CALL to the next instruction which POPs the return
        // address into EBX which becomes the pic-base register. The POP
        // instruction is the label used for the subtrahend in expressions.
        // The funcRel32 kind represents the 32-bit delta to some symbol from
        // the start of the function (atom) containing the funcRel32.
        *kind = funcRel32;
        uint32_t ta = fromAddress + value - toAddress;
        *addend = ta - offsetInFrom;
      } else {
        *kind = delta32;
        *addend = fromAddress + value - toAddress;
      }
    }
    return llvm::Error::success();
  default:
    return llvm::make_error<GenericError>("unsupported i386 relocation type");
  }
}

void ArchHandler_x86::generateAtomContent(const DefinedAtom &atom,
                                          bool relocatable,
                                          FindAddressForAtom findAddress,
                                          FindAddressForAtom findSectionAddress,
                                          uint64_t imageBaseAddress,
                            llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Copy raw bytes.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  // Apply fix-ups.
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
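    // Only DefinedAtoms have addresses assigned at this point; other targets
    // are identified by the relocation or binding info instead, so leave zero.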
    uint64_t targetAddress = 0;
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
    if (relocatable) {
      applyFixupRelocatable(*ref, &atomContentBuffer[offset],
                            fixupAddress, targetAddress,
                            atomAddress);
    } else {
      applyFixupFinal(*ref, &atomContentBuffer[offset],
                      fixupAddress, targetAddress,
                      atomAddress);
    }
  }
}

void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *loc,
                                      uint64_t fixupAddress,
                                      uint64_t targetAddress,
                                      uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
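  // ulittle32_t stores are little-endian and alignment-tolerant regardless of
  // the host, so fixups can be written straight into the content buffer.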
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case branch32:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    break;
  case branch16:
    *loc32 = (targetAddress - (fixupAddress + 2)) + ref.addend();
    break;
  case pointer32:
  case abs32:
    *loc32 = targetAddress + ref.addend();
    break;
  case funcRel32:
    *loc32 = targetAddress - inAtomAddress + ref.addend();
    break;
  case delta32:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    break;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    break;
  case modeCode:
  case modeData:
  case lazyPointer:
    // do nothing
    break;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    break;
  case invalid:
    llvm_unreachable("invalid x86 Reference Kind");
    break;
  }
}

void ArchHandler_x86::applyFixupRelocatable(const Reference &ref,
                                            uint8_t *loc,
                                            uint64_t fixupAddress,
                                            uint64_t targetAddress,
                                            uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  bool useExternalReloc = useExternalRelocationTo(*ref.target());
  ulittle16_t *loc16 = reinterpret_cast<ulittle16_t *>(loc);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
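  // For external relocations only the addend is written into the instruction
  // (the symbol is named by the relocation entry); for internal relocations
  // the target's address is baked into the section content.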
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case branch32:
    if (useExternalReloc)
      *loc32 = ref.addend() - (fixupAddress + 4);
    else
      *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    break;
  case branch16:
    if (useExternalReloc)
      *loc16 = ref.addend() - (fixupAddress + 2);
    else
      *loc16 = (targetAddress - (fixupAddress + 2)) + ref.addend();
    break;
  case pointer32:
  case abs32:
    *loc32 = targetAddress + ref.addend();
    break;
  case funcRel32:
    *loc32 = targetAddress - inAtomAddress + ref.addend(); // FIXME
    break;
  case delta32:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    break;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    break;
  case modeCode:
  case modeData:
  case lazyPointer:
  case lazyImmediateLocation:
    // do nothing
    break;
  case invalid:
    llvm_unreachable("invalid x86 Reference Kind");
    break;
  }
}

bool ArchHandler_x86::useExternalRelocationTo(const Atom &target) {
  // Undefined symbols are referenced via external relocations.
  if (isa<UndefinedAtom>(&target))
    return true;
  if (const DefinedAtom *defAtom = dyn_cast<DefinedAtom>(&target)) {
    switch (defAtom->merge()) {
    case DefinedAtom::mergeAsTentative:
      // Tentative definitions are referenced via external relocations.
      return true;
    case DefinedAtom::mergeAsWeak:
    case DefinedAtom::mergeAsWeakAndAddressUsed:
      // Global weak-defs are referenced via external relocations.
      return (defAtom->scope() == DefinedAtom::scopeGlobal);
    default:
      break;
    }
  }
  // Everything else is referenced via an internal relocation.
  return false;
}

void ArchHandler_x86::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  bool useExternalReloc = useExternalRelocationTo(*ref.target());
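  // Lower each reference back to a relocation form the reader accepts:
  // external (symbol-indexed) when the target must be named by symbol,
  // scattered when a nonzero addend requires recording the target address,
  // and plain vanilla (section-indexed) otherwise.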
  switch (static_cast<X86Kind>(ref.kindValue())) {
  case modeCode:
  case modeData:
    break;
  case branch32:
    if (useExternalReloc) {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern | rPcRel | rLength4);
    } else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0, GENERIC_RELOC_VANILLA | rPcRel | rLength4);
    }
    break;
  case branch16:
    if (useExternalReloc) {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern | rPcRel | rLength2);
    } else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0, GENERIC_RELOC_VANILLA | rPcRel | rLength2);
    }
    break;
  case pointer32:
  case abs32:
    if (useExternalReloc)
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  GENERIC_RELOC_VANILLA | rExtern | rLength4);
    else {
      if (ref.addend() != 0)
        appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                    GENERIC_RELOC_VANILLA | rScattered | rLength4);
      else
        appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()),
                    0, GENERIC_RELOC_VANILLA | rLength4);
    }
    break;
  case funcRel32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) - ref.addend(),
                GENERIC_RELOC_PAIR | rScattered | rLength4);
    break;
  case delta32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) +
                                                          ref.offsetInAtom(),
                GENERIC_RELOC_PAIR | rScattered | rLength4);
    break;
  case negDelta32:
    appendReloc(relocs, sectionOffset, 0, addressForAtom(atom) +
                                                          ref.offsetInAtom(),
                GENERIC_RELOC_SECTDIFF | rScattered | rLength4);
    appendReloc(relocs, sectionOffset, 0, addressForAtom(*ref.target()),
                GENERIC_RELOC_PAIR | rScattered | rLength4);
    break;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    break;
  case invalid:
    llvm_unreachable("unknown x86 Reference Kind");
    break;
  }
}

std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86() {
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86());
}

} // namespace mach_o
} // namespace lld