//===- InputChunks.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputChunks.h"
#include "Config.h"
#include "OutputSegment.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "llvm/Support/LEB128.h"

#define DEBUG_TYPE "lld"

using namespace llvm;
using namespace llvm::wasm;
using namespace llvm::support::endian;

namespace lld {
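// Return the printable name of a relocation type (e.g.
// "R_WASM_FUNCTION_INDEX_LEB"), generated from the WASM_RELOC entries in
// WasmRelocs.def.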
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}

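// Relocation types that operate on 64-bit rather than 32-bit values.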
bool relocIs64(uint8_t relocType) {
  switch (relocType) {
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
  case R_WASM_MEMORY_ADDR_REL_SLEB64:
  case R_WASM_MEMORY_ADDR_I64:
    return true;
  default:
    return false;
  }
}

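// Format an input chunk for diagnostics as "<file>:(<chunk name>)".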
std::string toString(const wasm::InputChunk *c) {
  return (toString(c->file) + ":(" + c->getName() + ")").str();
}

namespace wasm {
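// Return the name of the COMDAT group this chunk belongs to, or an empty
// StringRef if it is not part of any COMDAT.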
StringRef InputChunk::getComdatName() const {
  uint32_t index = getComdat();
  if (index == UINT32_MAX)
    return StringRef();
  return file->getWasmObj()->linkingData().Comdats[index];
}

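// Debug-only sanity check: decode the value currently stored at each
// relocation site and warn if it does not match the value the input file
// expects, or if a LEB-encoded site is not padded to its full 5/10-byte width.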
void InputChunk::verifyRelocTargets() const {
  for (const WasmRelocation &rel : relocations) {
    uint64_t existingValue;
    unsigned bytesRead = 0;
    unsigned paddedLEBWidth = 5;
    auto offset = rel.Offset - getInputSectionOffset();
    const uint8_t *loc = data().data() + offset;
    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
      existingValue = decodeULEB128(loc, &bytesRead);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      existingValue = decodeULEB128(loc, &bytesRead);
      paddedLEBWidth = 10;
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
      paddedLEBWidth = 10;
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
      existingValue = read32le(loc);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      existingValue = read64le(loc);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }

    if (bytesRead && bytesRead != paddedLEBWidth)
      warn("expected LEB at relocation site to be 5/10-byte padded");

    if (rel.Type != R_WASM_GLOBAL_INDEX_LEB &&
        rel.Type != R_WASM_GLOBAL_INDEX_I32) {
      auto expectedValue = file->calcExpectedValue(rel);
      if (expectedValue != existingValue)
        warn(toString(this) + ": unexpected existing value for " +
             relocTypeToString(rel.Type) + ": existing=" +
             Twine(existingValue) + " expected=" + Twine(expectedValue));
    }
  }
}

// Copy this input chunk to an mmap'ed output file and apply relocations.
void InputChunk::writeTo(uint8_t *buf) const {
  // Copy contents
  memcpy(buf + outputOffset, data().data(), data().size());

  // Apply relocations
  if (relocations.empty())
    return;

#ifndef NDEBUG
  verifyRelocTargets();
#endif

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  int32_t off = outputOffset - getInputSectionOffset();
  auto tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset + off;
    auto value = file->calcNewValue(rel, tombstone);
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " value=" << value << " offset=" << rel.Offset
                      << "\n");

    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_EVENT_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
      encodeULEB128(value, loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}

// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  int32_t off = outputOffset - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  for (const WasmRelocation &rel : relocations) {
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}

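// Record the final function index assigned by the writer. It may only be set
// once.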
void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
                    << " -> " << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}

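// Record the index assigned to this function in the indirect function table.
// It may only be set once.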
void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << getName() << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}

// Write a relocation value without padding and return the number of bytes
// written.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

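// Return the padded width (5 or 10 bytes) that a LEB/SLEB relocation of this
// type occupies in the input, before compression.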
static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_EVENT_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

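// Return the number of bytes needed to encode `value` without any padding.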
static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
  uint8_t buf[10];
  return writeCompressedReloc(buf, rel, value);
}

// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, when outputting a non-relocatable file, the code section can be
// compressed to remove this padding.  In that case we need to perform a size
// calculation based on the value at each relocation.  At best we end up saving
// 4 bytes for each relocation entry.
//
// This function only computes the final output size.  It must be called
// before getSize() is used to calculate the layout of the code section.
void InputFunction::calculateSize() {
  if (!file || !config->compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << getName() << "\n");

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  auto tombstone = getTombstone();

  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now that we know how long the resulting function is, we can add the
  // encoding of its length.
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize  new: " << compressedSize << "\n");
}

// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
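//
// The compressed output for a single function is laid out as:
//   [ULEB128 new body size][code bytes][reloc][code bytes]...[code bytes]
// where each reloc is written unpadded by writeCompressedReloc() and the
// surrounding code bytes are copied verbatim from the input.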
void InputFunction::writeTo(uint8_t *buf) const {
  if (!file || !config->compressRelocations)
    return InputChunk::writeTo(buf);

  buf += outputOffset;
  uint8_t *orig = buf;
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  auto tombstone = getTombstone();
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << getName() << "\n");
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel, file->calcNewValue(rel, tombstone));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}

// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC) where addresses
// are not known at static link time.
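//
// For each relocation the generated code has roughly this shape:
//   global.get __memory_base        ;; address of the relocation site
//   i32.const <site offset>         ;; (i64 opcodes when --is64)
//   i32.add
//   ...value to store...            ;; GOT global [+ addend], or
//                                   ;; __memory_base/__table_base + offset
//   i32.store                       ;; i64.store for 64-bit reloc types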
void InputSegment::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
                    << " count=" << relocations.size() << "\n");

  unsigned opcode_ptr_const = config->is64.getValueOr(false)
                                  ? WASM_OPCODE_I64_CONST
                                  : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = config->is64.getValueOr(false)
                                ? WASM_OPCODE_I64_ADD
                                : WASM_OPCODE_I32_ADD;

  auto tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
  uint64_t segmentVA = outputSeg->startVA + outputSegmentOffset;
  for (const WasmRelocation &rel : relocations) {
    uint64_t offset = rel.Offset - getInputSectionOffset();
    uint64_t outputOffset = segmentVA + offset;

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << outputOffset << "\n");

    // Get __memory_base
    writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
    writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");

    // Add the offset of the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, outputOffset, "offset");
    writeU8(os, opcode_ptr_add, "ADD");

    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    Symbol *sym = file->getSymbol(rel);
    // Now figure out what we want to store
    if (sym->hasGOTIndex()) {
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      const GlobalSymbol *baseSymbol = WasmSym::memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = WasmSym::tableBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
  }
}

uint64_t InputSection::getTombstoneForSection(StringRef name) {
  // When a function is not live we need to update relocations referring to it.
  // If they occur in DWARF debug symbols, we want to change the pc of the
  // function to -1 to avoid overlapping with a valid range. However, for the
  // debug_ranges and debug_loc sections that would conflict with the existing
  // meaning of -1, so we use -2.
  // Returning 0 means there is no tombstone value for this section, and
  // relocations will just use the addend.
  if (!name.startswith(".debug_"))
    return 0;
  if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
    return UINT64_C(-2);
  return UINT64_C(-1);
}

} // namespace wasm
} // namespace lld